Commit 953f765 (1 parent: 62ce45c)
Showing 70 changed files with 26,322 additions and 23 deletions.
Binary files added (not shown):
  BIN +3.42 KB application_deployment_automation/code/__pycache__/app_deployment.cpython-310.pyc
  BIN +2.61 KB application_deployment_automation/code/__pycache__/app_yaml_templates.cpython-310.pyc
  BIN +2.65 KB ...ication_deployment_automation/code/__pycache__/create_app_deployment_yaml.cpython-310.pyc
application_deployment_automation/code/app_deployment.py (new file, 96 lines)
import json
import os
import subprocess

import pandas as pd

from app_yaml_templates import mod_file, docker_file, application_file

# Deployment endpoints and naming templates.
local_registry_url = "206.12.96.137:5000"
iat_workload_data = "../data/meta_func_deployment_100/dataframe/workload_event_data_max_63_100_apps.pickle"
image_url_template = "{}/application-{}.go:latest"
app_url_template = "application-{}.default.192.168.106.10.sslip.io"


def deploy_apps(workload_scaling_factor):
    # Load the workload trace and scale it before deploying; the scaled
    # dataframe is also what downstream callers consume.
    df = pd.read_pickle(iat_workload_data)
    df = scale_workload(df, workload_scaling_factor)
    # create_go_modules(df)

    subprocess.run(["sh", "deploy_apps.sh"])

    # gen_faas_profiler(df)

    return df


def scale_workload(df, scaling_factor):
    df["ExecDurations"] = df["ExecDurations"].apply(lambda x: scale_exec_duration(x, scaling_factor))
    df["IAT"] = df["IAT"].apply(lambda x: scale_each_trace(x, scaling_factor))
    return df


# Divide the execution duration by the scaling factor.
def scale_exec_duration(duration, scaling_factor):
    return duration / scaling_factor


# Divide every inter-arrival time in a trace by the scaling factor.
def scale_each_trace(iat_list, scaling_factor):
    if len(iat_list) == 0:
        return iat_list
    return [iat / scaling_factor for iat in iat_list]
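# Worked example of the scaling (hypothetical numbers): with
# scaling_factor=2, an ExecDurations value of 100.0 ms becomes 50.0 ms,
# and an IAT list [4.0, 8.0] becomes [2.0, 4.0]. The whole trace is
# compressed to play back in half the wall-clock time.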
def create_go_modules(df):
    # Create one Go module per app: go.mod, a main source file with the
    # app's average exec time and memory allocation, and a Dockerfile.
    for _, row in df.iterrows():
        code_folder = f"../data/meta_func_deployment_100/apps/{row.HashApp}"
        if not os.path.exists(code_folder):
            print("creating directory structure...")
            os.makedirs(code_folder)

        with open(os.path.join(code_folder, "go.mod"), "w") as fp:
            fp.write(mod_file["go.mod"].format(row.HashApp))

        with open(os.path.join(code_folder, f"application_{str(row.HashApp)[:8]}.go"), "w") as fp:
            fp.write(application_file.format(row.ExecDurations, row.AverageMemUsage))

        with open(os.path.join(code_folder, "Dockerfile"), "w") as fp:
            fp.write(docker_file["Dockerfile"])

        # Empty placeholder for the app's output log.
        with open(os.path.join(code_folder, f"{row.HashApp}_output.txt"), "w") as fp:
            fp.write("")


def gen_faas_profiler(complete_df):
    # Emit a FaaS-profiler workload config with one instance per app,
    # each carrying its scaled inter-arrival-time list.
    with open("../data/misc/workload_configs_default.json", "r") as f:
        data = json.load(f)

    for index, (_, row) in enumerate(complete_df.iterrows()):
        data["instances"][f"instance_{index}"] = {
            "application": f"application-{row['HashApp'][:8]}",
            "url": "http://127.0.0.1:8080/hello",
            "host": app_url_template.format(row["HashApp"][:8]),
            "data": {},
            "interarrivals_list": list(row["IAT"]),
        }

    # 'x' mode fails loudly if the output file already exists.
    with open("../data/misc/new_json_files/work_load_configs_rep_meta_func_IAT.json", "x") as f:
        json.dump(data, f, indent=4)


if __name__ == "__main__":
    scaling_factor = 1
    deploy_apps(scaling_factor)
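For reference, gen_faas_profiler extends the default workload config with one entry per app under the "instances" key. A sketch of the resulting JSON shape (the hash prefix and IAT values here are invented):

{
    "instances": {
        "instance_0": {
            "application": "application-a1b2c3d4",
            "url": "http://127.0.0.1:8080/hello",
            "host": "application-a1b2c3d4.default.192.168.106.10.sslip.io",
            "data": {},
            "interarrivals_list": [4.0, 8.0, 2.5]
        }
    }
}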
application_deployment_automation/code/app_yaml_templates.py (new file, 79 additions, 0 deletions)
mod_file = {
    "go.mod": """module {}\ngo 1.17"""
}

docker_file = {
    "Dockerfile": """
## Base image for the Go application; it should match the go 1.17
## directive declared in go.mod above.
FROM golang:1.17-alpine
RUN apk add build-base
## Create an /app directory inside the image to hold the
## application source files.
RUN mkdir /app
## Copy everything in the build context into /app.
ADD . /app
## Run all further commands from inside /app.
WORKDIR /app
## Compile the Go program into a binary executable.
RUN go build -o main .
## Start command that launches the newly built binary.
CMD ["/app/main"]
"""
}
application_file = '''
package main

import (
	"fmt"
	"math"
	"net/http"
	"runtime/debug"
	"time"
)

// hello busy-loops over a byte array of the target size for the target
// duration, then sleeps off any remainder, emulating a function with a
// given exec time (ms) and memory footprint (MB). The doubled braces
// escape Python's str.format, which fills in the two values.
func hello(w http.ResponseWriter, req *http.Request) {{
	start_time := time.Now()
	given_time := {}
	const given_memory = {} * 1024 * 1024
	time_in_milliseconds := math.Floor(given_time)
	time_in_nanoseconds := (given_time - time_in_milliseconds) * 1000000
	actual_exec_time := time.Duration(time_in_milliseconds)*time.Millisecond + time.Duration(time_in_nanoseconds)*time.Nanosecond
	mem_alloc_start_time := time.Now()
	var memory_footprint_ds [int(given_memory)]byte
	mem_alloc_end_time := time.Now()
	fmt.Printf("\\nmemory alloc time: %v\\n", mem_alloc_end_time.Sub(mem_alloc_start_time))
	curr_time := time.Now()
	index := 0
	for curr_time.Sub(mem_alloc_end_time) < actual_exec_time {{
		memory_footprint_ds[index] = 1
		// Wrap around so long-running requests stay in bounds.
		index = (index + 1) % len(memory_footprint_ds)
		curr_time = time.Now()
	}}
	end_time_before_sleep := time.Now()
	fmt.Printf("\\nloop time: %v\\n", end_time_before_sleep.Sub(mem_alloc_end_time))
	overhead_exec_so_far := end_time_before_sleep.Sub(start_time)
	if overhead_exec_so_far < actual_exec_time {{
		time.Sleep(actual_exec_time - overhead_exec_so_far)
	}}
	end_time := time.Now()
	fmt.Printf("\\narr size %v start: %v end: %v duration: %v\\n", len(memory_footprint_ds), start_time.UTC(), end_time.UTC(), end_time.Sub(start_time))
	debug.FreeOSMemory()
}}

func main() {{
	http.HandleFunc("/hello", hello)
	http.ListenAndServe(":8080", nil)
}}
'''
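A minimal sketch of how create_go_modules in app_deployment.py renders these templates (the hash, duration, and memory values here are invented for illustration):

from app_yaml_templates import mod_file, docker_file, application_file

app_hash = "a1b2c3d4e5f6"      # hypothetical HashApp
exec_ms, mem_mb = 120.5, 170   # hypothetical ExecDurations / AverageMemUsage

print(mod_file["go.mod"].format(app_hash))  # module a1b2c3d4e5f6 / go 1.17
go_source = application_file.format(exec_ms, mem_mb)
# The doubled braces {{ }} in the template survive str.format() as the
# literal braces of the generated Go code.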
New file, 152 lines (path not shown)
import json

import matplotlib.pyplot as plt
import pandas as pd  # used by the commented-out concurrency analysis below


def get_arrival_time(interarrival_times):
    # Convert each app's inter-arrival times into absolute arrival times
    # with a running sum.
    arrival_times = []
    for iat_per_app in interarrival_times:
        cur_arrival_times = []
        curr_time = 0
        for iat in iat_per_app:
            curr_time += iat
            cur_arrival_times.append(curr_time)
        arrival_times.append(cur_arrival_times)
    return arrival_times


def get_min_buckets():
    # One counter per minute of the experiment.
    return [0] * TOTAL_EXPERIMENT_TIME


TOTAL_EXPERIMENT_TIME = 18 * 60  # minutes

# Load the workload config JSON.
with open('../data/misc/new_json_files/work_load_configs_rep_mid_meta_func_100_IAT.json', 'r') as json_file:
    data = json.load(json_file)

# Extract per-instance inter-arrival times and convert to arrival times.
interarrival_times = [instance['interarrivals_list'] for instance in data['instances'].values()]
arrival_times = get_arrival_time(interarrival_times)

# Count the requests that arrive in each one-minute bucket.
per_min_buckets = get_min_buckets()
for i in range(len(per_min_buckets) - 1):
    for per_app_arrival_times in arrival_times:
        for arrival_time in per_app_arrival_times:
            if i <= arrival_time / 60.0 < i + 1:
                per_min_buckets[i] += 1
    print(f"Bucket {i} completed")

plt.stem(per_min_buckets)
plt.xlabel("Time (min)")
plt.ylabel("# of requests per min")
plt.savefig("combined_arrival_times.pdf")
print(sum(per_min_buckets[:TOTAL_EXPERIMENT_TIME]))
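# Sanity check with invented numbers: IATs of [30, 30, 90] seconds give
# arrival times [30, 60, 150], so minute-bucket 0 gets one request,
# bucket 1 gets one (the arrival at t=60 s), and bucket 2 gets one
# (t=150 s falls in [120 s, 180 s)).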
# def get_event_based_concurrency(df):
#     request_event_queue = []
#
#     for _, row in df.iterrows():
#         # use IAT to get AT
#         arrival_times = get_arrival_time(row.IAT)
#
#         # use AT and ExecDurations to get request concurrency
#         for arrival_time in arrival_times:
#             request_event_queue.append((arrival_time, 's'))
#             request_event_queue.append((arrival_time + (row.ExecDurations) / 1000.0, 'e'))
#
#     request_event_queue.sort(key=lambda x: x[0])
#
#     request_concurrency = 0
#     concurrency_map = {}
#     for event in request_event_queue:
#         if event[1] == 's':
#             request_concurrency += 1
#         else:
#             request_concurrency -= 1
#
#         if event[0] not in concurrency_map:
#             concurrency_map[event[0]] = (request_concurrency, event[1])
#         elif concurrency_map[event[0]][1] == 's' and event[1] == 'e':
#             del concurrency_map[event[0]]
#         else:
#             concurrency_map[event[0]] = (request_concurrency, event[1])
#
#     return concurrency_map
#
# # use arrival times and request exec time per app in df to get the request concurrency
# df = pd.read_pickle('../data/mid_meta_func_deployment_100/dataframe/workload_event_data_max_72_100_apps.pickle')
#
# concurrency_map = get_event_based_concurrency(df)
#
# per_min_buckets = get_min_buckets()
#
# start_time = list(concurrency_map.keys())[0]
#
# prev_bucket_conc = 0
#
# # get average request concurrency per min
# for i in range(len(per_min_buckets) - 1):
#     cur_bucket_events = []
#     cur_bucket_ts = []
#     cur_bucket_conc = 0
#     avg_conc = 0
#
#     for ts, event in concurrency_map.items():
#         ts = ts / 60.0
#
#         if ts > i + 1:
#             break
#
#         if ts >= i and ts < i + 1:
#             cur_bucket_events.append(event)
#             cur_bucket_ts.append(ts)
#
#     if len(cur_bucket_ts) != 0:
#         if cur_bucket_ts[0] != i:
#             cur_bucket_events.insert(0, (prev_bucket_conc, 's'))
#             cur_bucket_ts.insert(0, i)
#
#         for j in range(len(cur_bucket_ts) - 1):
#             cur_event = cur_bucket_events[j]
#             duration = cur_bucket_ts[j + 1] - cur_bucket_ts[j]
#
#             cur_bucket_conc += cur_event[0] * duration
#
#             prev_bucket_conc = cur_event[0]
#
#         if cur_bucket_ts[-1] != i + 1:
#             cur_bucket_conc += cur_bucket_events[-1][0] * (i + 1 - cur_bucket_ts[-1])
#             prev_bucket_conc = cur_bucket_events[-1][0]
#
#     print(f"Bucket {i}: {cur_bucket_conc}")
#     if cur_bucket_conc == 0:
#         cur_bucket_conc = prev_bucket_conc
#     avg_conc = cur_bucket_conc
#     per_min_buckets[i] = avg_conc
#
# # plt.step(concurrency_map.keys(), [val[0] for val in concurrency_map.values()], where='post')
# plt.step(range(len(per_min_buckets)), per_min_buckets, where='post')
# plt.savefig("request_concurrency.pdf")
New file, 23 lines (path not shown)
from app_deployment import deploy_apps
from create_app_deployment_yaml import create_deployment

APP_TYPE = "100_meta_func"

OUTPUT_FOLDER = f"../output/{APP_TYPE}/"

NODE_COUNT = 10

CUSHION_MEMORY = 50  # MB

PER_POD_CONCURRENCY = 1

CPU_PER_APP = 350  # millicores

SCALING_FACTOR = 1

df = deploy_apps(workload_scaling_factor=SCALING_FACTOR)

create_deployment(
    df=df,
    app_type=APP_TYPE,
    output_folder=OUTPUT_FOLDER,
    node_count=NODE_COUNT,
    cushion_memory=CUSHION_MEMORY,
    per_pod_concurrency=PER_POD_CONCURRENCY,
    cpu_per_app=CPU_PER_APP,
)
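create_app_deployment_yaml.py itself is not part of this excerpt. Purely as a hypothetical illustration of where the knobs above could end up, here is a Knative-style Service manifest consistent with the image and URL templates in app_deployment.py (all names and values invented; the actual generated YAML may differ):

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: application-a1b2c3d4      # hypothetical app hash prefix
  namespace: default
spec:
  template:
    spec:
      containerConcurrency: 1     # PER_POD_CONCURRENCY
      containers:
        - image: 206.12.96.137:5000/application-a1b2c3d4.go:latest
          resources:
            requests:
              cpu: 350m           # CPU_PER_APP (millicores)
              memory: 220Mi       # per-app memory plus CUSHION_MEMORY (invented)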