├── KNative_prototype
│   ├── cnn_serving
│   │   ├── Dockerfile
│   │   ├── app.py
│   │   ├── cnn_serving.yaml
│   │   ├── dnld_blob.py
│   │   └── runner.py
│   ├── deploy.sh
│   ├── img_res
│   │   ├── Dockerfile
│   │   ├── app.py
│   │   ├── dnld_blob.py
│   │   ├── img_res.yaml
│   │   └── runner.py
│   ├── img_rot
│   │   ├── Dockerfile
│   │   ├── app.py
│   │   ├── dnld_blob.py
│   │   ├── img_rot.yaml
│   │   └── runner.py
│   ├── knative-all.py
│   ├── ml_train
│   │   ├── Dockerfile
│   │   ├── app.py
│   │   ├── dnld_blob.py
│   │   ├── ml_train.yaml
│   │   └── runner.py
│   ├── myLibSocket.c
│   ├── nodeController.py
│   ├── vid_proc
│   │   ├── Dockerfile
│   │   ├── app.py
│   │   ├── dnld_blob.py
│   │   ├── runner.py
│   │   └── vid_proc.yaml
│   └── web_serve
│       ├── Dockerfile
│       ├── app.py
│       ├── dnld_blob.py
│       ├── runner.py
│       └── web_serve.yaml
├── LICENSE
├── README.md
├── characterization
│   ├── azure_blobs.py
│   ├── azure_burstiness.py
│   ├── characterize.sh
│   ├── download-traces.sh
│   ├── expected-output-funcs.txt
│   ├── functions-idle-time
│   │   ├── __pycache__
│   │   │   ├── rnn.cpython-36.pyc
│   │   │   └── rnn.cpython-38.pyc
│   │   ├── cnn_serving.py
│   │   ├── create_ord.py
│   │   ├── finalized_model.sav
│   │   ├── img0.png
│   │   ├── img1.png
│   │   ├── img10.jpg
│   │   ├── img10.png
│   │   ├── img11.png
│   │   ├── img12.png
│   │   ├── img13.png
│   │   ├── img14.png
│   │   ├── img15.png
│   │   ├── img16.png
│   │   ├── img17.png
│   │   ├── img18.png
│   │   ├── img19.png
│   │   ├── img2.png
│   │   ├── img20.png
│   │   ├── img3.png
│   │   ├── img4.png
│   │   ├── img5.png
│   │   ├── img6.png
│   │   ├── img7.png
│   │   ├── img8.png
│   │   ├── img9.png
│   │   ├── img_res.py
│   │   ├── img_rot.py
│   │   ├── in.txt
│   │   ├── lr_model.pk
│   │   ├── lr_serving.py
│   │   ├── minioDataset.csv
│   │   ├── ml_train.py
│   │   ├── money.txt
│   │   ├── moneyTemp.txt
│   │   ├── newImage.jpeg
│   │   ├── newImage.png
│   │   ├── ordIDs.txt
│   │   ├── ordTemp.txt
│   │   ├── output.avi
│   │   ├── pay_ord.py
│   │   ├── pulled_minioDataset.csv
│   │   ├── rnn.py
│   │   ├── rnn_model.pth
│   │   ├── rnn_params.pkl
│   │   ├── rnn_serving.py
│   │   ├── tempImage.jpeg
│   │   ├── vid1.mp4
│   │   ├── vid_proc.py
│   │   └── web_serve.py
│   ├── functions-mem-footprint
│   │   ├── __pycache__
│   │   │   └── rnn.cpython-36.pyc
│   │   ├── cnn_serving.py
│   │   ├── create_ord.py
│   │   ├── finalized_model.sav
│   │   ├── img0.png
│   │   ├── img1.png
│   │   ├── img10.jpg
│   │   ├── img10.png
│   │   ├── img11.png
│   │   ├── img12.png
│   │   ├── img13.png
│   │   ├── img14.png
│   │   ├── img15.png
│   │   ├── img16.png
│   │   ├── img17.png
│   │   ├── img18.png
│   │   ├── img19.png
│   │   ├── img2.png
│   │   ├── img20.png
│   │   ├── img3.png
│   │   ├── img4.png
│   │   ├── img5.png
│   │   ├── img6.png
│   │   ├── img7.png
│   │   ├── img8.png
│   │   ├── img9.png
│   │   ├── img_res.py
│   │   ├── img_rot.py
│   │   ├── in.txt
│   │   ├── lr_model.pk
│   │   ├── lr_serving.py
│   │   ├── minioDataset.csv
│   │   ├── ml_train.py
│   │   ├── money.txt
│   │   ├── moneyTemp.txt
│   │   ├── newImage.jpeg
│   │   ├── newImage.png
│   │   ├── ordIDs.txt
│   │   ├── ordTemp.txt
│   │   ├── output.avi
│   │   ├── pay_ord.py
│   │   ├── pulled_minioDataset.csv
│   │   ├── rnn.py
│   │   ├── rnn_model.pth
│   │   ├── rnn_params.pkl
│   │   ├── rnn_serving.py
│   │   ├── tempImage.jpeg
│   │   ├── vid1.mp4
│   │   ├── vid_proc.py
│   │   └── web_serve.py
│   └── install-libs.sh
├── ecofaas_init
│   ├── node_controller
│   │   ├── node_controller.proto
│   │   ├── node_controller_pb2.py
│   │   ├── node_controller_pb2_grpc.py
│   │   ├── server.py
│   │   └── test_client.py
│   ├── pool_controller
│   │   ├── pool_controller.proto
│   │   ├── pool_controller_pb2.py
│   │   ├── pool_controller_pb2_grpc.py
│   │   ├── server.py
│   │   └── test_client.py
│   ├── pool_manager
│   │   ├── pool_manager.proto
│   │   ├── pool_manager_pb2.py
│   │   ├── pool_manager_pb2_grpc.py
│   │   ├── server.py
│   │   └── test_client.py
│   └── workflow_controller
│       ├── profiling.csv
│       ├── server.py
│       ├── test_client.py
│       ├── workflow_controller.proto
│       ├── workflow_controller_pb2.py
│       └── workflow_controller_pb2_grpc.py
├── experiments
│   ├── __pycache__
│   │   └── rnn.cpython-38.pyc
│   ├── baseline_P99.txt
│   ├── cpu-util-out.txt
│   ├── cpu_utils.py
│   ├── expected-output-all.txt
│   ├── hist-out-ref.txt
│   ├── hist.py
│   ├── mem-out-ref.txt
│   ├── mem.py
│   ├── microarch-out-ref.txt
│   ├── microarch.py
│   ├── plot_sens_io.py
│   ├── rnn.py
│   ├── run-all.py
│   ├── sens-io-out-ref.txt
│   └── sens_io.py
├── pythonAction
│   ├── CHANGELOG.md
│   ├── Dockerfile
│   ├── build.gradle
│   ├── init.py
│   ├── mylib.c
│   ├── mylib.so
│   ├── mylibSocket.c
│   ├── mylibSocket.so
│   ├── myprogram
│   ├── myprogram.c
│   ├── runner.py
│   ├── runnerCoalesce.py
│   ├── runnerFuncCallTable.py
│   ├── runnerNoCoalesce.py
│   ├── runnerNoSleep.py
│   ├── runnerThreadPlusCallService.py
│   ├── runner_new.py
│   ├── runner_old.py
│   ├── scheduler.py
│   ├── start.py
│   ├── test.txt
│   ├── testHTTP.py
│   ├── testHTTP1.py
│   └── vid1.mp4
└── setup.sh
/KNative_prototype/cnn_serving/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jovanvr97/cnn_serve_start
2 |
3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy jsonpickle
4 | RUN mkdir -p /pythonAction
5 | ADD runner.py /pythonAction/
6 | ADD app.py /pythonAction/
7 | ADD dnld_blob.py /pythonAction/
8 |
9 | CMD cd pythonAction && python3 -u runner.py
--------------------------------------------------------------------------------
/KNative_prototype/cnn_serving/app.py:
--------------------------------------------------------------------------------
1 | from mxnet import gluon
2 | import os
3 | import mxnet as mx
4 | from PIL import Image
5 | from azure.storage.blob import BlobServiceClient, BlobClient
6 | import dnld_blob
7 |
8 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net"
9 | blob_service_client = BlobServiceClient.from_connection_string(connection_string)
10 | container_client = blob_service_client.get_container_client("artifacteval")
11 |
12 | net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, root = '/tmp/')
13 | net.hybridize(static_alloc=True, static_shape=True)
14 | lblPath = gluon.utils.download('http://data.mxnet.io/models/imagenet/synset.txt', path='/tmp/')
15 | with open(lblPath, 'r') as f:
16 |     labels = [l.rstrip() for l in f]
17 |
18 | def lambda_handler():
19 |     blobName = "img10.jpg"
20 |     dnld_blob.download_blob_new(blobName)
21 |     full_blob_name = blobName.split(".")
22 |     proc_blob_name = full_blob_name[0] + "_" + str(os.getpid()) + "." + full_blob_name[1]
23 |     image = Image.open(proc_blob_name)
24 |     image.save('tempImage_'+str(os.getpid())+'.jpeg')
25 |
26 |     # format image as (batch, RGB, width, height)
27 |     img = mx.image.imread('tempImage_'+str(os.getpid())+'.jpeg')
28 |     img = mx.image.imresize(img, 224, 224)  # resize
29 |     img = mx.image.color_normalize(img.astype(dtype='float32')/255,
30 |                                    mean=mx.nd.array([0.485, 0.456, 0.406]),
31 |                                    std=mx.nd.array([0.229, 0.224, 0.225]))  # normalize
32 |     img = img.transpose((2, 0, 1))  # channel first
33 |     img = img.expand_dims(axis=0)  # batchify
34 |
35 |     prob = net(img).softmax()  # predict and normalize output
36 |     idx = prob.topk(k=5)[0]  # get top 5 result
37 |     inference = ''
38 |     for i in idx:
39 |         i = int(i.asscalar())
40 |         # print('With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i]))
41 |         inference = inference + 'With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i]) + '. '
42 |
43 |     return {"result = ":inference}
--------------------------------------------------------------------------------
/KNative_prototype/cnn_serving/cnn_serving.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: serving.knative.dev/v1
2 | kind: Service
3 | metadata:
4 |   name: cnn-serving
5 |   namespace: default
6 | spec:
7 |   template:
8 |     spec:
9 |       containerConcurrency: 1000
10 |       containers:
11 |         - image: jovanvr97/cnn_serving_knative:latest
12 |           resources:
13 |             requests:
14 |               cpu: 1000m
15 |               memory: 640M
16 |             limits:
17 |               cpu: 2
18 |     metadata:
19 |       annotations:
20 |         autoscaling.knative.dev/target: "1000"
21 |         autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h"
22 |
--------------------------------------------------------------------------------
/KNative_prototype/cnn_serving/dnld_blob.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import os
3 |
4 | import socket
5 | import os
6 | import json
7 | import base64
8 | import time
9 |
10 | def download_blob_new(blobName):
11 |     myHost = '0.0.0.0'
12 |     myPort = 3333
13 |     clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
14 |     clientSocket.connect((myHost, myPort))
15 |     message = {"blobName": blobName, "operation": "get", "pid": os.getpid()}
16 |     messageStr = json.dumps(message)
17 |     clientSocket.sendall(messageStr.encode(encoding="utf-8"))
18 |
19 |     data_ = b''
20 |     data_ += clientSocket.recv(1024)
21 |
22 | def upload_blob_new(blobName, value):
23 |     myHost = '0.0.0.0'
24 |     myPort = 3333
25 |     clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
26 |     clientSocket.connect((myHost, myPort))
27 |     message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()}
28 |     messageStr = json.dumps(message)
29 |     clientSocket.sendall(messageStr.encode(encoding="utf-8"))
30 |
31 |     data_ = b''
32 |     data_ += clientSocket.recv(1024)
--------------------------------------------------------------------------------
/KNative_prototype/deploy.sh:
--------------------------------------------------------------------------------
1 | cd cnn_serving
2 | kubectl apply -f cnn_serving.yaml
3 |
4 | cd ../img_res
5 | kubectl apply -f img_res.yaml
6 |
7 | cd ../img_rot
8 | kubectl apply -f img_rot.yaml
9 |
10 | cd ../ml_train
11 | kubectl apply -f ml_train.yaml
12 |
13 | cd ../vid_proc
14 | kubectl apply -f vid_proc.yaml
15 |
16 | cd ../web_serve
17 | kubectl apply -f web_serve.yaml
18 |
19 | cd ..
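Note: the dnld_blob.py helpers above (and the identical copies in the other function directories) do not call Azure themselves; they send one JSON request per connection to a blob proxy on localhost:3333 and block on a short ack. The proxy side (nodeController.py in the tree above) is not reproduced in this section. The following is a minimal, hypothetical stand-in that speaks the same protocol; the handler name, the 4 KB single read, the use of the Azure SDK, and the assumption that the proxy shares a working directory with the function processes are all illustrative, not the repository's code.

import json
import socketserver
from azure.storage.blob import BlobClient

CONNECTION_STRING = "<azure-connection-string>"  # placeholder, not the real key
CONTAINER = "artifacteval"

class BlobProxyHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # One JSON message per connection: {"blobName", "operation", ["value"], "pid"}
        request = json.loads(self.request.recv(4096).decode("utf-8"))
        name, pid = request["blobName"], request["pid"]
        stem, ext = name.rsplit(".", 1)
        blob = BlobClient.from_connection_string(
            CONNECTION_STRING, container_name=CONTAINER, blob_name=name)
        if request["operation"] == "get":
            # Write to the per-invocation name the apps open (proc_blob_name).
            with open(stem + "_" + str(pid) + "." + ext, "wb") as f:
                f.write(blob.download_blob().readall())
        else:  # "set": upload the local file whose path arrived in "value"
            with open(request["value"], "rb") as f:
                blob.upload_blob(f.read(), overwrite=True)
        self.request.sendall(b"ok")  # the clients block on recv(1024) for this ack

if __name__ == "__main__":
    with socketserver.ThreadingTCPServer(("0.0.0.0", 3333), BlobProxyHandler) as server:
        server.serve_forever()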
-------------------------------------------------------------------------------- /KNative_prototype/img_res/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jovanvr97/img_proc_start 2 | 3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy 4 | RUN mkdir -p /pythonAction 5 | ADD runner.py /pythonAction/ 6 | ADD app.py /pythonAction/ 7 | ADD dnld_blob.py /pythonAction/ 8 | 9 | CMD cd pythonAction && python3 -u runner.py -------------------------------------------------------------------------------- /KNative_prototype/img_res/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | import dnld_blob 5 | 6 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 7 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 8 | container_client = blob_service_client.get_container_client("artifacteval") 9 | 10 | def lambda_handler(): 11 | blobName = "img10.jpg" 12 | dnld_blob.download_blob_new(blobName) 13 | full_blob_name = blobName.split(".") 14 | proc_blob_name = full_blob_name[0] + "_" + str(os.getpid()) + "." + full_blob_name[1] 15 | 16 | image = Image.open(proc_blob_name) 17 | width, height = image.size 18 | # Setting the points for cropped image 19 | left = 4 20 | top = height / 5 21 | right = 100 22 | bottom = 3 * height / 5 23 | im1 = image.crop((left, top, right, bottom)) 24 | im1.save('tempImage_'+str(os.getpid())+'.jpeg') 25 | 26 | fReadname = 'tempImage_'+str(os.getpid())+'.jpeg' 27 | blobName = "img10_res.jpg" 28 | dnld_blob.upload_blob_new(blobName, fReadname) 29 | 30 | return {"Image":"resized"} -------------------------------------------------------------------------------- /KNative_prototype/img_res/dnld_blob.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import os 3 | 4 | import socket 5 | import os 6 | import json 7 | import base64 8 | import time 9 | 10 | def download_blob_new(blobName): 11 | myHost = '0.0.0.0' 12 | myPort = 3333 13 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | clientSocket.connect((myHost, myPort)) 15 | message = {"blobName": blobName, "operation": "get", "pid": os.getpid()} 16 | messageStr = json.dumps(message) 17 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 18 | 19 | data_ = b'' 20 | data_ += clientSocket.recv(1024) 21 | 22 | def upload_blob_new(blobName, value): 23 | myHost = '0.0.0.0' 24 | myPort = 3333 25 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | clientSocket.connect((myHost, myPort)) 27 | message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()} 28 | messageStr = json.dumps(message) 29 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 30 | 31 | data_ = b'' 32 | data_ += clientSocket.recv(1024) -------------------------------------------------------------------------------- /KNative_prototype/img_res/img_res.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: img-res 5 | namespace: default 6 | spec: 7 | template: 8 | spec: 9 | containerConcurrency: 1000 10 | 
containers: 11 | - image: jovanvr97/img_res_knative:latest 12 | resources: 13 | requests: 14 | cpu: 1000m 15 | memory: 640M 16 | limits: 17 | cpu: 2 18 | metadata: 19 | annotations: 20 | autoscaling.knative.dev/target: "1000" 21 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h" 22 | -------------------------------------------------------------------------------- /KNative_prototype/img_rot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jovanvr97/img_proc_start 2 | 3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy 4 | RUN mkdir -p /pythonAction 5 | ADD runner.py /pythonAction/ 6 | ADD app.py /pythonAction/ 7 | ADD dnld_blob.py /pythonAction/ 8 | 9 | CMD cd pythonAction && python3 -u runner.py -------------------------------------------------------------------------------- /KNative_prototype/img_rot/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | import dnld_blob 5 | 6 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 7 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 8 | container_client = blob_service_client.get_container_client("artifacteval") 9 | 10 | fileAppend = open("../funcs.txt", "a") 11 | 12 | def lambda_handler(): 13 | blobName = "img10.jpg" 14 | dnld_blob.download_blob_new(blobName) 15 | full_blob_name = blobName.split(".") 16 | proc_blob_name = full_blob_name[0] + "_" + str(os.getpid()) + "." 
+ full_blob_name[1] 17 | 18 | image = Image.open(proc_blob_name) 19 | img = image.transpose(Image.ROTATE_90) 20 | img.save('tempImage_'+str(os.getpid())+'.jpeg') 21 | 22 | fReadname = 'tempImage_'+str(os.getpid())+'.jpeg' 23 | blobName = "img10_rot.jpg" 24 | dnld_blob.upload_blob_new(blobName, fReadname) 25 | 26 | return {"Image":"rotated"} -------------------------------------------------------------------------------- /KNative_prototype/img_rot/dnld_blob.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import os 3 | 4 | import socket 5 | import os 6 | import json 7 | import base64 8 | import time 9 | 10 | def download_blob_new(blobName): 11 | myHost = '0.0.0.0' 12 | myPort = 3333 13 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | clientSocket.connect((myHost, myPort)) 15 | message = {"blobName": blobName, "operation": "get", "pid": os.getpid()} 16 | messageStr = json.dumps(message) 17 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 18 | 19 | data_ = b'' 20 | data_ += clientSocket.recv(1024) 21 | 22 | def upload_blob_new(blobName, value): 23 | myHost = '0.0.0.0' 24 | myPort = 3333 25 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | clientSocket.connect((myHost, myPort)) 27 | message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()} 28 | messageStr = json.dumps(message) 29 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 30 | 31 | data_ = b'' 32 | data_ += clientSocket.recv(1024) -------------------------------------------------------------------------------- /KNative_prototype/img_rot/img_rot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: img-rot 5 | namespace: default 6 | spec: 7 | template: 8 | spec: 9 | containerConcurrency: 1000 10 | containers: 11 | - image: jovanvr97/img_rot_knative:latest 12 | resources: 13 | requests: 14 | cpu: 1000m 15 | memory: 640M 16 | limits: 17 | cpu: 2 18 | metadata: 19 | annotations: 20 | autoscaling.knative.dev/target: "1000" 21 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h" 22 | -------------------------------------------------------------------------------- /KNative_prototype/knative-all.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | import numpy as np 4 | import threading 5 | import requests 6 | from statistics import mean, median,variance,stdev 7 | 8 | # get the url of a function 9 | def getUrlByFuncName(funcName): 10 | try: 11 | output = subprocess.check_output("kn service describe " + funcName + " -vvv", shell=True).decode("utf-8") 12 | except Exception as e: 13 | print("Error in kn service describe == " + str(e)) 14 | return None 15 | lines = output.splitlines() 16 | for line in lines: 17 | if "URL:" in line: 18 | url = line.split()[1] 19 | return url 20 | 21 | output = subprocess.check_output("kn service list", shell=True).decode("utf-8") 22 | lines = output.splitlines() 23 | lines = lines[1:] # delete the first line 24 | 25 | services = [] 26 | serviceNames = [] 27 | 28 | for line in lines: 29 | serviceName = line.split()[0] 30 | if serviceName not in serviceNames: 31 | serviceNames.append(serviceName) 32 | 33 | for serviceName in serviceNames: 34 | services.append(getUrlByFuncName(serviceName)) 35 | 36 | def lambda_func(service): 37 | global times 38 | t1 = time.time() 39 
| r = requests.post(service, json={"name": "test"}) 40 | print(r.text) 41 | t2 = time.time() 42 | times.append(t2-t1) 43 | 44 | def EnforceActivityWindow(start_time, end_time, instance_events): 45 | events_iit = [] 46 | events_abs = [0] + instance_events 47 | event_times = [sum(events_abs[:i]) for i in range(1, len(events_abs) + 1)] 48 | event_times = [e for e in event_times if (e > start_time)and(e < end_time)] 49 | try: 50 | events_iit = [event_times[0]] + [event_times[i]-event_times[i-1] 51 | for i in range(1, len(event_times))] 52 | except: 53 | pass 54 | return events_iit 55 | 56 | loads = [1, 5, 10] 57 | 58 | output_file = open("run-all-out.txt", "w") 59 | 60 | indR = 0 61 | for load in loads: 62 | duration = 1 63 | seed = 100 64 | rate = load 65 | # generate Poisson's distribution of events 66 | inter_arrivals = [] 67 | np.random.seed(seed) 68 | beta = 1.0/rate 69 | oversampling_factor = 2 70 | inter_arrivals = list(np.random.exponential(scale=beta, size=int(oversampling_factor*duration*rate))) 71 | instance_events = EnforceActivityWindow(0,duration,inter_arrivals) 72 | 73 | for service in services: 74 | 75 | threads = [] 76 | times = [] 77 | after_time, before_time = 0, 0 78 | 79 | st = 0 80 | for t in instance_events: 81 | st = st + t - (after_time - before_time) 82 | before_time = time.time() 83 | if st > 0: 84 | time.sleep(st) 85 | 86 | threadToAdd = threading.Thread(target=lambda_func, args=(service, )) 87 | threads.append(threadToAdd) 88 | threadToAdd.start() 89 | after_time = time.time() 90 | 91 | for thread in threads: 92 | thread.join() 93 | 94 | print("=====================" + serviceNames[services.index(service)] + "=====================", file=output_file, flush=True) 95 | print(mean(times), file=output_file, flush=True) 96 | print(median(times), file=output_file, flush=True) 97 | print(np.percentile(times, 90), file=output_file, flush=True) 98 | print(np.percentile(times, 95), file=output_file, flush=True) 99 | print(np.percentile(times, 99), file=output_file, flush=True) 100 | -------------------------------------------------------------------------------- /KNative_prototype/ml_train/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jovanvr97/ml_train_start 2 | 3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy 4 | RUN mkdir -p /pythonAction 5 | ADD runner.py /pythonAction/ 6 | ADD app.py /pythonAction/ 7 | ADD dnld_blob.py /pythonAction/ 8 | 9 | CMD cd pythonAction && python3 -u runner.py -------------------------------------------------------------------------------- /KNative_prototype/ml_train/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import pickle 4 | from azure.storage.blob import BlobServiceClient, BlobClient 5 | from sklearn.feature_extraction.text import TfidfVectorizer 6 | from sklearn.linear_model import LogisticRegression 7 | import pandas as pd 8 | import re 9 | import warnings 10 | import dnld_blob 11 | 12 | warnings.filterwarnings("ignore") 13 | 14 | cleanup_re = re.compile('[^a-z]+') 15 | 16 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 17 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 18 | container_client = blob_service_client.get_container_client("artifacteval") 19 | 20 | def 
cleanup(sentence):
21 |     sentence = sentence.lower()
22 |     sentence = cleanup_re.sub(' ', sentence).strip()
23 |     return sentence
24 |
25 | df_name = 'minioDataset.csv'
26 |
27 | def lambda_handler():
28 |
29 |     t1 = time.time()
30 |     blobName = df_name
31 |     dnld_blob.download_blob_new(blobName)
32 |     full_blob_name = df_name.split(".")
33 |     proc_blob_name = full_blob_name[0] + "_" + str(os.getpid()) + "." + full_blob_name[1]
34 |     t2 = time.time()
35 |     print("Time 1 = " + str(t2-t1))
36 |
37 |     df = pd.read_csv(proc_blob_name)
38 |     df['train'] = df['Text'].apply(cleanup)
39 |
40 |     model = LogisticRegression(max_iter=10)
41 |     tfidf_vector = TfidfVectorizer(min_df=1000).fit(df['train'])
42 |     train = tfidf_vector.transform(df['train'])
43 |     model.fit(train, df['Score'])
44 |     t3 = time.time()
45 |     print("Time 2 = " + str(t3-t2))
46 |
47 |     filename = 'finalized_model_'+str(os.getpid())+'.sav'
48 |     pickle.dump(model, open(filename, 'wb'))
49 |
50 |     fReadName = 'finalized_model_'+str(os.getpid())+'.sav'
51 |     blobName = 'finalized_model_'+str(os.getpid())+'.sav'
52 |     dnld_blob.upload_blob_new(blobName, fReadName)
53 |     t4 = time.time()
54 |     print("Time 3 = " + str(t4-t3))
55 |
56 |     return {"Ok":"done"}
--------------------------------------------------------------------------------
/KNative_prototype/ml_train/dnld_blob.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import os
3 |
4 | import socket
5 | import os
6 | import json
7 | import base64
8 | import time
9 |
10 | def download_blob_new(blobName):
11 |     myHost = '0.0.0.0'
12 |     myPort = 3333
13 |     clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
14 |     clientSocket.connect((myHost, myPort))
15 |     message = {"blobName": blobName, "operation": "get", "pid": os.getpid()}
16 |     messageStr = json.dumps(message)
17 |     clientSocket.sendall(messageStr.encode(encoding="utf-8"))
18 |
19 |     data_ = b''
20 |     data_ += clientSocket.recv(1024)
21 |
22 | def upload_blob_new(blobName, value):
23 |     myHost = '0.0.0.0'
24 |     myPort = 3333
25 |     clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
26 |     clientSocket.connect((myHost, myPort))
27 |     message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()}
28 |     messageStr = json.dumps(message)
29 |     clientSocket.sendall(messageStr.encode(encoding="utf-8"))
30 |
31 |     data_ = b''
32 |     data_ += clientSocket.recv(1024)
--------------------------------------------------------------------------------
/KNative_prototype/ml_train/ml_train.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: serving.knative.dev/v1
2 | kind: Service
3 | metadata:
4 |   name: ml-train
5 |   namespace: default
6 | spec:
7 |   template:
8 |     spec:
9 |       containerConcurrency: 1000
10 |       containers:
11 |         - image: jovanvr97/ml_train_knative:latest
12 |           resources:
13 |             requests:
14 |               cpu: 1000m
15 |               memory: 640M
16 |             limits:
17 |               cpu: 2
18 |     metadata:
19 |       annotations:
20 |         autoscaling.knative.dev/target: "1000"
21 |         autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h"
22 |
--------------------------------------------------------------------------------
/KNative_prototype/myLibSocket.c:
--------------------------------------------------------------------------------
1 | #define _GNU_SOURCE
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <string.h>
5 | #include <strings.h>
6 | #include <unistd.h>
7 | #include <math.h>
8 | #include <dlfcn.h>
9 | #include <errno.h>
10 | #include <sys/types.h>
11 | #include <sys/socket.h>
12 | #include <sys/syscall.h>
13 | #include <netinet/in.h>
14 | #include <arpa/inet.h>
15 | #include <fcntl.h>
16 |
17 | #define PORT 3333
18 
| 19 | ssize_t recvfrom(int socketm, void *restrict buffer, size_t length, int flags, struct sockaddr *restrict address, socklen_t *restrict address_len){ 20 | ssize_t (*lrecvfrom)(int, void *restrict, size_t, int, struct sockaddr *restrict, socklen_t *restrict) = dlsym(RTLD_NEXT, "recvfrom"); 21 | pid_t tid = syscall(SYS_gettid); 22 | pid_t pid = getpid(); 23 | int sock = 0, valread, client_fd; 24 | struct sockaddr_in serv_addr; 25 | 26 | if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { 27 | printf("\n Socket creation error \n"); 28 | return -1; 29 | } 30 | 31 | bzero(&serv_addr, sizeof(serv_addr)); 32 | serv_addr.sin_family = AF_INET; 33 | serv_addr.sin_port = htons(PORT); 34 | 35 | if (inet_pton(AF_INET, "0.0.0.0", &serv_addr.sin_addr)<= 0) { 36 | printf("\nInvalid address/ Address not supported \n"); 37 | return -1; 38 | } 39 | 40 | if ((client_fd = connect(sock, (struct sockaddr*)&serv_addr, sizeof(serv_addr))) < 0) { 41 | printf("\nConnection Failed \n"); 42 | return -1; 43 | } 44 | 45 | { 46 | char* num; 47 | const char* str1 = "\nblocked - "; 48 | char mybuffer[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 49 | bzero(mybuffer, sizeof(mybuffer)); 50 | asprintf(&num, "%d", tid); 51 | strcat(strcpy(mybuffer, str1), num); 52 | write(sock, mybuffer, sizeof(mybuffer)); 53 | } 54 | ssize_t toReturnValue = lrecvfrom(socketm, buffer, length, flags, address, address_len); 55 | { 56 | char* num; 57 | const char* str1 = "\nunblocked - "; 58 | char mybuffer[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 59 | bzero(mybuffer, sizeof(mybuffer)); 60 | asprintf(&num, "%d", tid); 61 | strcat(strcpy(mybuffer, str1), num); 62 | write(sock, mybuffer, sizeof(mybuffer)); 63 | bzero(mybuffer, sizeof(mybuffer)); 64 | valread = read(sock, mybuffer, sizeof(mybuffer)); 65 | //close(client_fd); 66 | } 67 | return toReturnValue; 68 | } 69 | -------------------------------------------------------------------------------- /KNative_prototype/vid_proc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jovanvr97/vid_proc_start 2 | 3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy 4 | RUN mkdir -p /pythonAction 5 | ADD runner.py /pythonAction/ 6 | ADD app.py /pythonAction/ 7 | ADD dnld_blob.py /pythonAction/ 8 | 9 | CMD cd pythonAction && python3 -u runner.py -------------------------------------------------------------------------------- /KNative_prototype/vid_proc/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | import dnld_blob 5 | 6 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 7 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 8 | container_client = blob_service_client.get_container_client("artifacteval") 9 | 10 | tmp = "/tmp/" 11 | 12 | vid_name = 'vid1.mp4' 13 | 14 | result_file_path = tmp + vid_name 15 | 16 | def lambda_handler(): 17 | blobName = "vid1.mp4" 18 | dnld_blob.download_blob_new(blobName) 19 | video = cv2.VideoCapture("vid1_"+str(os.getpid())+".mp4") 20 | 21 | width = int(video.get(3)) 22 | height = int(video.get(4)) 23 | fourcc = cv2.VideoWriter_fourcc(*'MPEG') 24 | out = cv2.VideoWriter('output_'+str(os.getpid())+'.avi',fourcc, 20.0, (width, height)) 25 | 26 | while 
video.isOpened(): 27 | ret, frame = video.read() 28 | if ret: 29 | gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 30 | tmp_file_path = tmp+'tmp'+str(os.getpid())+'.jpg' 31 | cv2.imwrite(tmp_file_path, gray_frame) 32 | gray_frame = cv2.imread(tmp_file_path) 33 | out.write(gray_frame) 34 | break 35 | else: 36 | break 37 | 38 | fReadname = 'output_'+str(os.getpid())+'.avi' 39 | blobName = "output.avi" 40 | dnld_blob.upload_blob_new(blobName, fReadname) 41 | 42 | video.release() 43 | out.release() 44 | 45 | return {"Video": "Done"} -------------------------------------------------------------------------------- /KNative_prototype/vid_proc/dnld_blob.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import os 3 | 4 | import socket 5 | import os 6 | import json 7 | import base64 8 | import time 9 | 10 | def download_blob_new(blobName): 11 | myHost = '0.0.0.0' 12 | myPort = 3333 13 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | clientSocket.connect((myHost, myPort)) 15 | message = {"blobName": blobName, "operation": "get", "pid": os.getpid()} 16 | messageStr = json.dumps(message) 17 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 18 | 19 | data_ = b'' 20 | data_ += clientSocket.recv(1024) 21 | 22 | def upload_blob_new(blobName, value): 23 | myHost = '0.0.0.0' 24 | myPort = 3333 25 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | clientSocket.connect((myHost, myPort)) 27 | message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()} 28 | messageStr = json.dumps(message) 29 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 30 | 31 | data_ = b'' 32 | data_ += clientSocket.recv(1024) -------------------------------------------------------------------------------- /KNative_prototype/vid_proc/vid_proc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: vid-proc 5 | namespace: default 6 | spec: 7 | template: 8 | spec: 9 | containerConcurrency: 1000 10 | containers: 11 | - image: jovanvr97/vid_proc_knative:latest 12 | resources: 13 | requests: 14 | cpu: 1000m 15 | memory: 640M 16 | limits: 17 | cpu: 2 18 | metadata: 19 | annotations: 20 | autoscaling.knative.dev/target: "1000" 21 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h" 22 | -------------------------------------------------------------------------------- /KNative_prototype/web_serve/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jovanvr97/web_serve_start 2 | 3 | RUN pip install sockets pickle-mixin requests psutil js2py JPype1 redis Flask numpy 4 | RUN mkdir -p /pythonAction 5 | ADD runner.py /pythonAction/ 6 | ADD app.py /pythonAction/ 7 | ADD dnld_blob.py /pythonAction/ 8 | 9 | CMD cd pythonAction && python3 -u runner.py -------------------------------------------------------------------------------- /KNative_prototype/web_serve/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | from azure.storage.blob import BlobServiceClient, BlobClient 3 | import dnld_blob 4 | 5 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 6 | blob_service_client = 
BlobServiceClient.from_connection_string(connection_string) 7 | container_client = blob_service_client.get_container_client("artifacteval") 8 | 9 | def lambda_handler(): 10 | blobName = "money.txt" 11 | dnld_blob.download_blob_new(blobName) 12 | 13 | moneyF = open("money_"+str(os.getpid())+".txt", "r") 14 | money = float(moneyF.readline()) 15 | moneyF.close() 16 | money -= 100.0 17 | new_file = open("moneyTemp"+str(os.getpid())+".txt", "w") 18 | new_file.write(str(money)) 19 | new_file.close() 20 | fReadname = "moneyTemp"+str(os.getpid())+".txt" 21 | blobName = "money.txt" 22 | dnld_blob.upload_blob_new(blobName, fReadname) 23 | 24 | return {"Money":"withdrawn"} -------------------------------------------------------------------------------- /KNative_prototype/web_serve/dnld_blob.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import os 3 | 4 | import socket 5 | import os 6 | import json 7 | import base64 8 | import time 9 | 10 | def download_blob_new(blobName): 11 | myHost = '0.0.0.0' 12 | myPort = 3333 13 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | clientSocket.connect((myHost, myPort)) 15 | message = {"blobName": blobName, "operation": "get", "pid": os.getpid()} 16 | messageStr = json.dumps(message) 17 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 18 | 19 | data_ = b'' 20 | data_ += clientSocket.recv(1024) 21 | 22 | def upload_blob_new(blobName, value): 23 | myHost = '0.0.0.0' 24 | myPort = 3333 25 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | clientSocket.connect((myHost, myPort)) 27 | message = {"blobName": blobName, "operation": "set", "value": value, "pid": os.getpid()} 28 | messageStr = json.dumps(message) 29 | clientSocket.sendall(messageStr.encode(encoding="utf-8")) 30 | 31 | data_ = b'' 32 | data_ += clientSocket.recv(1024) -------------------------------------------------------------------------------- /KNative_prototype/web_serve/web_serve.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: web-serve 5 | namespace: default 6 | spec: 7 | template: 8 | spec: 9 | containerConcurrency: 1000 10 | containers: 11 | - image: jovanvr97/web_serve_knative:latest 12 | resources: 13 | requests: 14 | cpu: 1000m 15 | memory: 640M 16 | limits: 17 | cpu: 2 18 | metadata: 19 | annotations: 20 | autoscaling.knative.dev/target: "1000" 21 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "1h" 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 jovans2 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MXFaaS: Resource Sharing in Serverless Environments for Parallelism and Efficiency
2 |
3 | This repository is an artifact for the paper: MXFaaS: Resource Sharing in Serverless Environments for Parallelism and Efficiency [PDF] by __Jovan Stojkovic (UIUC), Tianyin Xu (UIUC), Hubertus Franke (IBM Research), and Josep Torrellas (UIUC)__. The paper was published at the __International Symposium on Computer Architecture (ISCA), June 2023__.
4 |
5 | MXFaaS is a novel serverless platform designed for high efficiency. MXFaaS improves function performance by efficiently multiplexing processor
6 | cycles, I/O bandwidth, and memory/processor state between
7 | concurrently-executing instances of the same function. MXFaaS
8 | introduces the new MXContainer abstraction, which can
9 | concurrently execute multiple invocations of the same function and
10 | owns a set of cores. To enable efficient use of processor cycles, the
11 | MXContainer carefully helps schedule same-function invocations
12 | for minimal response time. To enable efficient use of I/O
13 | bandwidth, the MXContainer coalesces remote storage accesses and
14 | remote function calls from same-function invocations. Finally, to
15 | enable efficient use of memory/processor state, the MXContainer
16 | first initializes function state and only later, on demand, spawns
17 | a process per function invocation, so that all invocations share
18 | unmodified memory state and minimize the memory footprint.
19 |
20 | This repository includes the prototype implementation of MXContainers, the integration of MXFaaS with KNative,
21 | the scripts to perform the characterization study on the open-source
22 | production-level traces and serverless benchmarks, and the
23 | experiment workflow to run these workloads.
24 |
25 |
26 | ## Artifact Description
27 | Our artifact includes the prototype implementation of MXContainers, the integration of MXFaaS with KNative,
28 | the scripts to perform the characterization study on the open-source production-level traces and serverless benchmarks, and the experiment workflow to run these workloads. We have two main software portions.
29 |
30 | First, we provide scripts to reproduce our characterization study.
31 | The scripts include the analyses of (i) the open-source production-level traces from Azure, and (ii) the open-source serverless benchmarks from FunctionBench.
32 | The scripts analyze (i) the request burstiness in serverless environments, (ii) the idle time of serverless functions,
33 | (iii) the breakdown of the memory footprint of serverless functions,
34 | and (iv) the bursty access patterns to remote storage.
35 |
36 | Second, we provide our implementation of MXFaaS: a novel serverless platform built upon KNative. MXFaaS includes two main components: (i) MXContainers, which support efficient CPU, I/O, and memory sharing across invocations of the same function,
37 | and (ii) a Node Controller, which handles core assignment across collocated MXContainers and extends the auto-scaling features.
38 |
39 | ## Hardware Dependencies
40 |
41 | This artifact was tested on Intel (Haswell, Broadwell, Skylake) and AMD EPYC (Rome, Milan) processors. Each processor has at least 8 cores.
42 |
43 | ## Software Dependencies
44 |
45 | This artifact requires Ubuntu 18.04+, Docker 23.0.1, minikube v1.29.0, and KNative.
46 |
47 | ## Installation
48 |
49 | First, clone our artifact repository:
50 | git clone https://github.com/jovans2/MXFaaS_Artifact.git
51 |
52 | ### Setting up the environment
53 | In the main directory of the repository, the setup.sh script
54 | installs all the software dependencies: ./setup.sh.
55 |
56 | The script will first install Docker and set up all the required privileges.
57 | Then, it will install minikube, a local Kubernetes environment that is convenient for testing.
58 | Finally, it will install KNative.
59 | The script will twice ask you to choose one of multiple options.
60 | Both times, choose the default value.
61 |
62 | Once the installation is complete, open a new terminal and execute the following command: minikube tunnel.
63 |
64 | ### Downloading open-source production-level traces
65 | To reproduce our characterization study, we need the open-source traces from Azure's production workloads.
66 | We need (i) the [Azure Functions Blob Access Trace](https://github.com/Azure/AzurePublicDataset/blob/master/AzureFunctionsBlobDataset2020.md) and
67 | (ii) the [Azure Functions Invocation Trace](https://github.com/Azure/AzurePublicDataset/blob/master/AzureFunctionsInvocationTrace2021.md).
68 | Download the traces into the characterization directory of our repository by running
69 | ./download-traces.sh.
70 |
71 | ### Installing application-specific libraries
72 | To locally install all the libraries needed by our Python applications, execute
73 | ./install-libs.sh in the characterization directory.
74 |
75 |
76 | ## Experiment workflow
77 |
78 | ### Characterization study
79 | Once Azure's traces are in the characterization directory, you can run all of our characterization experiments
80 | by executing ./characterize.sh.
81 | This script will first analyze the request burstiness and
82 | the bursty storage access pattern
83 | from
84 | Azure's traces,
85 | and then analyze the serverless benchmarks in the
86 | functions-idle-time and functions-mem-footprint directories.
87 |
88 | ### KNative prototype
89 | Running ./deploy.sh in the KNative_prototype directory deploys the target functions as MXContainers on KNative.
90 | To test whether all functions are successfully deployed, run
91 | kn service list.
92 | It should show all functions and their URLs.
93 | After 2 minutes, the READY flag should be set to True.
94 | Each function can be invoked with curl <function-url>.
95 | To test all functions at once, run python3 knative-all.py.
96 |
97 |
98 | Next, we test the performance of MXContainers under three loads: Low, Medium, and High. All loads use the Poisson distribution.
99 | The scripts that run the experiments are located in the experiments directory.
100 | To run all the experiments at once, execute python3 run-all.py.
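For reference, knative-all.py (shown above) and the experiments scripts derive the Poisson request schedule by sampling exponential inter-arrival gaps and clipping the resulting timestamps to the test window. A condensed sketch of that logic follows; the helper name poisson_schedule is ours, not the repository's, but the sampling and windowing mirror EnforceActivityWindow in knative-all.py.

```python
import numpy as np

def poisson_schedule(rate_rps, duration_s, seed=100, oversample=2):
    # Sample exponential inter-arrival gaps, then keep the absolute send
    # times that fall inside (0, duration_s) -- this mirrors the
    # EnforceActivityWindow() logic in knative-all.py.
    np.random.seed(seed)
    gaps = np.random.exponential(scale=1.0 / rate_rps,
                                 size=int(oversample * duration_s * rate_rps))
    arrival_times = np.cumsum(gaps)
    return [t for t in arrival_times if 0 < t < duration_s]

# e.g., poisson_schedule(rate_rps=10, duration_s=1) yields roughly 10
# timestamps at which the load generator fires one POST per deployed service.
```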
101 | -------------------------------------------------------------------------------- /characterization/azure_blobs.py: -------------------------------------------------------------------------------- 1 | from csv import reader 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | per_blob_timestamps = {} 6 | with open('azurefunctions-accesses-2020.csv', 'r') as read_obj: 7 | csv_reader = reader(read_obj) 8 | # row0 row1 row2 row3 row4 row5 row6 row7 row8 row9 row10 9 | # 10 | # Timestamp AnonRegion AnonUserId AnonAppName AnonFunctionInvocationId AnonBlobName BlobType AnonBlobETag BlobBytes Read Write 11 | for row in csv_reader: 12 | blobName = row[5] 13 | invocationId = row[4] 14 | if blobName == "AnonBlobName": 15 | continue 16 | timestamp = int(row[0]) 17 | if blobName not in per_blob_timestamps: 18 | per_blob_timestamps[blobName] = [] 19 | per_blob_timestamps[blobName].append(timestamp) 20 | 21 | diff_times = [] 22 | overall = 0 23 | index = 0 24 | for elem in per_blob_timestamps: 25 | timestamp_list = per_blob_timestamps[elem] 26 | if len(timestamp_list) > 1: 27 | timestamp_list.sort() 28 | new_timestamp_list = [j-i for i, j in zip(timestamp_list[:-1], timestamp_list[1:])] 29 | for new_elem in new_timestamp_list: 30 | overall += 1 31 | if new_elem <= 500: 32 | diff_times.append(new_elem) 33 | 34 | fileWrite = open("diff_times.txt","w") 35 | fileWrite.write(str(diff_times)) 36 | fileWrite.close() 37 | 38 | from matplotlib.pyplot import figure 39 | 40 | diffFile = open("diff_times.txt","r") 41 | strDiff = diffFile.readline() 42 | res = strDiff.strip('][').split(', ') 43 | diff_times = [] 44 | for elem in res: 45 | diff_times.append(int(elem)) 46 | 47 | x = np.sort(diff_times) 48 | y = np.arange(len(diff_times)) / float(len(diff_times)) 49 | 50 | figure(figsize=(8, 3)) 51 | plt.xlabel('Interarrival time (ms)',fontsize=18) 52 | plt.ylabel('CDF',fontsize=18) 53 | 54 | plt.plot(x, y, color="black", linewidth=3) 55 | 56 | plt.xticks(fontsize=18) 57 | plt.yticks(fontsize=18) 58 | plt.tight_layout() 59 | plt.savefig("azure_blobs.png") -------------------------------------------------------------------------------- /characterization/azure_burstiness.py: -------------------------------------------------------------------------------- 1 | import statistics 2 | import collections 3 | import numpy as np 4 | from csv import reader 5 | import matplotlib.pyplot as plt 6 | from matplotlib.pyplot import figure 7 | 8 | 9 | azureFile = open("AzureFunctionsInvocationTraceForTwoWeeksJan2021.txt","r") 10 | 11 | figure(figsize=(8, 3)) 12 | plt.xscale('log',base=2) 13 | 14 | index = 0 15 | response_times = [] 16 | apps = set() 17 | funcPerApps = {} 18 | lineFile = [] 19 | for line in azureFile: 20 | index += 1 21 | if index == 1: 22 | continue 23 | lineFile.append(line) 24 | listLines = line.split(",") 25 | duration = float(listLines[3]) 26 | apps.add(listLines[0]) 27 | if listLines[0] not in funcPerApps: 28 | funcPerApps[listLines[0]] = set() 29 | funcPerApps[listLines[0]].add(listLines[1]) 30 | 31 | numbers = [] 32 | targetFunc = "" 33 | for key in funcPerApps: 34 | numbers.append(len(funcPerApps[key])) 35 | 36 | apps_per_minute = {} 37 | for line in lineFile: 38 | listLines = line.split(",") 39 | duration = float(listLines[3]) 40 | endTime = float(listLines[2]) 41 | startTime = endTime - duration 42 | startMin = int(startTime/60) 43 | appName = listLines[0] 44 | if appName not in apps_per_minute: 45 | apps_per_minute[appName] = [] 46 | apps_per_minute[appName].append(startMin) 47 | 48 | 
calls_per_minute = [] 49 | bucket_2 = 0 50 | bucket_5 = 0 51 | bucket_10 = 0 52 | bucket_20 = 0 53 | bucket_50 = 0 54 | bucket_100 = 0 55 | bucket_200 = 0 56 | bucket_500 = 0 57 | bucket_1000 = 0 58 | overallNum = 0 59 | for elem in apps_per_minute: 60 | listMinutes = apps_per_minute[elem] 61 | frequency = {} 62 | for minute in listMinutes: 63 | if minute not in frequency: 64 | frequency[minute] = 0 65 | frequency[minute] += 1 66 | for key in frequency: 67 | overallNum += 1 68 | if frequency[key] <= 500 and frequency[key] > 0: 69 | calls_per_minute.append(frequency[key]) 70 | if frequency[key] >= 2: 71 | bucket_2 += 1 72 | if frequency[key] >= 5: 73 | bucket_5 += 1 74 | if frequency[key] >= 10: 75 | bucket_10 += 1 76 | if frequency[key] >= 20: 77 | bucket_20 += 1 78 | if frequency[key] >= 50: 79 | bucket_50 += 1 80 | if frequency[key] >= 100: 81 | bucket_100 += 1 82 | if frequency[key] >= 200: 83 | bucket_200 += 1 84 | if frequency[key] >= 500: 85 | bucket_500 += 1 86 | if frequency[key] >= 1000: 87 | bucket_1000 += 1 88 | 89 | x = np.sort(calls_per_minute) 90 | y = np.arange(len(calls_per_minute)) / float(len(calls_per_minute)) 91 | 92 | plt.xlabel('Number of concurrent invocations of the same function', fontsize=18) 93 | plt.ylabel('CDF', fontsize=18) 94 | 95 | plt.plot(x, y, label="Azure", color="black", linewidth=3) 96 | 97 | 98 | plt.subplots_adjust(wspace=0.3, hspace=0.5) 99 | 100 | plt.legend(fontsize=18) 101 | 102 | plt.xticks(fontsize=18) 103 | plt.yticks(fontsize=18) 104 | plt.tight_layout() 105 | plt.savefig("azure_burstiness.png") -------------------------------------------------------------------------------- /characterization/characterize.sh: -------------------------------------------------------------------------------- 1 | python3 azure_blobs.py 2 | python3 azure_burstiness.py 3 | 4 | cd functions-idle-time 5 | 6 | python3 cnn_serving.py 7 | python3 img_res.py 8 | python3 img_rot.py 9 | python3 ml_train.py 10 | python3 vid_proc.py 11 | python3 web_serve.py 12 | python3 lr_serving.py 13 | python3 rnn_serving.py 14 | python3 create_ord.py 15 | python3 pay_ord.py 16 | 17 | cd ../functions-mem-footprint 18 | 19 | python3 cnn_serving.py 20 | python3 img_res.py 21 | python3 img_rot.py 22 | python3 ml_train.py 23 | python3 vid_proc.py 24 | python3 web_serve.py 25 | python3 lr_serving.py 26 | python3 rnn_serving.py 27 | python3 create_ord.py 28 | python3 pay_ord.py -------------------------------------------------------------------------------- /characterization/download-traces.sh: -------------------------------------------------------------------------------- 1 | wget https://azurecloudpublicdataset2.blob.core.windows.net/azurepublicdatasetv2/azurefunctions_dataset2020/azurefunctions-accesses-2020.csv.bz2 2 | bzip2 -d azurefunctions-accesses-2020.csv.bz2 3 | 4 | sudo apt-get install unrar-free 5 | wget https://github.com/Azure/AzurePublicDataset/raw/master/data/AzureFunctionsInvocationTraceForTwoWeeksJan2021.rar 6 | unrar e AzureFunctionsInvocationTraceForTwoWeeksJan2021.rar 7 | -------------------------------------------------------------------------------- /characterization/expected-output-funcs.txt: -------------------------------------------------------------------------------- 1 | --- CNN SERVING --- 2 | Handler time = 0.6823794841766357 3 | Idle time = 0.547412633895874 4 | --- IMG RES --- 5 | Handler time = 0.5704681873321533 6 | Idle time = 0.567685604095459 7 | --- IMG ROT --- 8 | Handler time = 0.9418916702270508 9 | Idle time = 0.8897056579589844 10 | --- ML TRAIN --- 
11 | Handler time = 4.619851589202881 12 | Idle time = 2.2565834522247314 13 | --- VID PROC --- 14 | Handler time = 1.2818608283996582 15 | Idle time = 1.2577641010284424 16 | --- WEB SERVE --- 17 | Handler time = 0.5353057384490967 18 | Idle time = 0.5348479747772217 19 | --- LR-SERV --- 20 | Handler time = 0.5394821166992188 21 | Idle time = 0.5381710529327393 22 | --- RNN-SERV --- 23 | Handler time = 0.5411827564239502 24 | Idle time = 0.5302822589874268 25 | --- Create Ord --- 26 | Handler time = 0.5529963970184326 27 | Idle time = 0.5525338649749756 28 | --- Pay Ord --- 29 | Handler time = 0.5168249607086182 30 | Idle time = 0.5163419246673584 31 | --- CNN SERVING --- 32 | memory use 1: 184.25 33 | memory use 2: 214.15625 34 | memory use 3: 71.80859375 35 | --- IMG RES --- 36 | memory use 1: 40.2890625 37 | memory use 2: 3.921875 38 | memory use 3: 1.23828125 39 | --- IMG ROT --- 40 | memory use 1: 40.20703125 41 | memory use 2: 0.203125 42 | memory use 3: 3.5546875 43 | --- ML TRAIN --- 44 | memory use 1: 118.4921875 45 | memory use 2: 36.1796875 46 | memory use 3: 20.3515625 47 | --- VID PROC --- 48 | memory use 1: 73.5078125 49 | memory use 2: 3.65234375 50 | memory use 3: 30.89453125 51 | --- WEB SERVE --- 52 | memory use 1: 38.46484375 53 | memory use 2: 0.0703125 54 | memory use 3: 2.3359375 55 | --- LR SERVING --- 56 | memory use 1: 112.578125 57 | memory use 2: 42.05859375 58 | memory use 3: 0.18359375 59 | --- RNN SERVING --- 60 | memory use 1: 260.33203125 61 | memory use 2: 6.41796875 62 | memory use 3: 4.890625 63 | --- CREATE-ORD --- 64 | memory use 1: 38.33203125 65 | memory use 2: 0.06640625 66 | memory use 3: 2.5390625 67 | --- PAY-ORD --- 68 | memory use 1: 38.12109375 69 | memory use 2: 0.06640625 70 | memory use 3: 2.41015625 71 | -------------------------------------------------------------------------------- /characterization/functions-idle-time/__pycache__/rnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/__pycache__/rnn.cpython-36.pyc -------------------------------------------------------------------------------- /characterization/functions-idle-time/__pycache__/rnn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/__pycache__/rnn.cpython-38.pyc -------------------------------------------------------------------------------- /characterization/functions-idle-time/cnn_serving.py: -------------------------------------------------------------------------------- 1 | import time 2 | from mxnet import gluon 3 | import mxnet as mx 4 | from PIL import Image 5 | from azure.storage.blob import BlobServiceClient, BlobClient 6 | 7 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 8 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 9 | container_client = blob_service_client.get_container_client("artifacteval") 10 | 11 | net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, root = '/tmp/') 12 | net.hybridize(static_alloc=True, static_shape=True) 13 | lblPath = 
gluon.utils.download('http://data.mxnet.io/models/imagenet/synset.txt',path='/tmp/') 14 | with open(lblPath, 'r') as f: 15 | labels = [l.rstrip() for l in f] 16 | 17 | fileAppend = open("../funcs.txt", "a") 18 | 19 | def lambda_handler(): 20 | t1 = time.time() 21 | blobName = "img10.jpg" 22 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 23 | with open(blobName, "wb") as my_blob: 24 | t3 = time.time() 25 | download_stream = blob_client.download_blob() 26 | t4 = time.time() 27 | my_blob.write(download_stream.readall()) 28 | image = Image.open(blobName) 29 | image.save('tempImage.jpeg') 30 | 31 | # format image as (batch, RGB, width, height) 32 | img = mx.image.imread("tempImage.jpeg") 33 | img = mx.image.imresize(img, 224, 224) # resize 34 | img = mx.image.color_normalize(img.astype(dtype='float32')/255, 35 | mean=mx.nd.array([0.485, 0.456, 0.406]), 36 | std=mx.nd.array([0.229, 0.224, 0.225])) # normalize 37 | img = img.transpose((2, 0, 1)) # channel first 38 | img = img.expand_dims(axis=0) # batchify 39 | 40 | prob = net(img).softmax() # predict and normalize output 41 | idx = prob.topk(k=5)[0] # get top 5 result 42 | inference = '' 43 | for i in idx: 44 | i = int(i.asscalar()) 45 | # print('With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i])) 46 | inference = inference + 'With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i]) + '. ' 47 | t2 = time.time() 48 | print("--- CNN SERVING ---", file=fileAppend) 49 | print("Handler time = ", t2-t1, file=fileAppend) 50 | print("Idle time = ", t4-t3, file=fileAppend) 51 | return inference 52 | 53 | lambda_handler() -------------------------------------------------------------------------------- /characterization/functions-idle-time/create_ord.py: -------------------------------------------------------------------------------- 1 | import time 2 | import uuid 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | 5 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 6 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 7 | container_client = blob_service_client.get_container_client("artifacteval") 8 | 9 | fileAppend = open("../funcs.txt", "a") 10 | 11 | def main(params): 12 | t1 = time.time() 13 | blobName = "ordIDs.txt" 14 | ordID = 1 15 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 16 | with open(blobName, "wb") as my_blob: 17 | t3 = time.time() 18 | download_stream = blob_client.download_blob() 19 | my_blob.write(download_stream.readall()) 20 | t4 = time.time() 21 | orderF = open(blobName, "r") 22 | orderIDs = orderF.readlines() 23 | orderPrice = -1 24 | for line in orderIDs: 25 | lineStr = line.split(" ") 26 | if int(lineStr[0]) == ordID: 27 | orderPrice = float(lineStr[1]) 28 | break 29 | orderF.close() 30 | new_file = open("ordTemp.txt", "w") 31 | new_file.write(str(uuid.uuid1()) + " ---" + str(orderPrice)) 32 | new_file.close() 33 | fRead = open("ordTemp.txt","rb") 34 | value = fRead.read() 35 | blobName = "ordPrice.txt" 36 | t5 = time.time() 37 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 38 | blob_client.upload_blob(value, overwrite=True) 39 | t6 = time.time() 40 | t2 = time.time() 41 | 
print("--- Create Ord ---", file=fileAppend) 42 | print("Handler time = ", t2-t1, file=fileAppend) 43 | print("Idle time = ", t4+t6-t3-t5, file=fileAppend) 44 | return {"Order":"created"} 45 | 46 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/finalized_model.sav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/finalized_model.sav -------------------------------------------------------------------------------- /characterization/functions-idle-time/img0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img0.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img1.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img10.jpg -------------------------------------------------------------------------------- /characterization/functions-idle-time/img10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img10.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img11.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img12.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img13.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img14.png -------------------------------------------------------------------------------- 
/characterization/functions-idle-time/img15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img15.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img16.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img17.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img18.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img19.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img2.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img20.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img3.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img4.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img5.png 
-------------------------------------------------------------------------------- /characterization/functions-idle-time/img6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img6.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img7.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img8.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/img9.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/img_res.py: -------------------------------------------------------------------------------- 1 | import time 2 | from PIL import Image 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | 5 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 6 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 7 | container_client = blob_service_client.get_container_client("artifacteval") 8 | 9 | images = {} 10 | for indI in range(0, 20): 11 | blbName = "img" + str(indI) + ".png" 12 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blbName) 13 | with open(blbName, "wb") as my_blob: 14 | download_stream = blob_client.download_blob() 15 | my_blob.write(download_stream.readall()) 16 | images[blbName] = Image.open(blbName) 17 | 18 | fileAppend = open("../funcs.txt", "a") 19 | 20 | def main(params): 21 | t1 = time.time() 22 | blobName = "img20.png" 23 | if blobName not in images: 24 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 25 | with open(blobName, "wb") as my_blob: 26 | t3 = time.time() 27 | download_stream = blob_client.download_blob() 28 | t4 = time.time() 29 | my_blob.write(download_stream.readall()) 30 | image = Image.open(blobName) 31 | images[blobName] = image 32 | else: 33 | image = images[blobName] 34 | width, height = image.size 35 | # Setting the points for cropped image 36 | left = 4 37 | top = height / 5 38 | right = 100 39 | bottom = 3 * height / 5 40 | im1 = image.crop((left, top, right, bottom)) 41 | im1.save("newImage.png") 42 | 43 | fRead = open("newImage.png","rb") 44 | value = fRead.read() 45 | blobName = "img10_res.jpg" 46 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", 
blob_name=blobName) 47 | t5 = time.time() 48 | blob_client.upload_blob(value, overwrite=True) 49 | t6 = time.time() 50 | t2 = time.time() 51 | print("--- IMG RES ---", file=fileAppend) 52 | print("Handler time = ", t2-t1, file=fileAppend) 53 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 54 | return {"Image":"rotated"} 55 | 56 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/img_rot.py: -------------------------------------------------------------------------------- 1 | import time 2 | from PIL import Image 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | 5 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 6 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 7 | container_client = blob_service_client.get_container_client("artifacteval") 8 | 9 | fileAppend = open("../funcs.txt", "a") 10 | 11 | def main(params): 12 | t1 = time.time() 13 | blobName = "img10.jpg" 14 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 15 | with open(blobName, "wb") as my_blob: 16 | t3 = time.time() 17 | download_stream = blob_client.download_blob() 18 | t4 = time.time() 19 | my_blob.write(download_stream.readall()) 20 | image = Image.open(blobName) 21 | img = image.transpose(Image.ROTATE_90) 22 | img.save('newImage.jpeg') 23 | fRead = open("newImage.jpeg","rb") 24 | value = fRead.read() 25 | blobName = "img10_rot.jpg" 26 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 27 | t5 = time.time() 28 | blob_client.upload_blob(value, overwrite=True) 29 | t6 = time.time() 30 | t2 = time.time() 31 | print("--- IMG ROT ---", file=fileAppend) 32 | print("Handler time = ", t2-t1, file=fileAppend) 33 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 34 | return {"Image":"rotated"} 35 | 36 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/in.txt: -------------------------------------------------------------------------------- 1 | Hello world -------------------------------------------------------------------------------- /characterization/functions-idle-time/lr_model.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/lr_model.pk -------------------------------------------------------------------------------- /characterization/functions-idle-time/lr_serving.py: -------------------------------------------------------------------------------- 1 | import time 2 | from sklearn.feature_extraction.text import TfidfVectorizer 3 | import joblib 4 | import pandas as pd 5 | import re 6 | from azure.storage.blob import BlobServiceClient, BlobClient 7 | 8 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 9 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 10 | container_client = 
blob_service_client.get_container_client("artifacteval") 11 | 12 | cleanup_re = re.compile('[^a-z]+') 13 | 14 | def cleanup(sentence): 15 | sentence = sentence.lower() 16 | sentence = cleanup_re.sub(' ', sentence).strip() 17 | return sentence 18 | 19 | blobName = "minioDataset.csv" 20 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 21 | with open(blobName, "wb") as my_blob: 22 | t3 = time.time() 23 | download_stream = blob_client.download_blob() 24 | t4 = time.time() 25 | my_blob.write(download_stream.readall()) 26 | dataset = pd.read_csv('minioDataset.csv') 27 | 28 | df_input = pd.DataFrame() 29 | dataset['train'] = dataset['Text'].apply(cleanup) 30 | tfidf_vect = TfidfVectorizer(min_df=100).fit(dataset['train']) 31 | x = 'The ambiance is magical. The food and service was nice! The lobster and cheese was to die for and our steaks were cooked perfectly. ' 32 | df_input['x'] = [x] 33 | df_input['x'] = df_input['x'].apply(cleanup) 34 | X = tfidf_vect.transform(df_input['x']) 35 | 36 | x = 'My favorite cafe. I like going there on weekends, always taking a cafe and some of their pastry before visiting my parents. ' 37 | df_input['x'] = [x] 38 | df_input['x'] = df_input['x'].apply(cleanup) 39 | X2 = tfidf_vect.transform(df_input['x']) 40 | 41 | blobName = "lr_model.pk" 42 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 43 | with open(blobName, "wb") as my_blob: 44 | t3 = time.time() 45 | download_stream = blob_client.download_blob() 46 | t4 = time.time() 47 | my_blob.write(download_stream.readall()) 48 | model = joblib.load('lr_model.pk') 49 | print('Model is ready') 50 | 51 | fileAppend = open("../funcs.txt", "a") 52 | 53 | def main(params): 54 | t1 = time.time() 55 | blobName = "in.txt" 56 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 57 | with open(blobName, "wb") as my_blob: 58 | t3 = time.time() 59 | download_stream = blob_client.download_blob() 60 | t4 = time.time() 61 | my_blob.write(download_stream.readall()) 62 | 63 | y = model.predict(X) 64 | 65 | value = y 66 | blobName = "out.txt" 67 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 68 | t5 = time.time() 69 | blob_client.upload_blob(value, overwrite=True) 70 | t6 = time.time() 71 | t2 = time.time() 72 | print("--- LR-SERV ---", file=fileAppend) 73 | print("Handler time = ", t2-t1, file=fileAppend) 74 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 75 | 76 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/ml_train.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pickle 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | from sklearn.feature_extraction.text import TfidfVectorizer 5 | from sklearn.linear_model import LogisticRegression 6 | import pandas as pd 7 | import re 8 | import warnings 9 | 10 | warnings.filterwarnings("ignore") 11 | 12 | cleanup_re = re.compile('[^a-z]+') 13 | 14 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 15 | blob_service_client = 
BlobServiceClient.from_connection_string(connection_string) 16 | container_client = blob_service_client.get_container_client("artifacteval") 17 | 18 | def cleanup(sentence): 19 | sentence = sentence.lower() 20 | sentence = cleanup_re.sub(' ', sentence).strip() 21 | return sentence 22 | 23 | df_name = 'minioDataset.csv' 24 | df_path = 'pulled_' + df_name 25 | fileAppend = open("../funcs.txt", "a") 26 | 27 | def serve(): 28 | t1 = time.time() 29 | 30 | blobName = df_name 31 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 32 | with open(df_path, "wb") as my_blob: 33 | t3 = time.time() 34 | download_stream = blob_client.download_blob() 35 | t4 = time.time() 36 | my_blob.write(download_stream.readall()) 37 | df = pd.read_csv(df_path) 38 | df['train'] = df['Text'].apply(cleanup) 39 | 40 | model = LogisticRegression(max_iter=10) 41 | tfidf_vector = TfidfVectorizer(min_df=1000).fit(df['train']) 42 | train = tfidf_vector.transform(df['train']) 43 | model.fit(train, df['Score']) 44 | 45 | filename = 'finalized_model.sav' 46 | pickle.dump(model, open(filename, 'wb')) 47 | 48 | fRead = open("finalized_model.sav","rb") 49 | value = fRead.read() 50 | blobName = "finalized_model.sav" 51 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 52 | t5 = time.time() 53 | blob_client.upload_blob(value, overwrite=True) 54 | t6 = time.time() 55 | 56 | t2 = time.time() 57 | print("--- ML TRAIN ---", file=fileAppend) 58 | print("Handler time = ", t2-t1, file=fileAppend) 59 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 60 | return {"Ok":"done"} 61 | 62 | if __name__ == '__main__': 63 | serve() -------------------------------------------------------------------------------- /characterization/functions-idle-time/money.txt: -------------------------------------------------------------------------------- 1 | -14638.8546 -------------------------------------------------------------------------------- /characterization/functions-idle-time/moneyTemp.txt: -------------------------------------------------------------------------------- 1 | -14738.8546 -------------------------------------------------------------------------------- /characterization/functions-idle-time/newImage.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/newImage.jpeg -------------------------------------------------------------------------------- /characterization/functions-idle-time/newImage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/newImage.png -------------------------------------------------------------------------------- /characterization/functions-idle-time/ordIDs.txt: -------------------------------------------------------------------------------- 1 | 1 1000.014 2 | 2 3531.52 3 | 3 53646775.25 4 | 4 35641.1 5 | 5 25625.1 -------------------------------------------------------------------------------- /characterization/functions-idle-time/ordTemp.txt: -------------------------------------------------------------------------------- 1 | 163af545-cf89-11ed-aad3-1c34da7240d6 ---1000.014 
-------------------------------------------------------------------------------- /characterization/functions-idle-time/output.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/output.avi -------------------------------------------------------------------------------- /characterization/functions-idle-time/pay_ord.py: -------------------------------------------------------------------------------- 1 | import time 2 | from azure.storage.blob import BlobServiceClient, BlobClient 3 | 4 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 5 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 6 | container_client = blob_service_client.get_container_client("artifacteval") 7 | 8 | fileAppend = open("../funcs.txt", "a") 9 | 10 | def main(params): 11 | t1 = time.time() 12 | blobName = "money.txt" 13 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 14 | with open(blobName, "wb") as my_blob: 15 | t3 = time.time() 16 | download_stream = blob_client.download_blob() 17 | my_blob.write(download_stream.readall()) 18 | t4 = time.time() 19 | moneyF = open(blobName, "r") 20 | money = float(moneyF.readline()) 21 | moneyF.close() 22 | money -= 100.0 23 | new_file = open("moneyTemp.txt", "w") 24 | new_file.write(str(money)) 25 | new_file.close() 26 | fRead = open("moneyTemp.txt","rb") 27 | value = fRead.read() 28 | blobName = "money.txt" 29 | t5 = time.time() 30 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 31 | blob_client.upload_blob(value, overwrite=True) 32 | t6 = time.time() 33 | t2 = time.time() 34 | print("--- Pay ord ---", file=fileAppend) 35 | print("Handler time = ", t2-t1, file=fileAppend) 36 | print("Idle time = ", t4+t6-t3-t5, file=fileAppend) 37 | return {"Order":"Paid"} 38 | 39 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/rnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | 5 | 6 | class RNN(nn.Module): 7 | def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters): 8 | super(RNN, self).__init__() 9 | self.hidden_size = hidden_size 10 | 11 | self.all_categories = all_categories 12 | self.n_categories = n_categories 13 | self.all_letters = all_letters 14 | self.n_letters = n_letters 15 | 16 | self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) 17 | self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) 18 | self.o2o = nn.Linear(hidden_size + output_size, output_size) 19 | self.dropout = nn.Dropout(0.1) 20 | self.softmax = nn.LogSoftmax(dim=1) 21 | 22 | def forward(self, category, input_tensor, hidden): 23 | input_combined = torch.cat((category, input_tensor, hidden), 1) 24 | hidden = self.i2h(input_combined) 25 | output = self.i2o(input_combined) 26 | output_combined = torch.cat((hidden, output), 1) 27 | output = self.o2o(output_combined) 28 | output = self.dropout(output) 29 | output =
self.softmax(output) 30 | return output, hidden 31 | 32 | def init_hidden(self): 33 | return Variable(torch.zeros(1, self.hidden_size)) 34 | 35 | @staticmethod 36 | def gen_input_tensor(all_letters, n_letters, line): 37 | tensor = torch.zeros(len(line), 1, n_letters) 38 | for li in range(len(line)): 39 | letter = line[li] 40 | tensor[li][0][all_letters.find(letter)] = 1 41 | return tensor 42 | 43 | @staticmethod 44 | def gen_category_tensor(all_categories, n_categories, category): 45 | li = all_categories.index(category) 46 | tensor = torch.zeros(1, n_categories) 47 | tensor[0][li] = 1 48 | return tensor 49 | 50 | # Sample from a category and starting letter 51 | def sample(self, category, start_letter='A'): 52 | category_tensor = Variable(self.gen_category_tensor(self.all_categories, self.n_categories, category)) 53 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) 54 | hidden = self.init_hidden() 55 | 56 | output_name = start_letter 57 | 58 | max_length = 20 59 | for i in range(max_length): 60 | output, hidden = self.forward(category_tensor, input_tensor[0], hidden) 61 | topv, topi = output.data.topk(1) 62 | topi = topi[0][0] 63 | 64 | if topi == self.n_letters - 1: 65 | break 66 | else: 67 | letter = self.all_letters[topi] 68 | output_name += letter 69 | 70 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) 71 | 72 | return output_name 73 | 74 | # Get multiple samples from one category and multiple starting letters 75 | def samples(self, category, start_letters='ABC'): 76 | for start_letter in start_letters: 77 | yield self.sample(category, start_letter) 78 | -------------------------------------------------------------------------------- /characterization/functions-idle-time/rnn_model.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/rnn_model.pth -------------------------------------------------------------------------------- /characterization/functions-idle-time/rnn_params.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/rnn_params.pkl -------------------------------------------------------------------------------- /characterization/functions-idle-time/rnn_serving.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import torch 3 | import rnn 4 | import io 5 | import string 6 | import time 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | 9 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 10 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 11 | container_client = blob_service_client.get_container_client("artifacteval") 12 | 13 | torch.set_num_threads(1) 14 | language = 'Scottish' 15 | language2 = 'Russian' 16 | start_letters = 'ABCDEFGHIJKLMNOP' 17 | start_letters2 = 'QRSTUVWXYZABCDEF' 18 | 19 | blobName = "rnn_params.pkl" 20 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 21 | with open(blobName, "wb") as my_blob: 22 | 
download_stream = blob_client.download_blob() 23 | my_blob.write(download_stream.readall()) 24 | my_blob = open(blobName, "rb") 25 | params = pickle.load(my_blob) 26 | 27 | all_categories =['French', 'Czech', 'Dutch', 'Polish', 'Scottish', 'Chinese', 'English', 'Italian', 'Portuguese', 'Japanese', 'German', 'Russian', 'Korean', 'Arabic', 'Greek', 'Vietnamese', 'Spanish', 'Irish'] 28 | n_categories = len(all_categories) 29 | all_letters = string.ascii_letters + " .,;'-" 30 | n_letters = len(all_letters) + 1 31 | 32 | rnn_model = rnn.RNN(n_letters, 128, n_letters, all_categories, n_categories, all_letters, n_letters) 33 | blobName = "rnn_model.pth" 34 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 35 | with open(blobName, "wb") as my_blob: 36 | download_stream = blob_client.download_blob() 37 | my_blob.write(download_stream.readall()) 38 | my_blob = open(blobName, "rb") 39 | buffer = io.BytesIO(my_blob.read()) 40 | rnn_model.load_state_dict(torch.load(buffer)) 41 | rnn_model.eval() 42 | 43 | fileAppend = open("../funcs.txt", "a") 44 | 45 | def main(params): 46 | t1 = time.time() 47 | blobName = "in.txt" 48 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 49 | with open(blobName, "wb") as my_blob: 50 | t3 = time.time() 51 | download_stream = blob_client.download_blob() 52 | t4 = time.time() 53 | my_blob.write(download_stream.readall()) 54 | 55 | output_names = list(rnn_model.samples(language, start_letters)) 56 | value = str(output_names) 57 | blobName = "out.txt" 58 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 59 | t5 = time.time() 60 | blob_client.upload_blob(value, overwrite=True) 61 | t6 = time.time() 62 | t2 = time.time() 63 | print("--- RNN-SERV ---", file=fileAppend) 64 | print("Handler time = ", t2-t1, file=fileAppend) 65 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 66 | 67 | return {"Prediction":"correct"} 68 | 69 | main({"func":"test"}) -------------------------------------------------------------------------------- /characterization/functions-idle-time/tempImage.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/tempImage.jpeg -------------------------------------------------------------------------------- /characterization/functions-idle-time/vid1.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-idle-time/vid1.mp4 -------------------------------------------------------------------------------- /characterization/functions-idle-time/vid_proc.py: -------------------------------------------------------------------------------- 1 | import time 2 | import cv2 3 | from azure.storage.blob import BlobServiceClient, BlobClient 4 | 5 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 6 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 7 | container_client = blob_service_client.get_container_client("artifacteval") 8 | 9 | tmp = "/tmp/" 10 | 11 | 
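# Note on the processing loop below: the VideoWriter is opened in its
# default color mode, so each grayscale frame is written out to a temporary
# JPEG and read back with cv2.imread, which returns a 3-channel BGR image
# again; that disk round trip keeps the frames compatible with the writer.
# (result_file_path below is computed but unused; the download writes the
# video to vid_name in the working directory instead.)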
vid_name = 'vid1.mp4' 12 | 13 | result_file_path = tmp + vid_name 14 | 15 | fileAppend = open("../funcs.txt", "a") 16 | 17 | def video_processing(): 18 | t1 = time.time() 19 | blobName = "vid1.mp4" 20 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 21 | with open(vid_name, "wb") as my_blob: 22 | t3 = time.time() 23 | download_stream = blob_client.download_blob() 24 | t4 = time.time() 25 | my_blob.write(download_stream.readall()) 26 | video = cv2.VideoCapture(vid_name) 27 | 28 | width = int(video.get(3)) 29 | height = int(video.get(4)) 30 | fourcc = cv2.VideoWriter_fourcc(*'MPEG') 31 | out = cv2.VideoWriter('output.avi',fourcc, 20.0, (width, height)) 32 | 33 | while video.isOpened(): 34 | ret, frame = video.read() 35 | if ret: 36 | gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 37 | tmp_file_path = tmp+'tmp.jpg' 38 | cv2.imwrite(tmp_file_path, gray_frame) 39 | gray_frame = cv2.imread(tmp_file_path) 40 | out.write(gray_frame) 41 | # break 42 | else: 43 | break 44 | 45 | fRead = open("output.avi","rb") 46 | value = fRead.read() 47 | blobName = "output.avi" 48 | t5 = time.time() 49 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 50 | blob_client.upload_blob(value, overwrite=True) 51 | t6 = time.time() 52 | 53 | video.release() 54 | out.release() 55 | t2 = time.time() 56 | print("--- VID PROC ---", file=fileAppend) 57 | print("Handler time = ", t2-t1, file=fileAppend) 58 | print("Idle time = ", t4-t3+t6-t5, file=fileAppend) 59 | return 60 | 61 | def serve(): 62 | video_processing() 63 | 64 | if __name__ == '__main__': 65 | serve() 66 | -------------------------------------------------------------------------------- /characterization/functions-idle-time/web_serve.py: -------------------------------------------------------------------------------- 1 | import time 2 | from azure.storage.blob import BlobServiceClient, BlobClient 3 | 4 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 5 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 6 | container_client = blob_service_client.get_container_client("artifacteval") 7 | 8 | fileAppend = open("../funcs.txt", "a") 9 | 10 | def main(params): 11 | t1 = time.time() 12 | blobName = "money.txt" 13 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 14 | with open(blobName, "wb") as my_blob: 15 | t3 = time.time() 16 | download_stream = blob_client.download_blob() 17 | my_blob.write(download_stream.readall()) 18 | t4 = time.time() 19 | moneyF = open(blobName, "r") 20 | money = float(moneyF.readline()) 21 | moneyF.close() 22 | money -= 100.0 23 | new_file = open("moneyTemp.txt", "w") 24 | new_file.write(str(money)) 25 | new_file.close() 26 | fRead = open("moneyTemp.txt","rb") 27 | value = fRead.read() 28 | blobName = "money.txt" 29 | t5 = time.time() 30 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 31 | blob_client.upload_blob(value, overwrite=True) 32 | t6 = time.time() 33 | t2 = time.time() 34 | print("--- WEB SERVE ---", file=fileAppend) 35 | print("Handler time = ", t2-t1, file=fileAppend) 36 | print("Idle time = ", t4+t6-t3-t5, file=fileAppend) 37 | return 
{"Money":"withdrawn"} 38 | 39 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/__pycache__/rnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/__pycache__/rnn.cpython-36.pyc -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/cnn_serving.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | from mxnet import gluon 7 | import mxnet as mx 8 | from PIL import Image 9 | from azure.storage.blob import BlobServiceClient, BlobClient 10 | 11 | fileAppend = open("../funcs.txt", "a") 12 | 13 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 14 | print("--- CNN SERVING ---", file=fileAppend) 15 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 16 | 17 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 18 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 19 | container_client = blob_service_client.get_container_client("artifacteval") 20 | 21 | net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, root = '/tmp/') 22 | net.hybridize(static_alloc=True, static_shape=True) 23 | lblPath = gluon.utils.download('http://data.mxnet.io/models/imagenet/synset.txt',path='/tmp/') 24 | with open(lblPath, 'r') as f: 25 | labels = [l.rstrip() for l in f] 26 | 27 | memoryUse_old = memoryUse 28 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 29 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 30 | 31 | def lambda_handler(): 32 | blobName = "img10.jpg" 33 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 34 | with open(blobName, "wb") as my_blob: 35 | download_stream = blob_client.download_blob() 36 | my_blob.write(download_stream.readall()) 37 | image = Image.open(blobName) 38 | image.save('tempImage.jpeg') 39 | 40 | # format image as (batch, RGB, width, height) 41 | img = mx.image.imread("tempImage.jpeg") 42 | img = mx.image.imresize(img, 224, 224) # resize 43 | img = mx.image.color_normalize(img.astype(dtype='float32')/255, 44 | mean=mx.nd.array([0.485, 0.456, 0.406]), 45 | std=mx.nd.array([0.229, 0.224, 0.225])) # normalize 46 | img = img.transpose((2, 0, 1)) # channel first 47 | img = img.expand_dims(axis=0) # batchify 48 | 49 | prob = net(img).softmax() # predict and normalize output 50 | idx = prob.topk(k=5)[0] # get top 5 result 51 | inference = '' 52 | for i in idx: 53 | i = int(i.asscalar()) 54 | # print('With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i])) 55 | inference = inference + 'With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i]) + '. 
' 56 | return inference 57 | 58 | lambda_handler() 59 | memoryUse_old = memoryUse 60 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 61 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/create_ord.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | import uuid 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | fileAppend = open("../funcs.txt", "a") 9 | 10 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 11 | print("--- CREATE-ORD ---", file=fileAppend) 12 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 13 | 14 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 15 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 16 | container_client = blob_service_client.get_container_client("artifacteval") 17 | 18 | memoryUse_old = memoryUse 19 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 20 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 21 | 22 | def main(params): 23 | blobName = "ordIDs.txt" 24 | ordID = 1 25 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 26 | with open(blobName, "wb") as my_blob: 27 | download_stream = blob_client.download_blob() 28 | my_blob.write(download_stream.readall()) 29 | orderF = open(blobName, "r") 30 | orderIDs = orderF.readlines() 31 | orderPrice = -1 32 | for line in orderIDs: 33 | lineStr = line.split(" ") 34 | if int(lineStr[0]) == ordID: 35 | orderPrice = float(lineStr[1]) 36 | break 37 | orderF.close() 38 | new_file = open("ordTemp.txt", "w") 39 | new_file.write(str(uuid.uuid1()) + " ---" + str(orderPrice)) 40 | new_file.close() 41 | fRead = open("ordTemp.txt","rb") 42 | value = fRead.read() 43 | blobName = "ordPrice.txt" 44 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 45 | blob_client.upload_blob(value, overwrite=True) 46 | return {"Order":"created"} 47 | 48 | main({"test":"func"}) 49 | memoryUse_old = memoryUse 50 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 51 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/finalized_model.sav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/finalized_model.sav -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img0.png -------------------------------------------------------------------------------- 
/characterization/functions-mem-footprint/img1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img1.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img10.jpg -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img10.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img11.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img12.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img13.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img14.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img15.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img16.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img17.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img17.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img18.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img19.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img2.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img20.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img3.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img4.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img5.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img6.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img7.png -------------------------------------------------------------------------------- 
/characterization/functions-mem-footprint/img8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img8.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/img9.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img_res.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | from PIL import Image 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | 9 | fileAppend = open("../funcs.txt", "a") 10 | 11 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 12 | print("--- IMG RES ---", file=fileAppend) 13 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 14 | 15 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 16 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 17 | container_client = blob_service_client.get_container_client("artifacteval") 18 | 19 | images = {} 20 | for indI in range(0, 20): 21 | blbName = "img" + str(indI) + ".png" 22 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blbName) 23 | with open(blbName, "wb") as my_blob: 24 | download_stream = blob_client.download_blob() 25 | my_blob.write(download_stream.readall()) 26 | images[blbName] = Image.open(blbName) 27 | 28 | memoryUse_old = memoryUse 29 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 30 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 31 | 32 | def main(params): 33 | blobName = "img20.png" 34 | if blobName not in images: 35 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 36 | with open(blobName, "wb") as my_blob: 37 | download_stream = blob_client.download_blob() 38 | my_blob.write(download_stream.readall()) 39 | image = Image.open(blobName) 40 | images[blobName] = image 41 | else: 42 | image = images[blobName] 43 | width, height = image.size 44 | # Setting the points for cropped image 45 | left = 4 46 | top = height / 5 47 | right = 100 48 | bottom = 3 * height / 5 49 | im1 = image.crop((left, top, right, bottom)) 50 | im1.save("newImage.png") 51 | 52 | fRead = open("newImage.png","rb") 53 | value = fRead.read() 54 | blobName = "img10_res.png" 55 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 56 | blob_client.upload_blob(value, overwrite=True) 57 | return {"Image":"rotated"} 58 | 59 | main({"test":"func"}) 60 | memoryUse_old = memoryUse 61 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 62 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) 
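# The three "memory use" checkpoints above follow the same psutil pattern as
# the rest of the mem-footprint scripts. A minimal, self-contained sketch of
# that pattern (rss_delta_mb is a hypothetical helper, not part of the
# artifact):

import os
import psutil

_proc = psutil.Process(os.getpid())
_last_rss_mb = 0.0

def rss_delta_mb():
    # Return the growth in resident set size, in MB, since the previous call.
    global _last_rss_mb
    rss_mb = _proc.memory_info().rss / 2.0 ** 20
    delta = rss_mb - _last_rss_mb
    _last_rss_mb = rss_mb
    return delta

# Usage mirrors the checkpoints above: call once after imports ("memory use
# 1"), once after module-level initialization ("memory use 2"), and once
# after the handler runs ("memory use 3").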
-------------------------------------------------------------------------------- /characterization/functions-mem-footprint/img_rot.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | from PIL import Image 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | 9 | fileAppend = open("../funcs.txt", "a") 10 | 11 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 12 | print("--- IMG ROT ---", file=fileAppend) 13 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 14 | 15 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 16 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 17 | container_client = blob_service_client.get_container_client("artifacteval") 18 | 19 | memoryUse_old = memoryUse 20 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 21 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 22 | 23 | def main(params): 24 | blobName = "img10.jpg" 25 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 26 | with open(blobName, "wb") as my_blob: 27 | download_stream = blob_client.download_blob() 28 | my_blob.write(download_stream.readall()) 29 | image = Image.open(blobName) 30 | img = image.transpose(Image.ROTATE_90) 31 | img.save('newImage.jpeg') 32 | fRead = open("newImage.jpeg","rb") 33 | value = fRead.read() 34 | blobName = "img10_rot.jpg" 35 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 36 | blob_client.upload_blob(value, overwrite=True) 37 | return {"Image":"rotated"} 38 | 39 | main({"test":"func"}) 40 | memoryUse_old = memoryUse 41 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 42 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/in.txt: -------------------------------------------------------------------------------- 1 | Hello world -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/lr_model.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/lr_model.pk -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/lr_serving.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | from sklearn.feature_extraction.text import TfidfVectorizer 7 | import joblib 8 | import pandas as pd 9 | import re 10 | from azure.storage.blob import BlobServiceClient, BlobClient 11 | 12 | fileAppend = open("../funcs.txt", "a") 13 | 14 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 15 | print("--- LR SERVING ---", file=fileAppend) 16 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 17 | 
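# Checkpoint layout in this script: "memory use 1" (printed above) covers
# the import cost, and "memory use 2" additionally covers the module-level
# initialization below (dataset download, TfidfVectorizer fit, model load).
# Unlike the other mem-footprint scripts, no third checkpoint is printed
# after main(), so the handler's own footprint is not reported here.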
18 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 19 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 20 | container_client = blob_service_client.get_container_client("artifacteval") 21 | 22 | cleanup_re = re.compile('[^a-z]+') 23 | 24 | def cleanup(sentence): 25 | sentence = sentence.lower() 26 | sentence = cleanup_re.sub(' ', sentence).strip() 27 | return sentence 28 | 29 | blobName = "minioDataset.csv" 30 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 31 | with open(blobName, "wb") as my_blob: 32 | download_stream = blob_client.download_blob() 33 | my_blob.write(download_stream.readall()) 34 | dataset = pd.read_csv('minioDataset.csv') 35 | 36 | df_input = pd.DataFrame() 37 | dataset['train'] = dataset['Text'].apply(cleanup) 38 | tfidf_vect = TfidfVectorizer(min_df=100).fit(dataset['train']) 39 | x = 'The ambiance is magical. The food and service was nice! The lobster and cheese was to die for and our steaks were cooked perfectly. ' 40 | df_input['x'] = [x] 41 | df_input['x'] = df_input['x'].apply(cleanup) 42 | X = tfidf_vect.transform(df_input['x']) 43 | 44 | x = 'My favorite cafe. I like going there on weekends, always taking a cafe and some of their pastry before visiting my parents. ' 45 | df_input['x'] = [x] 46 | df_input['x'] = df_input['x'].apply(cleanup) 47 | X2 = tfidf_vect.transform(df_input['x']) 48 | 49 | blobName = "lr_model.pk" 50 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 51 | with open(blobName, "wb") as my_blob: 52 | download_stream = blob_client.download_blob() 53 | my_blob.write(download_stream.readall()) 54 | model = joblib.load('lr_model.pk') 55 | print('Model is ready') 56 | 57 | memoryUse_old = memoryUse 58 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 59 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 60 | 61 | def main(params): 62 | blobName = "in.txt" 63 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 64 | with open(blobName, "wb") as my_blob: 65 | download_stream = blob_client.download_blob() 66 | my_blob.write(download_stream.readall()) 67 | 68 | y = model.predict(X) 69 | 70 | value = y 71 | blobName = "out.txt" 72 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 73 | blob_client.upload_blob(value, overwrite=True) 74 | 75 | main({"test":"func"}) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/ml_train.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | import pickle 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | from sklearn.feature_extraction.text import TfidfVectorizer 9 | from sklearn.linear_model import LogisticRegression 10 | import pandas as pd 11 | import re 12 | import warnings 13 | 14 | warnings.filterwarnings("ignore") 15 | 16 | fileAppend = open("../funcs.txt", "a") 17 | 18 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 19 | print("--- ML TRAIN 
---", file=fileAppend) 20 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 21 | 22 | cleanup_re = re.compile('[^a-z]+') 23 | 24 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 25 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 26 | container_client = blob_service_client.get_container_client("artifacteval") 27 | 28 | def cleanup(sentence): 29 | sentence = sentence.lower() 30 | sentence = cleanup_re.sub(' ', sentence).strip() 31 | return sentence 32 | 33 | df_name = 'minioDataset.csv' 34 | df_path = 'pulled_' + df_name 35 | 36 | blobName = df_name 37 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 38 | with open(df_path, "wb") as my_blob: 39 | download_stream = blob_client.download_blob() 40 | my_blob.write(download_stream.readall()) 41 | df = pd.read_csv(df_path) 42 | df['train'] = df['Text'].apply(cleanup) 43 | 44 | memoryUse_old = memoryUse 45 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 46 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 47 | 48 | def serve(): 49 | 50 | model = LogisticRegression(max_iter=10) 51 | tfidf_vector = TfidfVectorizer(min_df=1000).fit(df['train']) 52 | train = tfidf_vector.transform(df['train']) 53 | model.fit(train, df['Score']) 54 | 55 | filename = 'finalized_model.sav' 56 | pickle.dump(model, open(filename, 'wb')) 57 | 58 | fRead = open("finalized_model.sav","rb") 59 | value = fRead.read() 60 | blobName = "finalized_model.sav" 61 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 62 | blob_client.upload_blob(value, overwrite=True) 63 | 64 | return {"Ok":"done"} 65 | 66 | if __name__ == '__main__': 67 | serve() 68 | memoryUse_old = memoryUse 69 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 70 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/money.txt: -------------------------------------------------------------------------------- 1 | -14838.8546 -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/moneyTemp.txt: -------------------------------------------------------------------------------- 1 | -14938.8546 -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/newImage.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/newImage.jpeg -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/newImage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/newImage.png -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/ordIDs.txt: 
--------------------------------------------------------------------------------
1 | 1 1000.014
2 | 2 3531.52
3 | 3 53646775.25
4 | 4 35641.1
5 | 5 25625.1
--------------------------------------------------------------------------------
/characterization/functions-mem-footprint/ordTemp.txt:
--------------------------------------------------------------------------------
1 | 27366dbb-cf89-11ed-aad3-1c34da7240d6 ---1000.014
--------------------------------------------------------------------------------
/characterization/functions-mem-footprint/output.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/output.avi
--------------------------------------------------------------------------------
/characterization/functions-mem-footprint/pay_ord.py:
--------------------------------------------------------------------------------
1 | import psutil
2 | import os
3 | pid = os.getpid()
4 | python_process = psutil.Process(pid)
5 | memoryUse_old = 0
6 | from azure.storage.blob import BlobServiceClient, BlobClient
7 | 
8 | fileAppend = open("../funcs.txt", "a")
9 | 
10 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB
11 | print("--- PAY ORD ---", file=fileAppend)
12 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend)
13 | 
14 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net"
15 | blob_service_client = BlobServiceClient.from_connection_string(connection_string)
16 | container_client = blob_service_client.get_container_client("artifacteval")
17 | 
18 | memoryUse_old = memoryUse
19 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB
20 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend)
21 | 
22 | def main(params):
23 |     blobName = "money.txt"
24 |     blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName)
25 |     with open(blobName, "wb") as my_blob:
26 |         download_stream = blob_client.download_blob()
27 |         my_blob.write(download_stream.readall())
28 |     moneyF = open(blobName, "r")
29 |     money = float(moneyF.readline())
30 |     moneyF.close()
31 |     money -= 100.0
32 |     new_file = open("moneyTemp.txt", "w")
33 |     new_file.write(str(money))
34 |     new_file.close()
35 |     fRead = open("moneyTemp.txt","rb")
36 |     value = fRead.read()
37 |     blobName = "money.txt"
38 |     blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName)
39 |     blob_client.upload_blob(value, overwrite=True)
40 |     return {"Order":"Paid"}
41 | 
42 | main({"test":"func"})
43 | memoryUse_old = memoryUse
44 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB
45 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend)
--------------------------------------------------------------------------------
/characterization/functions-mem-footprint/rnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.autograd import Variable
4 | 
5 | 
6 | class RNN(nn.Module):
7 |     def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters):
8 |         super(RNN, self).__init__()
9 |         self.hidden_size =
hidden_size 10 | 11 | self.all_categories = all_categories 12 | self.n_categories = n_categories 13 | self.all_letters = all_letters 14 | self.n_letters = n_letters 15 | 16 | self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) 17 | self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) 18 | self.o2o = nn.Linear(hidden_size + output_size, output_size) 19 | self.dropout = nn.Dropout(0.1) 20 | self.softmax = nn.LogSoftmax(dim=1) 21 | 22 | def forward(self, category, input_tensor, hidden): 23 | input_combined = torch.cat((category, input_tensor, hidden), 1) 24 | hidden = self.i2h(input_combined) 25 | output = self.i2o(input_combined) 26 | output_combined = torch.cat((hidden, output), 1) 27 | output = self.o2o(output_combined) 28 | output = self.dropout(output) 29 | output = self.softmax(output) 30 | return output, hidden 31 | 32 | def init_hidden(self): 33 | return Variable(torch.zeros(1, self.hidden_size)) 34 | 35 | @staticmethod 36 | def gen_input_tensor(all_letters, n_letters, line): 37 | tensor = torch.zeros(len(line), 1, n_letters) 38 | for li in range(len(line)): 39 | letter = line[li] 40 | tensor[li][0][all_letters.find(letter)] = 1 41 | return tensor 42 | 43 | @staticmethod 44 | def gen_category_tensor(all_categories, n_categories, category): 45 | li = all_categories.index(category) 46 | tensor = torch.zeros(1, n_categories) 47 | tensor[0][li] = 1 48 | return tensor 49 | 50 | # Sample from a category and starting letter 51 | def sample(self, category, start_letter='A'): 52 | category_tensor = Variable(self.gen_category_tensor(self.all_categories, self.n_categories, category)) 53 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) 54 | hidden = self.init_hidden() 55 | 56 | output_name = start_letter 57 | 58 | max_length = 20 59 | for i in range(max_length): 60 | output, hidden = self.forward(category_tensor, input_tensor[0], hidden) 61 | topv, topi = output.data.topk(1) 62 | topi = topi[0][0] 63 | 64 | if topi == self.n_letters - 1: 65 | break 66 | else: 67 | letter = self.all_letters[topi] 68 | output_name += letter 69 | 70 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) 71 | 72 | return output_name 73 | 74 | # Get multiple samples from one category and multiple starting letters 75 | def samples(self, category, start_letters='ABC'): 76 | for start_letter in start_letters: 77 | yield self.sample(category, start_letter) 78 | -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/rnn_model.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/rnn_model.pth -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/rnn_params.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/rnn_params.pkl -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/rnn_serving.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = 
psutil.Process(pid) 5 | memoryUse_old = 0 6 | import pickle 7 | import torch 8 | import rnn 9 | import io 10 | import string 11 | from azure.storage.blob import BlobServiceClient, BlobClient 12 | 13 | fileAppend = open("../funcs.txt", "a") 14 | 15 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 16 | print("--- RNN SERVING ---", file=fileAppend) 17 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 18 | 19 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 20 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 21 | container_client = blob_service_client.get_container_client("artifacteval") 22 | 23 | torch.set_num_threads(1) 24 | language = 'Scottish' 25 | language2 = 'Russian' 26 | start_letters = 'ABCDEFGHIJKLMNOP' 27 | start_letters2 = 'QRSTUVWXYZABCDEF' 28 | 29 | blobName = "rnn_params.pkl" 30 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 31 | with open(blobName, "wb") as my_blob: 32 | download_stream = blob_client.download_blob() 33 | my_blob.write(download_stream.readall()) 34 | my_blob = open(blobName, "rb") 35 | params = pickle.load(my_blob) 36 | 37 | all_categories =['French', 'Czech', 'Dutch', 'Polish', 'Scottish', 'Chinese', 'English', 'Italian', 'Portuguese', 'Japanese', 'German', 'Russian', 'Korean', 'Arabic', 'Greek', 'Vietnamese', 'Spanish', 'Irish'] 38 | n_categories = len(all_categories) 39 | all_letters = string.ascii_letters + " .,;'-" 40 | n_letters = len(all_letters) + 1 41 | 42 | rnn_model = rnn.RNN(n_letters, 128, n_letters, all_categories, n_categories, all_letters, n_letters) 43 | blobName = "rnn_model.pth" 44 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 45 | with open(blobName, "wb") as my_blob: 46 | download_stream = blob_client.download_blob() 47 | my_blob.write(download_stream.readall()) 48 | my_blob = open(blobName, "rb") 49 | buffer = io.BytesIO(my_blob.read()) 50 | rnn_model.load_state_dict(torch.load(buffer)) 51 | rnn_model.eval() 52 | 53 | memoryUse_old = memoryUse 54 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 55 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 56 | 57 | def main(params): 58 | blobName = "in.txt" 59 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 60 | with open(blobName, "wb") as my_blob: 61 | download_stream = blob_client.download_blob() 62 | my_blob.write(download_stream.readall()) 63 | 64 | output_names = list(rnn_model.samples(language, start_letters)) 65 | value = str(output_names) 66 | blobName = "out.txt" 67 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 68 | blob_client.upload_blob(value, overwrite=True) 69 | 70 | return {"Prediction":"correct"} 71 | 72 | main({"func":"test"}) 73 | memoryUse_old = memoryUse 74 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 75 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/tempImage.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/tempImage.jpeg -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/vid1.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/characterization/functions-mem-footprint/vid1.mp4 -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/vid_proc.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | import cv2 7 | from azure.storage.blob import BlobServiceClient, BlobClient 8 | 9 | fileAppend = open("../funcs.txt", "a") 10 | 11 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 12 | print("--- VID PROC ---", file=fileAppend) 13 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 14 | 15 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 16 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 17 | container_client = blob_service_client.get_container_client("artifacteval") 18 | 19 | tmp = "/tmp/" 20 | 21 | vid_name = 'vid1.mp4' 22 | 23 | blobName = "vid1.mp4" 24 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 25 | with open(vid_name, "wb") as my_blob: 26 | download_stream = blob_client.download_blob() 27 | my_blob.write(download_stream.readall()) 28 | 29 | result_file_path = tmp + vid_name 30 | 31 | memoryUse_old = memoryUse 32 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 33 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 34 | 35 | def video_processing(): 36 | 37 | video = cv2.VideoCapture(vid_name) 38 | 39 | width = int(video.get(3)) 40 | height = int(video.get(4)) 41 | fourcc = cv2.VideoWriter_fourcc(*'MPEG') 42 | out = cv2.VideoWriter('output.avi',fourcc, 20.0, (width, height)) 43 | 44 | while video.isOpened(): 45 | ret, frame = video.read() 46 | if ret: 47 | gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 48 | tmp_file_path = tmp+'tmp.jpg' 49 | cv2.imwrite(tmp_file_path, gray_frame) 50 | gray_frame = cv2.imread(tmp_file_path) 51 | out.write(gray_frame) 52 | break 53 | else: 54 | break 55 | 56 | fRead = open("output.avi","rb") 57 | value = fRead.read() 58 | blobName = "output.avi" 59 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 60 | blob_client.upload_blob(value, overwrite=True) 61 | 62 | video.release() 63 | out.release() 64 | return 65 | 66 | def serve(): 67 | video_processing() 68 | 69 | if __name__ == '__main__': 70 | serve() 71 | memoryUse_old = memoryUse 72 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 73 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/functions-mem-footprint/web_serve.py: -------------------------------------------------------------------------------- 1 | 
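# NOTE: despite its name, this web_serve characterization function runs the
# same blob-backed read/modify/write sequence as pay_ord.py (pull money.txt,
# subtract 100, upload the result); only the log label and the returned
# status string differ.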
import psutil 2 | import os 3 | pid = os.getpid() 4 | python_process = psutil.Process(pid) 5 | memoryUse_old = 0 6 | from azure.storage.blob import BlobServiceClient, BlobClient 7 | 8 | fileAppend = open("../funcs.txt", "a") 9 | 10 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 11 | print("--- WEB SERVE ---", file=fileAppend) 12 | print('memory use 1:', memoryUse-memoryUse_old, file=fileAppend) 13 | 14 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 15 | blob_service_client = BlobServiceClient.from_connection_string(connection_string) 16 | container_client = blob_service_client.get_container_client("artifacteval") 17 | 18 | memoryUse_old = memoryUse 19 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 20 | print('memory use 2:', memoryUse-memoryUse_old, file=fileAppend) 21 | 22 | def main(params): 23 | blobName = "money.txt" 24 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 25 | with open(blobName, "wb") as my_blob: 26 | download_stream = blob_client.download_blob() 27 | my_blob.write(download_stream.readall()) 28 | moneyF = open(blobName, "r") 29 | money = float(moneyF.readline()) 30 | moneyF.close() 31 | money -= 100.0 32 | new_file = open("moneyTemp.txt", "w") 33 | new_file.write(str(money)) 34 | new_file.close() 35 | fRead = open("moneyTemp.txt","rb") 36 | value = fRead.read() 37 | blobName = "money.txt" 38 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=blobName) 39 | blob_client.upload_blob(value, overwrite=True) 40 | return {"Money":"withdrawn"} 41 | 42 | main({"test":"func"}) 43 | memoryUse_old = memoryUse 44 | memoryUse = python_process.memory_info()[0]/2.**20 # memory use in MB 45 | print('memory use 3:', memoryUse-memoryUse_old, file=fileAppend) -------------------------------------------------------------------------------- /characterization/install-libs.sh: -------------------------------------------------------------------------------- 1 | sudo apt-get install python3-pip -y 2 | sudo apt-get install ffmpeg libsm6 libxext6 -y 3 | 4 | pip3 install --upgrade pip 5 | 6 | pip3 install psutil mxnet Pillow azure-storage-blob pandas scikit-learn 7 | 8 | pip3 install opencv-python==4.3.0.38 9 | pip3 install imgaug 10 | 11 | pip3 install torch -------------------------------------------------------------------------------- /ecofaas_init/node_controller/node_controller.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package nodecontroller; 4 | 5 | // Message containing utilization data from a pool. 6 | message PoolUtilization { 7 | string pool_id = 1; 8 | int32 num_served_invocations = 2; 9 | double avg_waiting_time = 3; // in milliseconds 10 | int32 num_invocations_lower_freq_possible = 4; 11 | int32 num_invocations_increased_freq = 5; 12 | } 13 | 14 | // Assignment message for a pool. 15 | message Assignment { 16 | int32 num_cores = 1; 17 | int32 frequency_level = 2; // e.g., 1: low, 2: medium, 3: high 18 | } 19 | 20 | // Request message for updating pool utilization. 21 | message UpdatePoolUtilizationRequest { 22 | PoolUtilization utilization = 1; 23 | } 24 | 25 | // Response message for updating pool utilization. 
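// The controller replies to each utilization report with a new core-count and
// frequency-level assignment for the reporting pool (see Assignment above),
// plus a free-form status message.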
26 | message UpdatePoolUtilizationResponse { 27 | Assignment assignment = 1; 28 | string message = 2; 29 | } 30 | 31 | // The NodeController service definition. 32 | service NodeController { 33 | // Pool Controllers call this to send their utilization data and receive assignments. 34 | rpc UpdatePoolUtilization (UpdatePoolUtilizationRequest) returns (UpdatePoolUtilizationResponse); 35 | } 36 | -------------------------------------------------------------------------------- /ecofaas_init/node_controller/node_controller_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: node_controller.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15node_controller.proto\x12\x0enodecontroller\"\xb1\x01\n\x0fPoolUtilization\x12\x0f\n\x07pool_id\x18\x01 \x01(\t\x12\x1e\n\x16num_served_invocations\x18\x02 \x01(\x05\x12\x18\n\x10\x61vg_waiting_time\x18\x03 \x01(\x01\x12+\n#num_invocations_lower_freq_possible\x18\x04 \x01(\x05\x12&\n\x1enum_invocations_increased_freq\x18\x05 \x01(\x05\"8\n\nAssignment\x12\x11\n\tnum_cores\x18\x01 \x01(\x05\x12\x17\n\x0f\x66requency_level\x18\x02 \x01(\x05\"T\n\x1cUpdatePoolUtilizationRequest\x12\x34\n\x0butilization\x18\x01 \x01(\x0b\x32\x1f.nodecontroller.PoolUtilization\"`\n\x1dUpdatePoolUtilizationResponse\x12.\n\nassignment\x18\x01 \x01(\x0b\x32\x1a.nodecontroller.Assignment\x12\x0f\n\x07message\x18\x02 \x01(\t2\x86\x01\n\x0eNodeController\x12t\n\x15UpdatePoolUtilization\x12,.nodecontroller.UpdatePoolUtilizationRequest\x1a-.nodecontroller.UpdatePoolUtilizationResponseb\x06proto3') 17 | 18 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 19 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'node_controller_pb2', globals()) 20 | if _descriptor._USE_C_DESCRIPTORS == False: 21 | 22 | DESCRIPTOR._options = None 23 | _POOLUTILIZATION._serialized_start=42 24 | _POOLUTILIZATION._serialized_end=219 25 | _ASSIGNMENT._serialized_start=221 26 | _ASSIGNMENT._serialized_end=277 27 | _UPDATEPOOLUTILIZATIONREQUEST._serialized_start=279 28 | _UPDATEPOOLUTILIZATIONREQUEST._serialized_end=363 29 | _UPDATEPOOLUTILIZATIONRESPONSE._serialized_start=365 30 | _UPDATEPOOLUTILIZATIONRESPONSE._serialized_end=461 31 | _NODECONTROLLER._serialized_start=464 32 | _NODECONTROLLER._serialized_end=598 33 | # @@protoc_insertion_point(module_scope) 34 | -------------------------------------------------------------------------------- /ecofaas_init/node_controller/node_controller_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import node_controller_pb2 as node__controller__pb2 6 | 7 | 8 | class NodeControllerStub(object): 9 | """The NodeController service definition. 10 | """ 11 | 12 | def __init__(self, channel): 13 | """Constructor. 14 | 15 | Args: 16 | channel: A grpc.Channel. 
17 | """ 18 | self.UpdatePoolUtilization = channel.unary_unary( 19 | '/nodecontroller.NodeController/UpdatePoolUtilization', 20 | request_serializer=node__controller__pb2.UpdatePoolUtilizationRequest.SerializeToString, 21 | response_deserializer=node__controller__pb2.UpdatePoolUtilizationResponse.FromString, 22 | ) 23 | 24 | 25 | class NodeControllerServicer(object): 26 | """The NodeController service definition. 27 | """ 28 | 29 | def UpdatePoolUtilization(self, request, context): 30 | """Pool Controllers call this to send their utilization data and receive assignments. 31 | """ 32 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 33 | context.set_details('Method not implemented!') 34 | raise NotImplementedError('Method not implemented!') 35 | 36 | 37 | def add_NodeControllerServicer_to_server(servicer, server): 38 | rpc_method_handlers = { 39 | 'UpdatePoolUtilization': grpc.unary_unary_rpc_method_handler( 40 | servicer.UpdatePoolUtilization, 41 | request_deserializer=node__controller__pb2.UpdatePoolUtilizationRequest.FromString, 42 | response_serializer=node__controller__pb2.UpdatePoolUtilizationResponse.SerializeToString, 43 | ), 44 | } 45 | generic_handler = grpc.method_handlers_generic_handler( 46 | 'nodecontroller.NodeController', rpc_method_handlers) 47 | server.add_generic_rpc_handlers((generic_handler,)) 48 | 49 | 50 | # This class is part of an EXPERIMENTAL API. 51 | class NodeController(object): 52 | """The NodeController service definition. 53 | """ 54 | 55 | @staticmethod 56 | def UpdatePoolUtilization(request, 57 | target, 58 | options=(), 59 | channel_credentials=None, 60 | call_credentials=None, 61 | insecure=False, 62 | compression=None, 63 | wait_for_ready=None, 64 | timeout=None, 65 | metadata=None): 66 | return grpc.experimental.unary_unary(request, target, '/nodecontroller.NodeController/UpdatePoolUtilization', 67 | node__controller__pb2.UpdatePoolUtilizationRequest.SerializeToString, 68 | node__controller__pb2.UpdatePoolUtilizationResponse.FromString, 69 | options, channel_credentials, 70 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 71 | -------------------------------------------------------------------------------- /ecofaas_init/node_controller/test_client.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | import node_controller_pb2 3 | import node_controller_pb2_grpc 4 | import time 5 | 6 | # Define REFRESH_INTERVAL to match the server configuration 7 | REFRESH_INTERVAL = 10 # Match this with the server's REFRESH_INTERVAL 8 | 9 | def run(): 10 | # Connect to the NodeController server 11 | channel = grpc.insecure_channel('localhost:50052') 12 | stub = node_controller_pb2_grpc.NodeControllerStub(channel) 13 | 14 | # Example utilization data for multiple pools 15 | pool_utilizations = [ 16 | node_controller_pb2.PoolUtilization( 17 | pool_id="pool_1", 18 | num_served_invocations=100, 19 | avg_waiting_time=50.0, # in milliseconds 20 | num_invocations_lower_freq_possible=20, 21 | num_invocations_increased_freq=10 22 | ), 23 | node_controller_pb2.PoolUtilization( 24 | pool_id="pool_2", 25 | num_served_invocations=150, 26 | avg_waiting_time=30.0, 27 | num_invocations_lower_freq_possible=15, 28 | num_invocations_increased_freq=5 29 | ), 30 | node_controller_pb2.PoolUtilization( 31 | pool_id="pool_3", 32 | num_served_invocations=80, 33 | avg_waiting_time=70.0, 34 | num_invocations_lower_freq_possible=10, 35 | num_invocations_increased_freq=20 36 | ), 37 | # Add more pool data as needed 38 
| ] 39 | 40 | # Send utilization data to the Node Controller 41 | for util in pool_utilizations: 42 | request = node_controller_pb2.UpdatePoolUtilizationRequest( 43 | utilization=util 44 | ) 45 | response = stub.UpdatePoolUtilization(request) 46 | if response.assignment.num_cores > 0: 47 | print(f"Assignment for {util.pool_id}:") 48 | print(f" Number of Cores: {response.assignment.num_cores}") 49 | print(f" Frequency Level: {response.assignment.frequency_level}") 50 | print(f" Message: {response.message}\n") 51 | else: 52 | print(f"Failed to get assignment for {util.pool_id}: {response.message}\n") 53 | 54 | # Keep the client running to allow periodic updates (optional) 55 | # Here, we simulate periodic updates every REFRESH_INTERVAL seconds 56 | try: 57 | while True: 58 | time.sleep(REFRESH_INTERVAL) 59 | # Update utilization data as needed 60 | # For example, send updated metrics 61 | # This part can be customized based on actual use-case 62 | except KeyboardInterrupt: 63 | print("Client shutting down.") 64 | 65 | if __name__ == "__main__": 66 | run() 67 | -------------------------------------------------------------------------------- /ecofaas_init/pool_controller/pool_controller.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package pool_controller; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | // Message to represent a function request 8 | message FunctionRequest { 9 | string pid = 1; // Process ID 10 | int32 deadline = 2; // Deadline for the request 11 | } 12 | 13 | // Message to represent core and frequency update 14 | message CoreFrequencyUpdate { 15 | int32 start_core_id = 1; // Starting core ID 16 | int32 core_count = 2; // Number of cores to update 17 | int32 frequency = 3; // Frequency in MHz 18 | } 19 | 20 | // Message to represent a request update 21 | message RequestUpdate { 22 | string pid = 1; // Process ID 23 | RequestStatus status = 2; // New status 24 | } 25 | 26 | // Enum for request status 27 | enum RequestStatus { 28 | UNKNOWN = 0; 29 | RUNNING = 1; 30 | BLOCKED = 2; 31 | WAITING = 3; 32 | COMPLETED = 4; 33 | } 34 | 35 | // Message to represent utilization data 36 | message UtilizationData { 37 | int32 served_requests = 1; 38 | float avg_waiting_time = 2; 39 | int32 missed_requests = 3; 40 | int32 temp_freq_increases = 4; 41 | } 42 | 43 | // Service definition 44 | service PoolController { 45 | rpc AddRequest (FunctionRequest) returns (google.protobuf.Empty); 46 | rpc BlockRequest (RequestUpdate) returns (google.protobuf.Empty); 47 | rpc UnblockRequest (RequestUpdate) returns (google.protobuf.Empty); 48 | rpc CompleteRequest (RequestUpdate) returns (google.protobuf.Empty); 49 | rpc UpdateCoreFrequency (CoreFrequencyUpdate) returns (google.protobuf.Empty); 50 | rpc SendUtilizationData (UtilizationData) returns (google.protobuf.Empty); 51 | } 52 | -------------------------------------------------------------------------------- /ecofaas_init/pool_controller/pool_controller_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
3 | # source: pool_controller.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 15 | 16 | 17 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15pool_controller.proto\x12\x0fpool_controller\x1a\x1bgoogle/protobuf/empty.proto\"0\n\x0f\x46unctionRequest\x12\x0b\n\x03pid\x18\x01 \x01(\t\x12\x10\n\x08\x64\x65\x61\x64line\x18\x02 \x01(\x05\"S\n\x13\x43oreFrequencyUpdate\x12\x15\n\rstart_core_id\x18\x01 \x01(\x05\x12\x12\n\ncore_count\x18\x02 \x01(\x05\x12\x11\n\tfrequency\x18\x03 \x01(\x05\"L\n\rRequestUpdate\x12\x0b\n\x03pid\x18\x01 \x01(\t\x12.\n\x06status\x18\x02 \x01(\x0e\x32\x1e.pool_controller.RequestStatus\"z\n\x0fUtilizationData\x12\x17\n\x0fserved_requests\x18\x01 \x01(\x05\x12\x18\n\x10\x61vg_waiting_time\x18\x02 \x01(\x02\x12\x17\n\x0fmissed_requests\x18\x03 \x01(\x05\x12\x1b\n\x13temp_freq_increases\x18\x04 \x01(\x05*R\n\rRequestStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\x0b\n\x07\x42LOCKED\x10\x02\x12\x0b\n\x07WAITING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\x32\xdb\x03\n\x0ePoolController\x12\x46\n\nAddRequest\x12 .pool_controller.FunctionRequest\x1a\x16.google.protobuf.Empty\x12\x46\n\x0c\x42lockRequest\x12\x1e.pool_controller.RequestUpdate\x1a\x16.google.protobuf.Empty\x12H\n\x0eUnblockRequest\x12\x1e.pool_controller.RequestUpdate\x1a\x16.google.protobuf.Empty\x12I\n\x0f\x43ompleteRequest\x12\x1e.pool_controller.RequestUpdate\x1a\x16.google.protobuf.Empty\x12S\n\x13UpdateCoreFrequency\x12$.pool_controller.CoreFrequencyUpdate\x1a\x16.google.protobuf.Empty\x12O\n\x13SendUtilizationData\x12 .pool_controller.UtilizationData\x1a\x16.google.protobuf.Emptyb\x06proto3') 18 | 19 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 20 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'pool_controller_pb2', globals()) 21 | if _descriptor._USE_C_DESCRIPTORS == False: 22 | 23 | DESCRIPTOR._options = None 24 | _REQUESTSTATUS._serialized_start=408 25 | _REQUESTSTATUS._serialized_end=490 26 | _FUNCTIONREQUEST._serialized_start=71 27 | _FUNCTIONREQUEST._serialized_end=119 28 | _COREFREQUENCYUPDATE._serialized_start=121 29 | _COREFREQUENCYUPDATE._serialized_end=204 30 | _REQUESTUPDATE._serialized_start=206 31 | _REQUESTUPDATE._serialized_end=282 32 | _UTILIZATIONDATA._serialized_start=284 33 | _UTILIZATIONDATA._serialized_end=406 34 | _POOLCONTROLLER._serialized_start=493 35 | _POOLCONTROLLER._serialized_end=968 36 | # @@protoc_insertion_point(module_scope) 37 | -------------------------------------------------------------------------------- /ecofaas_init/pool_controller/test_client.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | import sys 3 | import time 4 | import pool_controller_pb2 5 | import pool_controller_pb2_grpc 6 | from google.protobuf import empty_pb2 7 | 8 | def run_client(port): 9 | # Create a gRPC channel and stub 10 | channel = grpc.insecure_channel(f'localhost:{port}') 11 | stub = pool_controller_pb2_grpc.PoolControllerStub(channel) 12 | 13 | # Add a new request 14 | print("Adding request with PID '1' and deadline 1000") 15 | add_request_response = 
stub.AddRequest(pool_controller_pb2.FunctionRequest(pid='1', deadline=1000))
16 |     print("AddRequest response:", add_request_response)
17 | 
18 |     # Block the request (the proto comment says status is the *new* status)
19 |     print("Blocking request with PID '1'")
20 |     block_request_response = stub.BlockRequest(pool_controller_pb2.RequestUpdate(pid='1', status=pool_controller_pb2.RequestStatus.BLOCKED))
21 |     print("BlockRequest response:", block_request_response)
22 | 
23 |     # Unblock the request (assumed to re-enter the waiting queue)
24 |     print("Unblocking request with PID '1'")
25 |     unblock_request_response = stub.UnblockRequest(pool_controller_pb2.RequestUpdate(pid='1', status=pool_controller_pb2.RequestStatus.WAITING))
26 |     print("UnblockRequest response:", unblock_request_response)
27 | 
28 |     # Complete the request
29 |     print("Completing request with PID '1'")
30 |     complete_request_response = stub.CompleteRequest(pool_controller_pb2.RequestUpdate(pid='1', status=pool_controller_pb2.RequestStatus.COMPLETED))
31 |     print("CompleteRequest response:", complete_request_response)
32 | 
33 |     # Update core frequency (CoreFrequencyUpdate takes start_core_id, core_count, frequency)
34 |     print("Updating core frequency to 3000 MHz with 10 cores")
35 |     update_core_freq_response = stub.UpdateCoreFrequency(pool_controller_pb2.CoreFrequencyUpdate(start_core_id=0, core_count=10, frequency=3000))
36 |     print("UpdateCoreFrequency response:", update_core_freq_response)
37 | 
38 | if __name__ == "__main__":
39 |     if len(sys.argv) != 2:
40 |         print("Usage: python test_client.py <port>")
41 |         sys.exit(1)
42 | 
43 |     port = int(sys.argv[1])
44 |     run_client(port)
45 | 
--------------------------------------------------------------------------------
/ecofaas_init/pool_manager/pool_manager.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package pool_manager;
4 | import "google/protobuf/empty.proto";
5 | 
6 | // Define the PoolUtilization message
7 | message PoolUtilization {
8 |     string pool_id = 1;
9 |     int32 num_served_invocations = 2;
10 |     double avg_waiting_time = 3;
11 |     int32 num_invocations_lower_freq_possible = 4;
12 |     int32 num_invocations_increased_freq = 5;
13 | }
14 | 
15 | // Define the NodeAssignment message
16 | message NodeAssignment {
17 |     string pool_id = 1;
18 |     int32 num_cores = 2;
19 |     int32 start_id = 3;
20 |     int32 frequency_level = 4; // Adjust according to the frequency levels used
21 |     string message = 5;
22 | }
23 | 
24 | // Request message to update the Node Controller
25 | message UpdateUtilizationsRequest {
26 |     repeated PoolUtilization utilizations = 1;
27 | }
28 | 
29 | // Response message for the Node Controller update
30 | message UpdateUtilizationsResponse {
31 |     repeated NodeAssignment assignments = 1;
32 |     string message = 2;
33 | }
34 | 
35 | // Define the Pool Manager service
36 | service PoolManager {
37 |     rpc GetUtilizations (google.protobuf.Empty) returns (UpdateUtilizationsResponse);
38 | }
--------------------------------------------------------------------------------
/ecofaas_init/pool_manager/pool_manager_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: pool_manager.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 15 | 16 | 17 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12pool_manager.proto\x12\x0cpool_manager\x1a\x1bgoogle/protobuf/empty.proto\"\xb1\x01\n\x0fPoolUtilization\x12\x0f\n\x07pool_id\x18\x01 \x01(\t\x12\x1e\n\x16num_served_invocations\x18\x02 \x01(\x05\x12\x18\n\x10\x61vg_waiting_time\x18\x03 \x01(\x01\x12+\n#num_invocations_lower_freq_possible\x18\x04 \x01(\x05\x12&\n\x1enum_invocations_increased_freq\x18\x05 \x01(\x05\"p\n\x0eNodeAssignment\x12\x0f\n\x07pool_id\x18\x01 \x01(\t\x12\x11\n\tnum_cores\x18\x02 \x01(\x05\x12\x10\n\x08start_id\x18\x03 \x01(\x05\x12\x17\n\x0f\x66requency_level\x18\x04 \x01(\x05\x12\x0f\n\x07message\x18\x05 \x01(\t\"P\n\x19UpdateUtilizationsRequest\x12\x33\n\x0cutilizations\x18\x01 \x03(\x0b\x32\x1d.pool_manager.PoolUtilization\"`\n\x1aUpdateUtilizationsResponse\x12\x31\n\x0b\x61ssignments\x18\x01 \x03(\x0b\x32\x1c.pool_manager.NodeAssignment\x12\x0f\n\x07message\x18\x02 \x01(\t2b\n\x0bPoolManager\x12S\n\x0fGetUtilizations\x12\x16.google.protobuf.Empty\x1a(.pool_manager.UpdateUtilizationsResponseb\x06proto3') 18 | 19 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 20 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'pool_manager_pb2', globals()) 21 | if _descriptor._USE_C_DESCRIPTORS == False: 22 | 23 | DESCRIPTOR._options = None 24 | _POOLUTILIZATION._serialized_start=66 25 | _POOLUTILIZATION._serialized_end=243 26 | _NODEASSIGNMENT._serialized_start=245 27 | _NODEASSIGNMENT._serialized_end=357 28 | _UPDATEUTILIZATIONSREQUEST._serialized_start=359 29 | _UPDATEUTILIZATIONSREQUEST._serialized_end=439 30 | _UPDATEUTILIZATIONSRESPONSE._serialized_start=441 31 | _UPDATEUTILIZATIONSRESPONSE._serialized_end=537 32 | _POOLMANAGER._serialized_start=539 33 | _POOLMANAGER._serialized_end=637 34 | # @@protoc_insertion_point(module_scope) 35 | -------------------------------------------------------------------------------- /ecofaas_init/pool_manager/pool_manager_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 6 | import pool_manager_pb2 as pool__manager__pb2 7 | 8 | 9 | class PoolManagerStub(object): 10 | """Define the Pool Manager service 11 | """ 12 | 13 | def __init__(self, channel): 14 | """Constructor. 15 | 16 | Args: 17 | channel: A grpc.Channel. 
18 | """ 19 | self.GetUtilizations = channel.unary_unary( 20 | '/pool_manager.PoolManager/GetUtilizations', 21 | request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, 22 | response_deserializer=pool__manager__pb2.UpdateUtilizationsResponse.FromString, 23 | ) 24 | 25 | 26 | class PoolManagerServicer(object): 27 | """Define the Pool Manager service 28 | """ 29 | 30 | def GetUtilizations(self, request, context): 31 | """Missing associated documentation comment in .proto file.""" 32 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 33 | context.set_details('Method not implemented!') 34 | raise NotImplementedError('Method not implemented!') 35 | 36 | 37 | def add_PoolManagerServicer_to_server(servicer, server): 38 | rpc_method_handlers = { 39 | 'GetUtilizations': grpc.unary_unary_rpc_method_handler( 40 | servicer.GetUtilizations, 41 | request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, 42 | response_serializer=pool__manager__pb2.UpdateUtilizationsResponse.SerializeToString, 43 | ), 44 | } 45 | generic_handler = grpc.method_handlers_generic_handler( 46 | 'pool_manager.PoolManager', rpc_method_handlers) 47 | server.add_generic_rpc_handlers((generic_handler,)) 48 | 49 | 50 | # This class is part of an EXPERIMENTAL API. 51 | class PoolManager(object): 52 | """Define the Pool Manager service 53 | """ 54 | 55 | @staticmethod 56 | def GetUtilizations(request, 57 | target, 58 | options=(), 59 | channel_credentials=None, 60 | call_credentials=None, 61 | insecure=False, 62 | compression=None, 63 | wait_for_ready=None, 64 | timeout=None, 65 | metadata=None): 66 | return grpc.experimental.unary_unary(request, target, '/pool_manager.PoolManager/GetUtilizations', 67 | google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, 68 | pool__manager__pb2.UpdateUtilizationsResponse.FromString, 69 | options, channel_credentials, 70 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 71 | -------------------------------------------------------------------------------- /ecofaas_init/pool_manager/server.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | from concurrent import futures 3 | import time 4 | 5 | import pool_manager_pb2 6 | import pool_manager_pb2_grpc 7 | import node_controller_pb2 8 | import node_controller_pb2_grpc 9 | from google.protobuf import empty_pb2 10 | 11 | # Define REFRESH_INTERVAL and TOTAL_POOLS for simulation 12 | REFRESH_INTERVAL = 60 # Match this with your actual refresh interval 13 | TOTAL_POOLS = 3 # Number of pools for this simulation 14 | 15 | class PoolManagerServicer(pool_manager_pb2_grpc.PoolManagerServicer): 16 | def __init__(self, node_controller_channel): 17 | self.node_controller_stub = node_controller_pb2_grpc.NodeControllerStub(node_controller_channel) 18 | self.pool_controllers = [ 19 | 'localhost:50054', # Pool Controller 1 20 | 'localhost:50055', # Pool Controller 2 21 | 'localhost:50056', # Pool Controller 3 22 | ] 23 | 24 | def fetch_utilizations_from_pools(self): 25 | utilizations = [] 26 | for pool_controller_address in self.pool_controllers: 27 | with grpc.insecure_channel(pool_controller_address) as channel: 28 | stub = pool_manager_pb2_grpc.PoolControllerStub(channel) 29 | response = stub.GetUtilizationStatus(empty_pb2.Empty()) 30 | utilizations.extend(response.utilizations) 31 | return utilizations 32 | 33 | def update_pools_with_assignments(self, assignments): 34 | for i, pool_controller_address in enumerate(self.pool_controllers): 35 | 
with grpc.insecure_channel(pool_controller_address) as channel: 36 | stub = pool_manager_pb2_grpc.PoolControllerStub(channel) 37 | assignment = assignments[i] 38 | request = pool_manager_pb2.UpdateAssignmentRequest( 39 | pool_id=assignment.pool_id, 40 | start_core_id=assignment.start_core_id, 41 | core_count=assignment.core_count, 42 | frequency_level=assignment.frequency_level 43 | ) 44 | stub.UpdateAssignment(request) 45 | 46 | def GetUtilizations(self, request, context): 47 | # Fetch utilizations from all Pool Controllers 48 | utilizations = self.fetch_utilizations_from_pools() 49 | 50 | # Send utilizations to Node Controller 51 | update_request = node_controller_pb2.UpdatePoolUtilizationsRequest( 52 | utilizations=utilizations 53 | ) 54 | response = self.node_controller_stub.UpdatePoolUtilizations(update_request) 55 | 56 | # Send new assignments to Pool Controllers 57 | self.update_pools_with_assignments(response.assignments) 58 | 59 | return pool_manager_pb2.UpdateUtilizationsResponse( 60 | assignments=response.assignments, 61 | message="Utilizations processed and assignments updated." 62 | ) 63 | 64 | def serve(): 65 | # Connect to the NodeController 66 | node_controller_channel = grpc.insecure_channel('localhost:50052') 67 | 68 | # Create a gRPC server 69 | server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) 70 | pool_manager_pb2_grpc.add_PoolManagerServicer_to_server( 71 | PoolManagerServicer(node_controller_channel), server 72 | ) 73 | 74 | # Listen on port 50053 75 | server.add_insecure_port('[::]:50053') 76 | server.start() 77 | print("PoolManager gRPC server is running on port 50053.") 78 | try: 79 | while True: 80 | time.sleep(REFRESH_INTERVAL) 81 | # Periodic update logic can be placed here if needed 82 | except KeyboardInterrupt: 83 | print("Shutting down the server.") 84 | server.stop(0) 85 | 86 | if __name__ == "__main__": 87 | serve() 88 | -------------------------------------------------------------------------------- /ecofaas_init/pool_manager/test_client.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | import time 3 | import pool_manager_pb2 4 | import pool_manager_pb2_grpc 5 | import node_controller_pb2 6 | import node_controller_pb2_grpc 7 | from google.protobuf import empty_pb2 # Correct import 8 | 9 | # Define REFRESH_INTERVAL to match the server configuration 10 | REFRESH_INTERVAL = 60 # Match this with the server's REFRESH_INTERVAL 11 | 12 | def run(): 13 | # Connect to the NodeController server 14 | channel = grpc.insecure_channel('localhost:50052') 15 | stub = node_controller_pb2_grpc.NodeControllerStub(channel) 16 | 17 | # Example utilization data for multiple pools 18 | pool_utilizations = [ 19 | node_controller_pb2.PoolUtilization( 20 | pool_id="pool_1", 21 | num_served_invocations=100, 22 | avg_waiting_time=50.0, # in milliseconds 23 | num_invocations_lower_freq_possible=20, 24 | num_invocations_increased_freq=10 25 | ), 26 | node_controller_pb2.PoolUtilization( 27 | pool_id="pool_2", 28 | num_served_invocations=150, 29 | avg_waiting_time=30.0, 30 | num_invocations_lower_freq_possible=15, 31 | num_invocations_increased_freq=5 32 | ), 33 | node_controller_pb2.PoolUtilization( 34 | pool_id="pool_3", 35 | num_served_invocations=80, 36 | avg_waiting_time=70.0, 37 | num_invocations_lower_freq_possible=10, 38 | num_invocations_increased_freq=20 39 | ), 40 | # Add more pool data as needed 41 | ] 42 | 43 | # Send utilization data to the Node Controller 44 | for util in pool_utilizations: 45 | 
request = node_controller_pb2.UpdatePoolUtilizationRequest( 46 | utilization=util 47 | ) 48 | response = stub.UpdatePoolUtilization(request) 49 | if response.assignment.num_cores > 0: 50 | print(f"Assignment for {util.pool_id}:") 51 | print(f" Number of Cores: {response.assignment.num_cores}") 52 | print(f" Frequency Level: {response.assignment.frequency_level}") 53 | print(f" Message: {response.message}\n") 54 | else: 55 | print(f"Failed to get assignment for {util.pool_id}: {response.message}\n") 56 | 57 | # Keep the client running to allow periodic updates (optional) 58 | # Here, we simulate periodic updates every REFRESH_INTERVAL seconds 59 | try: 60 | while True: 61 | time.sleep(REFRESH_INTERVAL) 62 | # Update utilization data as needed 63 | # For example, send updated metrics 64 | # This part can be customized based on actual use-case 65 | except KeyboardInterrupt: 66 | print("Client shutting down.") 67 | 68 | if __name__ == "__main__": 69 | run() 70 | -------------------------------------------------------------------------------- /ecofaas_init/workflow_controller/profiling.csv: -------------------------------------------------------------------------------- 1 | function_name,frequency,exec_time,energy 2 | f1,1.0,10,50 3 | f1,1.5,7,70 4 | f1,2.0,5,90 5 | f2,1.0,5,100 6 | f2,1.5,3,140 7 | f2,2.0,2,180 -------------------------------------------------------------------------------- /ecofaas_init/workflow_controller/test_client.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | import workflow_controller_pb2 3 | import workflow_controller_pb2_grpc 4 | 5 | def run(): 6 | # Connect to the server 7 | channel = grpc.insecure_channel('localhost:50051') 8 | stub = workflow_controller_pb2_grpc.WorkflowControllerStub(channel) 9 | 10 | # Prepare the request 11 | request = workflow_controller_pb2.OptimizeRequest( 12 | functions=['f1', 'f1'], # Replace with desired function names 13 | slo=19.0 # Replace with desired SLO in milliseconds 14 | ) 15 | 16 | # Send the request 17 | response = stub.OptimizeDeadlines(request) 18 | 19 | # Handle the response 20 | if response.message == "Optimization successful.": 21 | print("Optimal Per-Function Deadlines and Settings:") 22 | for deadline in response.deadlines: 23 | print(f"Function {deadline.function_name}:") 24 | print(f" Frequency: {deadline.frequency} GHz") 25 | print(f" Execution Time: {deadline.exec_time} ms") 26 | print(f" Energy Consumption: {deadline.energy} J") 27 | print(f" Deadline Fraction: {deadline.deadline_fraction:.2f}") 28 | print(f"Total Execution Time: {response.total_execution_time} ms") 29 | print(f"Total Energy Consumption: {response.total_energy} J") 30 | else: 31 | print(response.message) 32 | 33 | if __name__ == "__main__": 34 | run() 35 | -------------------------------------------------------------------------------- /ecofaas_init/workflow_controller/workflow_controller.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package workflowcontroller; 4 | 5 | // The request message containing the list of functions and the SLO. 6 | message OptimizeRequest { 7 | repeated string functions = 1; // List of function names 8 | double slo = 2; // Service Level Objective in milliseconds 9 | } 10 | 11 | // Information about each function's optimized settings. 
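// Worked example (assuming the optimizer simply picks one profiling.csv row
// per function so that total exec_time fits the SLO while total energy is
// minimized): for functions [f1, f1] and slo = 19 ms (see test_client.py),
// the cheapest feasible choice is f1 @ 1.0 GHz (10 ms, 50 J) plus
// f1 @ 1.5 GHz (7 ms, 70 J): 17 ms <= 19 ms and 120 J in total, with
// deadline fractions 10/19 and 7/19.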
12 | message FunctionDeadline { 13 | string function_name = 1; 14 | double frequency = 2; // GHz 15 | double exec_time = 3; // milliseconds 16 | double energy = 4; // Joules 17 | double deadline_fraction = 5; // Fraction of SLO 18 | } 19 | 20 | // The response message containing optimized per-function deadlines. 21 | message OptimizeResponse { 22 | repeated FunctionDeadline deadlines = 1; 23 | double total_execution_time = 2; // milliseconds 24 | double total_energy = 3; // Joules 25 | string message = 4; // Success or error message 26 | } 27 | 28 | // The WorkflowController service definition. 29 | service WorkflowController { 30 | // Optimizes per-function deadlines based on the provided functions and SLO. 31 | rpc OptimizeDeadlines (OptimizeRequest) returns (OptimizeResponse); 32 | } 33 | -------------------------------------------------------------------------------- /ecofaas_init/workflow_controller/workflow_controller_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: workflow_controller.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19workflow_controller.proto\x12\x12workflowcontroller\"1\n\x0fOptimizeRequest\x12\x11\n\tfunctions\x18\x01 \x03(\t\x12\x0b\n\x03slo\x18\x02 \x01(\x01\"z\n\x10\x46unctionDeadline\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x11\n\tfrequency\x18\x02 \x01(\x01\x12\x11\n\texec_time\x18\x03 \x01(\x01\x12\x0e\n\x06\x65nergy\x18\x04 \x01(\x01\x12\x19\n\x11\x64\x65\x61\x64line_fraction\x18\x05 \x01(\x01\"\x90\x01\n\x10OptimizeResponse\x12\x37\n\tdeadlines\x18\x01 \x03(\x0b\x32$.workflowcontroller.FunctionDeadline\x12\x1c\n\x14total_execution_time\x18\x02 \x01(\x01\x12\x14\n\x0ctotal_energy\x18\x03 \x01(\x01\x12\x0f\n\x07message\x18\x04 \x01(\t2t\n\x12WorkflowController\x12^\n\x11OptimizeDeadlines\x12#.workflowcontroller.OptimizeRequest\x1a$.workflowcontroller.OptimizeResponseb\x06proto3') 17 | 18 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) 19 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'workflow_controller_pb2', globals()) 20 | if _descriptor._USE_C_DESCRIPTORS == False: 21 | 22 | DESCRIPTOR._options = None 23 | _OPTIMIZEREQUEST._serialized_start=49 24 | _OPTIMIZEREQUEST._serialized_end=98 25 | _FUNCTIONDEADLINE._serialized_start=100 26 | _FUNCTIONDEADLINE._serialized_end=222 27 | _OPTIMIZERESPONSE._serialized_start=225 28 | _OPTIMIZERESPONSE._serialized_end=369 29 | _WORKFLOWCONTROLLER._serialized_start=371 30 | _WORKFLOWCONTROLLER._serialized_end=487 31 | # @@protoc_insertion_point(module_scope) 32 | -------------------------------------------------------------------------------- /ecofaas_init/workflow_controller/workflow_controller_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import workflow_controller_pb2 as workflow__controller__pb2 6 | 7 | 8 | class WorkflowControllerStub(object): 9 | """The WorkflowController service definition. 10 | """ 11 | 12 | def __init__(self, channel): 13 | """Constructor. 14 | 15 | Args: 16 | channel: A grpc.Channel. 17 | """ 18 | self.OptimizeDeadlines = channel.unary_unary( 19 | '/workflowcontroller.WorkflowController/OptimizeDeadlines', 20 | request_serializer=workflow__controller__pb2.OptimizeRequest.SerializeToString, 21 | response_deserializer=workflow__controller__pb2.OptimizeResponse.FromString, 22 | ) 23 | 24 | 25 | class WorkflowControllerServicer(object): 26 | """The WorkflowController service definition. 27 | """ 28 | 29 | def OptimizeDeadlines(self, request, context): 30 | """Optimizes per-function deadlines based on the provided functions and SLO. 31 | """ 32 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 33 | context.set_details('Method not implemented!') 34 | raise NotImplementedError('Method not implemented!') 35 | 36 | 37 | def add_WorkflowControllerServicer_to_server(servicer, server): 38 | rpc_method_handlers = { 39 | 'OptimizeDeadlines': grpc.unary_unary_rpc_method_handler( 40 | servicer.OptimizeDeadlines, 41 | request_deserializer=workflow__controller__pb2.OptimizeRequest.FromString, 42 | response_serializer=workflow__controller__pb2.OptimizeResponse.SerializeToString, 43 | ), 44 | } 45 | generic_handler = grpc.method_handlers_generic_handler( 46 | 'workflowcontroller.WorkflowController', rpc_method_handlers) 47 | server.add_generic_rpc_handlers((generic_handler,)) 48 | 49 | 50 | # This class is part of an EXPERIMENTAL API. 51 | class WorkflowController(object): 52 | """The WorkflowController service definition. 
53 | """ 54 | 55 | @staticmethod 56 | def OptimizeDeadlines(request, 57 | target, 58 | options=(), 59 | channel_credentials=None, 60 | call_credentials=None, 61 | insecure=False, 62 | compression=None, 63 | wait_for_ready=None, 64 | timeout=None, 65 | metadata=None): 66 | return grpc.experimental.unary_unary(request, target, '/workflowcontroller.WorkflowController/OptimizeDeadlines', 67 | workflow__controller__pb2.OptimizeRequest.SerializeToString, 68 | workflow__controller__pb2.OptimizeResponse.FromString, 69 | options, channel_credentials, 70 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 71 | -------------------------------------------------------------------------------- /experiments/__pycache__/rnn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/experiments/__pycache__/rnn.cpython-38.pyc -------------------------------------------------------------------------------- /experiments/baseline_P99.txt: -------------------------------------------------------------------------------- 1 | =====================cnn_servingLOW_LOAD===================== 2 | 3.1723406536076535 3 | =====================img_resLOW_LOAD===================== 4 | 2.4728994703680804 5 | =====================img_rotLOW_LOAD===================== 6 | 2.8787631064204846 7 | =====================ml_trainLOW_LOAD===================== 8 | 30.77259219003389 9 | =====================vid_procLOW_LOAD===================== 10 | 10.730756392064992 11 | =====================web_serveLOW_LOAD===================== 12 | 0.8506797732586119 13 | =====================cnn_servingMED_LOAD===================== 14 | 12.476873103737637 15 | =====================img_resMED_LOAD===================== 16 | 31.2018263547958 17 | =====================img_rotMED_LOAD===================== 18 | 18.985816288599565 19 | =====================ml_trainMED_LOAD===================== 20 | 113.93917410257883 21 | =====================vid_procMED_LOAD===================== 22 | 46.99029709507471 23 | =====================web_serveMED_LOAD===================== 24 | 21.194402299836415 25 | =====================cnn_servingHIGH_LOAD===================== 26 | 52.672445944852065 27 | =====================img_resHIGH_LOAD===================== 28 | 91.47799939852725 29 | =====================img_rotHIGH_LOAD===================== 30 | 71.95376869021 31 | =====================ml_trainHIGH_LOAD===================== 32 | 435.11039849606357 33 | =====================vid_procHIGH_LOAD===================== 34 | 67.07152842921451 35 | =====================web_serveHIGH_LOAD===================== 36 | 57.20575127561942 37 | -------------------------------------------------------------------------------- /experiments/cpu-util-out.txt: -------------------------------------------------------------------------------- 1 | Start with MXContainer 2 | CPU Utilization: 95.11 3 | CPU Utilization: 101.87 4 | CPU Utilization: 97.42 5 | CPU Utilization: 93.90 6 | CPU Utilization: 96.77 7 | CPU Utilization: 98.85 8 | CPU Utilization: 95.73 9 | CPU Utilization: 100.99 10 | CPU Utilization: 98.22 11 | CPU Utilization: 99.38 12 | CPU Utilization: 96.48 13 | CPU Utilization: 97.68 14 | CPU Utilization: 102.17 15 | CPU Utilization: 101.75 16 | CPU Utilization: 99.97 17 | CPU Utilization: 97.97 18 | CPU Utilization: 96.21 19 | CPU Utilization: 98.99 20 | CPU Utilization: 98.12 21 | CPU Utilization: 99.26 22 | CPU Utilization: 
100.40 23 | CPU Utilization: 95.62 24 | CPU Utilization: 97.96 25 | Start with Baseline 26 | CPU Utilization: 16.96 27 | CPU Utilization: 22.66 28 | CPU Utilization: 22.16 29 | CPU Utilization: 23.46 30 | CPU Utilization: 20.69 31 | CPU Utilization: 23.17 32 | CPU Utilization: 25.53 33 | CPU Utilization: 23.38 34 | CPU Utilization: 23.20 35 | CPU Utilization: 21.85 36 | CPU Utilization: 23.86 37 | CPU Utilization: 21.02 38 | CPU Utilization: 25.62 39 | CPU Utilization: 28.79 40 | CPU Utilization: 22.94 41 | CPU Utilization: 22.41 42 | CPU Utilization: 23.01 43 | CPU Utilization: 23.89 44 | CPU Utilization: 23.84 45 | CPU Utilization: 22.25 46 | CPU Utilization: 23.07 47 | CPU Utilization: 26.75 48 | CPU Utilization: 27.96 49 | -------------------------------------------------------------------------------- /experiments/cpu_utils.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import docker 3 | import os 4 | import time 5 | import threading 6 | 7 | services = ["create_ord_cpu_test", "create_ord_cpu_test_base"] 8 | ip_addresses = [] 9 | 10 | def measure_cpu_util(service, stop_event): 11 | # Sample `docker stats` once per second until the experiment signals stop 12 | while not stop_event.is_set(): 13 | time.sleep(1) 14 | output = os.popen("docker stats " + service + " --no-stream --format '{{.CPUPerc}}'").read() 15 | try: 16 | flout = float(output.strip().rstrip('%')) 17 | except ValueError: 18 | continue 19 | perc_cpu = max(0, flout-100) 20 | print(f"CPU Utilization: {perc_cpu:.2f}") 21 | 22 | containers = ["MXContainer", "Baseline"] 23 | for service in services: 24 | output = subprocess.check_output("docker run -d --name " + service + " --cpuset-cpus=0,1 jovanvr97/" + service, shell=True).decode("utf-8") 25 | time.sleep(5) 26 | print("Start with " + containers[services.index(service)]) 27 | stop_event = threading.Event() 28 | threading.Thread(target=measure_cpu_util, args=(service, stop_event), daemon=True).start() 29 | client = docker.DockerClient() 30 | container = client.containers.get(service) 31 | ip_add = container.attrs['NetworkSettings']['IPAddress'] 32 | ip_addresses.append(ip_add) 33 | 34 | time.sleep(50) 35 | stop_event.set() 36 | 37 | try: 38 | output = subprocess.check_output("docker stop " + service, shell=True).decode("utf-8") 39 | output = subprocess.check_output("docker rm " + service, shell=True).decode("utf-8") 40 | except subprocess.CalledProcessError: 41 | pass 42 | 43 | print("End experiment.") -------------------------------------------------------------------------------- /experiments/expected-output-all.txt: -------------------------------------------------------------------------------- 1 | =====================cnn_servingLOW_LOAD===================== 2 | 0.5820772647857666 3 | 0.593254566192627 4 | 0.6188956737518311 5 | 0.628932785987854 6 | 0.6369624757766723 7 | =====================img_resLOW_LOAD===================== 8 | 0.34524944850376676 9 | 0.3554530143737793 10 | 0.4114576816558838 11 | 0.4326005697250366 12 | 0.44951488018035884 13 | =====================img_rotLOW_LOAD===================== 14 | 0.3858306407928467 15 | 0.3964378833770752 16 | 0.4317786693572998 17 | 0.44036245346069336 18 | 0.4472294807434082 19 | =====================ml_trainLOW_LOAD===================== 20 | 5.679911920002529 21 | 5.524739742279053 22 | 6.071630954742432 23 | 6.098921895027161 24 | 6.120754647254944 25 | =====================vid_procLOW_LOAD===================== 26 | 1.1846439497811454 27 | 1.4069328308105469 28 | 1.9192342281341552 29 | 1.941785669326782 30 | 1.9598268222808837 31 | =====================web_serveLOW_LOAD===================== 32 | 0.24830358369009836 33 | 0.2500324249267578 34 | 0.2698817729949951 35 |
0.27645518779754635 36 | 0.2817139196395874 37 | =====================cnn_servingMED_LOAD===================== 38 | 0.6978050095694406 39 | 0.6310880184173584 40 | 0.7588737964630127 41 | 1.5922033071517943 42 | 1.6620938444137572 43 | =====================img_resMED_LOAD===================== 44 | 0.6807353905269078 45 | 0.3554391860961914 46 | 1.3667163848876953 47 | 1.415076637268066 48 | 2.6504557418823183 49 | =====================img_rotMED_LOAD===================== 50 | 0.6038853985922678 51 | 0.3866896629333496 52 | 1.3504083156585693 53 | 1.3890479564666747 54 | 1.407211866378784 55 | =====================ml_trainMED_LOAD===================== 56 | 9.933400283540998 57 | 9.451580286026001 58 | 10.152626848220825 59 | 15.533267712593078 60 | 15.645029773712158 61 | =====================vid_procMED_LOAD===================== 62 | 1.6020769187382289 63 | 1.6468288898468018 64 | 2.219186878204346 65 | 2.259106683731079 66 | 3.4974675130844055 67 | =====================web_serveMED_LOAD===================== 68 | 0.7770122732434954 69 | 0.4286684989929199 70 | 1.724177360534668 71 | 1.8124023914337155 72 | 1.9190665388107297 73 | =====================cnn_servingHIGH_LOAD===================== 74 | 1.6614203740315265 75 | 1.6544811725616455 76 | 3.5129137992858888 77 | 3.632801628112793 78 | 3.723150029182434 79 | =====================img_resHIGH_LOAD===================== 80 | 1.4558836379683162 81 | 1.3941383361816406 82 | 3.378686475753784 83 | 3.618258523941039 84 | 4.406473951339694 85 | =====================img_rotHIGH_LOAD===================== 86 | 1.4601099433669125 87 | 1.4769670963287354 88 | 2.6107838153839107 89 | 3.4117236375808715 90 | 3.7832335996627746 91 | =====================ml_trainHIGH_LOAD===================== 92 | 23.10753878627915 93 | 22.885252952575684 94 | 24.17806749343872 95 | 28.44967947006225 96 | 28.80360601902008 97 | =====================vid_procHIGH_LOAD===================== 98 | 1.6743340348622886 99 | 1.6606206893920898 100 | 2.4690977573394775 101 | 3.5893872499465926 102 | 3.856101245880126 103 | =====================web_serveHIGH_LOAD===================== 104 | 1.1325586617711079 105 | 1.2748839855194092 106 | 1.658215093612671 107 | 3.1176454544067305 108 | 3.3926021909713735 109 | -------------------------------------------------------------------------------- /experiments/hist-out-ref.txt: -------------------------------------------------------------------------------- 1 | ********************BASELINE******************** 2 | Mean = 3.766349454797408 3 | Median = 4.781609535217285 4 | P95 = 7.891412782669067 5 | P99 = 8.57341894388199 6 | ********************MXFaaS******************** 7 | Mean = 0.12208010742272794 8 | Median = 0.07506310939788818 9 | P95 = 0.23057100772857655 10 | P99 = 1.7435910987854006 -------------------------------------------------------------------------------- /experiments/mem-out-ref.txt: -------------------------------------------------------------------------------- 1 | Baseline --> RAM memory used [MB]: 5845 2 | MXFaaS [MB]--> RAM memory used: 85 -------------------------------------------------------------------------------- /experiments/mem.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import docker 4 | import threading 5 | import requests 6 | 7 | # Getting all memory using os.popen() 8 | total_memory, used_memory, free_memory = map( 9 | int, os.popen('free -t -m').readlines()[-1].split()[1:]) 10 | usedMemStart = used_memory 11 | 12 | def 
lambda_func(service): 13 | while True: 14 | try: 15 | requests.post(service, json={"name": "test"}) 16 | break 17 | except: 18 | pass 19 | 20 | # BASELINE 21 | services = ["cnn_serving"] 22 | for service in services: 23 | threads = [] 24 | for indF in range(20): 25 | nameS = service + str(indF) 26 | output = subprocess.check_output("docker run -d --name " + nameS + " --cpu-shares=0 jovanvr97/" + service + "_knative", shell=True).decode("utf-8") 27 | client = docker.DockerClient() 28 | container = client.containers.get(nameS) 29 | ip_add = container.attrs['NetworkSettings']['IPAddress'] 30 | for _ in range(30): 31 | threadToAdd = threading.Thread(target=lambda_func, args=("http://"+ip_add+":9999", )) 32 | threads.append(threadToAdd) 33 | 34 | for thread in threads: 35 | thread.start() 36 | 37 | # Getting all memory using os.popen() 38 | total_memory, used_memory, free_memory = map( 39 | int, os.popen('free -t -m').readlines()[-1].split()[1:]) 40 | 41 | # Memory usage 42 | print("Baseline --> RAM memory used [MB]:", round((used_memory-usedMemStart), 2)) 43 | 44 | for thread in threads: 45 | thread.join() 46 | 47 | output = subprocess.check_output("docker kill $(docker ps -q)", shell=True).decode("utf-8") 48 | for indF in range(20): 49 | nameS = service + str(indF) 50 | output = subprocess.check_output("docker rm " + nameS, shell=True).decode("utf-8") 51 | 52 | # Getting all memory using os.popen() 53 | total_memory, used_memory, free_memory = map(int, os.popen('free -t -m').readlines()[-1].split()[1:]) 54 | usedMemStart = used_memory 55 | 56 | #MXFaaS 57 | for service in services: 58 | threads = [] 59 | nameS = service + str(indF) 60 | output = subprocess.check_output("docker run -d --name " + nameS + " --cpu-shares=0 jovanvr97/" + service + "_knative", shell=True).decode("utf-8") 61 | client = docker.DockerClient() 62 | container = client.containers.get(nameS) 63 | ip_add = container.attrs['NetworkSettings']['IPAddress'] 64 | for _ in range(20*30): 65 | threadToAdd = threading.Thread(target=lambda_func, args=("http://"+ip_add+":9999", )) 66 | threads.append(threadToAdd) 67 | 68 | for thread in threads: 69 | thread.start() 70 | 71 | # Getting all memory using os.popen() 72 | total_memory, used_memory, free_memory = map( 73 | int, os.popen('free -t -m').readlines()[-1].split()[1:]) 74 | 75 | # Memory usage 76 | print("MXFaaS [MB]--> RAM memory used:", round((used_memory-usedMemStart), 2)) 77 | 78 | for thread in threads: 79 | thread.join() 80 | 81 | output = subprocess.check_output("docker kill $(docker ps -q)", shell=True).decode("utf-8") 82 | output = subprocess.check_output("docker rm " + nameS, shell=True).decode("utf-8") -------------------------------------------------------------------------------- /experiments/microarch-out-ref.txt: -------------------------------------------------------------------------------- 1 | LR Serve first done at second = 9.572263717651367 2 | LR Serve last done at second = 19.572345972061157 3 | LR Serve => Response time reduction = 0.4510460251046025 4 | CNN Serve first done at second = 21.147321701049805 5 | CNN Serve last done at second = 31.45834970474243 6 | CNN Serve => Response time reduction = 0.7199999269599165 7 | RNN Serve first done at second = 33.36118149757385 8 | RNN Serve last done at second = 43.375192642211914 9 | RNN Serve => Response time reduction = 0.8826743651320832 10 | MLTrain first done at second = 52.65940976142883 11 | MLTrain last done at second = 65.8178961277008 12 | MLTrain => Response time reduction = 0.9260675937859138 13 | 
VidConv first done at second = 67.72637367248535 14 | VidConv last done at second = 77.78013467788696 15 | VidConv => Response time reduction = 0.8604064008816342 16 | ImgRes first done at second = 83.13585114479065 17 | ImgRes last done at second = 93.13613748550415 18 | ImgRes => Response time reduction = 0.6564724684004068 19 | ImgRot first done at second = 94.0619728565216 20 | ImgRot last done at second = 104.86844944953918 21 | ImgRot => Response time reduction = 0.9242564227380997 22 | CreateOrd first done at second = 105.38547897338867 23 | CreateOrd last done at second = 115.86001181602478 24 | CreateOrd => Response time reduction = 0.9013310533201736 -------------------------------------------------------------------------------- /experiments/plot_sens_io.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from statistics import mean 3 | from collections import defaultdict 4 | from matplotlib import rcParams 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import matplotlib as mpl 8 | import ast 9 | 10 | rcParams['font.serif'] = ['Times'] 11 | #plt.style.use('grayscale') 12 | new_params = { 13 | 'axes.labelsize': 70, 14 | 'xtick.labelsize': 70, 15 | 'ytick.labelsize': 70, 16 | 'legend.fontsize': 70, 17 | 'lines.markersize': 15, 18 | 'xtick.major.pad': 0, 19 | 'ytick.major.pad': 10, 20 | 'font.size': 70, 21 | 'grid.linestyle': 'dashdot', 22 | 'patch.edgecolor': 'black', 23 | 'patch.force_edgecolor': True, 24 | 'font.serif': 'Times', 25 | 'grid.alpha': 0.4, 26 | } 27 | mpl.rcParams.update(new_params) 28 | 29 | fig, ax = plt.subplots(figsize=(12, 6)) 30 | 31 | ax2 = ax.twinx() 32 | 33 | t_merges = [0, 0.5, 1, 10, 20, 50, 100, 200, 500, 1000] 34 | 35 | percent_merged = [0.9933065595716198, 0.9866131191432396, 0.9678714859437751, 0.9397590361445783, 0.8908969210174029, 0.7891566265060241, 0.6941097724230254, 0.47121820615796517, 0.45314591700133866, 0.44176706827309237] 36 | fetch_latency = [1.5534909129142755, 1.3967915058135986, 1.2918736934661864, 1.2442637801170349, 1.0249151706695556, 1.0012211084365845, 0.9711835551261902, 0.9422359085083008, 0.847205429077148, 0.9911181426048279] 37 | percent_merged.reverse() 38 | fetch_latency.reverse() 39 | 40 | maxFL = max(fetch_latency) 41 | 42 | #for indFL in range(len(fetch_latency)): 43 | # fetch_latency[indFL] = fetch_latency[indFL] / maxFL 44 | 45 | #ax.plot(x1, y1, label="Cloud Storage", color="orange") 46 | ax.set_xscale("log") 47 | lns1 = ax.plot(t_merges, percent_merged, label = "Fraction of merged I/Os", color="black", linewidth=3) 48 | lns2 = ax2.plot(t_merges, fetch_latency, "--", label="Tail Data Access Latency [s]", color="red", linewidth=3) 49 | 50 | lns = lns1 + lns2 51 | lbs = [l.get_label() for l in lns] 52 | 53 | ax.set_xlabel("$T_{merge}$ [ms]", fontsize=22) 54 | ax.set_ylabel("Fraction of merged I/Os", fontsize=22) 55 | ax2.set_ylabel("Tail Latency [s]", fontsize=22) 56 | ax.tick_params(axis="y", labelsize=20) 57 | ax2.tick_params(axis="y", labelsize=20) 58 | ax.tick_params(axis="x", labelsize=20) 59 | ax.set_yticks(np.arange(0.0, 1.1, 0.2)) 60 | ax2.set_yticks(np.arange(0.0, 2.1, 0.5)) 61 | ax.grid(visible=True, axis='y') 62 | handles, labels = ax.get_legend_handles_labels() 63 | ax.legend(lns, lbs, loc="lower left", mode=None, borderaxespad=0, fancybox=True, shadow=True, ncol=1, frameon=False, fontsize=23) 64 | #ax2.legend(loc="lower left", mode=None, borderaxespad=0, fancybox=True, shadow=True, ncol=1, frameon=False, fontsize=23) 
65 | plt.tight_layout() 66 | plt.savefig("sensitivity_tmerge.pdf") 67 | -------------------------------------------------------------------------------- /experiments/rnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | 5 | 6 | class RNN(nn.Module): 7 | def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters): 8 | super(RNN, self).__init__() 9 | self.hidden_size = hidden_size 10 | 11 | self.all_categories = all_categories 12 | self.n_categories = n_categories 13 | self.all_letters = all_letters 14 | self.n_letters = n_letters 15 | 16 | self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) 17 | self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) 18 | self.o2o = nn.Linear(hidden_size + output_size, output_size) 19 | self.dropout = nn.Dropout(0.1) 20 | self.softmax = nn.LogSoftmax(dim=1) 21 | 22 | def forward(self, category, input_tensor, hidden): 23 | input_combined = torch.cat((category, input_tensor, hidden), 1) 24 | hidden = self.i2h(input_combined) 25 | output = self.i2o(input_combined) 26 | output_combined = torch.cat((hidden, output), 1) 27 | output = self.o2o(output_combined) 28 | output = self.dropout(output) 29 | output = self.softmax(output) 30 | return output, hidden 31 | 32 | def init_hidden(self): 33 | return Variable(torch.zeros(1, self.hidden_size)) 34 | 35 | @staticmethod 36 | def gen_input_tensor(all_letters, n_letters, line): 37 | tensor = torch.zeros(len(line), 1, n_letters) 38 | for li in range(len(line)): 39 | letter = line[li] 40 | tensor[li][0][all_letters.find(letter)] = 1 41 | return tensor 42 | 43 | @staticmethod 44 | def gen_category_tensor(all_categories, n_categories, category): 45 | li = all_categories.index(category) 46 | tensor = torch.zeros(1, n_categories) 47 | tensor[0][li] = 1 48 | return tensor 49 | 50 | # Sample from a category and starting letter 51 | def sample(self, category, start_letter='A'): 52 | category_tensor = Variable(self.gen_category_tensor(self.all_categories, self.n_categories, category)) 53 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter)) 54 | hidden = self.init_hidden() 55 | 56 | output_name = start_letter 57 | 58 | max_length = 20 59 | for i in range(max_length): 60 | output, hidden = self.forward(category_tensor, input_tensor[0], hidden) 61 | topv, topi = output.data.topk(1) 62 | topi = topi[0][0] 63 | 64 | if topi == self.n_letters - 1: 65 | break 66 | else: 67 | letter = self.all_letters[topi] 68 | output_name += letter 69 | 70 | input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter)) 71 | 72 | return output_name 73 | 74 | # Get multiple samples from one category and multiple starting letters 75 | def samples(self, category, start_letters='ABC'): 76 | for start_letter in start_letters: 77 | yield self.sample(category, start_letter) 78 | -------------------------------------------------------------------------------- /experiments/run-all.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import docker 3 | import numpy as np 4 | import time 5 | import requests 6 | import threading 7 | from statistics import mean, median 8 | 9 | services = ["cnn_serving", "img_res", "img_rot", "ml_train", "vid_proc", "web_serve"] 10 | ip_addresses = [] 11 | 12 | for service in services: 13 | output 
= subprocess.check_output("docker run -d --name " + service + " --cpu-shares=0 jovanvr97/" + service + "_knative", shell=True).decode("utf-8") 14 | 15 | client = docker.DockerClient() 16 | container = client.containers.get(service) 17 | ip_add = container.attrs['NetworkSettings']['IPAddress'] 18 | ip_addresses.append(ip_add) 19 | 20 | def lambda_func(service): 21 | global times 22 | while True: 23 | try: 24 | t1 = time.time() 25 | r = requests.post(service, json={"name": "test"}) 26 | break 27 | except: 28 | pass 29 | t2 = time.time() 30 | times.append(t2-t1) 31 | 32 | def EnforceActivityWindow(start_time, end_time, instance_events): 33 | events_iit = [] 34 | events_abs = [0] + instance_events 35 | event_times = [sum(events_abs[:i]) for i in range(1, len(events_abs) + 1)] 36 | event_times = [e for e in event_times if (e > start_time)and(e < end_time)] 37 | try: 38 | events_iit = [event_times[0]] + [event_times[i]-event_times[i-1] 39 | for i in range(1, len(event_times))] 40 | except: 41 | pass 42 | return events_iit 43 | 44 | loads = [5, 30, 80] 45 | load_desc = ["LOW_LOAD", "MED_LOAD", "HIGH_LOAD"] 46 | 47 | output_file = open("run-all-out.txt", "w") 48 | 49 | indR = 0 50 | for load in loads: 51 | duration = 1 52 | seed = 100 53 | rate = load 54 | # generate Poisson's distribution of events 55 | inter_arrivals = [] 56 | np.random.seed(seed) 57 | beta = 1.0/rate 58 | oversampling_factor = 2 59 | inter_arrivals = list(np.random.exponential(scale=beta, size=int(oversampling_factor*duration*rate))) 60 | instance_events = EnforceActivityWindow(0,duration,inter_arrivals) 61 | 62 | for service in services: 63 | 64 | threads = [] 65 | times = [] 66 | after_time, before_time = 0, 0 67 | 68 | st = 0 69 | for t in instance_events: 70 | st = st + t - (after_time - before_time) 71 | before_time = time.time() 72 | if st > 0: 73 | time.sleep(st) 74 | 75 | threadToAdd = threading.Thread(target=lambda_func, args=("http://"+ip_addresses[services.index(service)]+":9999", )) 76 | threads.append(threadToAdd) 77 | threadToAdd.start() 78 | after_time = time.time() 79 | 80 | for thread in threads: 81 | thread.join() 82 | 83 | print("=====================" + service + load_desc[loads.index(load)] + "=====================", file=output_file, flush=True) 84 | print(mean(times), file=output_file, flush=True) 85 | print(median(times), file=output_file, flush=True) 86 | print(np.percentile(times, 90), file=output_file, flush=True) 87 | print(np.percentile(times, 95), file=output_file, flush=True) 88 | print(np.percentile(times, 99), file=output_file, flush=True) 89 | 90 | 91 | for service in services: 92 | output = subprocess.check_output("docker stop " + service, shell=True).decode("utf-8") 93 | output = subprocess.check_output("docker rm " + service, shell=True).decode("utf-8") 94 | 95 | -------------------------------------------------------------------------------- /experiments/sens-io-out-ref.txt: -------------------------------------------------------------------------------- 1 | Tmerge values = [1, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.001, 0.0005, 0] 2 | Percentage of merged I/Os = [0.9980724749421742, 0.9965304548959136, 0.9930609097918273, 0.9826522744795683, 0.9687740940632228, 0.9313801079414032, 0.8962991518889746, 0.7925983037779492, 0.7868157286044719, 0.7505782575173477] 3 | Tail Latencies = [1.0870697498321533, 0.6005066752433776, 0.3033761501312256, 0.20587611198425293, 0.1604669332504272, 0.13138047456741334, 0.1258947491645813, 0.12295324802398681, 0.1298491835594177, 0.12566943168640138] 
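The reference numbers above come from the I/O-merging mechanism exercised by sens_io.py below: concurrent fetches of the same blob key within a T_merge window are coalesced, so a single leader thread issues the storage request and wakes the waiting followers. As a reading aid, here is a minimal, self-contained sketch of that idea; the names MergeTable, fetch_fn, and t_merge are illustrative only, and the full bookkeeping (request_table, to_issue, to_inform) lives in sens_io.py itself:

import threading
import time

class MergeTable:
    # Coalesce concurrent fetches of the same key inside a t_merge window.
    def __init__(self, fetch_fn, t_merge):
        self.fetch_fn = fetch_fn      # the real storage fetch, e.g. a blob download
        self.t_merge = t_merge        # merge window in seconds
        self.lock = threading.Lock()
        self.pending = {}             # key -> (event, result holder)

    def fetch(self, key):
        with self.lock:
            entry = self.pending.get(key)
            if entry is None:
                event, holder = threading.Event(), {}
                self.pending[key] = (event, holder)
        if entry is not None:         # follower: piggyback on the leader's I/O
            event, holder = entry
            event.wait()
            return holder["value"]
        time.sleep(self.t_merge)      # leader: linger so later requests can merge
        holder["value"] = self.fetch_fn(key)
        with self.lock:
            self.pending.pop(key)
        event.set()                   # wake every merged follower
        return holder["value"]

A larger t_merge merges a bigger fraction of I/Os but adds up to t_merge of delay on the leader path; that trade-off is exactly what the Tmerge sweep above quantifies.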
-------------------------------------------------------------------------------- /experiments/sens_io.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | from azure.storage.blob import BlobClient 4 | import numpy as np 5 | import sys 6 | import threading 7 | import random 8 | 9 | myImages = [] 10 | for indImg in range(20): 11 | myImages.append("img"+str(indImg)+".png") 12 | 13 | connection_string = "DefaultEndpointsProtocol=https;AccountName=serverlesscache;AccountKey=O7MZkxwjyBWTcPL4fDoHi6n8GsYECQYiMe+KLOIPLpzs9BoMONPg2thf1wM1pxlVxuICJvqL4hWb+AStIKVWow==;EndpointSuffix=core.windows.net" 14 | 15 | def fetch_data_storage(key): 16 | blob_client = BlobClient.from_connection_string(connection_string, container_name="artifacteval", blob_name=key) 17 | blob_client.download_blob() 18 | 19 | def fetch_data(key, myId, tMerge): 20 | # Function: fetch the data value from the remote global storage 21 | global request_table 22 | global wait_events 23 | global return_values 24 | global numIssued 25 | global numWait 26 | global issueTimes 27 | global to_issue 28 | global to_inform 29 | 30 | issueTimes.append(time.time()) 31 | 32 | leader = False 33 | toIssue = False 34 | 35 | lockTable.acquire() 36 | 37 | if key not in request_table: 38 | request_table[key] = [] 39 | toIssue = True 40 | if len(to_issue) == 0: 41 | leader = True 42 | else: 43 | wait_event = threading.Event() 44 | to_inform[myId] = wait_event 45 | to_issue.append(myId) 46 | else: 47 | myEvent = threading.Event() 48 | wait_events[myId] = myEvent 49 | 50 | request_table[key].append(myId) 51 | 52 | lockTable.release() 53 | 54 | if toIssue: 55 | 56 | if leader: 57 | 58 | time.sleep(tMerge) 59 | 60 | lockTable.acquire() 61 | 62 | list_to_inform = to_issue 63 | list_to_inform.remove(myId) 64 | to_issue = [] 65 | 66 | lockTable.release() 67 | 68 | numIssued += 1 69 | 70 | fetch_data_storage(key) 71 | 72 | for elemInf in list_to_inform: 73 | to_inform[elemInf].set() 74 | else: 75 | numWait += 1 76 | wait_event.wait() 77 | 78 | returnValue = {} 79 | returnValue["value"] = 1 80 | returnValue["seq_num"] = 2 81 | 82 | lockTable.acquire() 83 | 84 | 85 | 86 | return_values[key] = returnValue 87 | request_table[key].remove(myId) 88 | toWakeUp = request_table[key] 89 | for elem in toWakeUp: 90 | wait_events[elem].set() 91 | wait_events.pop(elem) 92 | 93 | if len(request_table[key]) == 0: 94 | request_table.pop(key) 95 | 96 | lockTable.release() 97 | 98 | else: 99 | 100 | myEvent.wait() 101 | 102 | lockTable.acquire() 103 | 104 | numWait += 1 105 | 106 | returnValue = return_values[key] 107 | request_table[key].remove(myId) 108 | if len(request_table[key]) == 0: 109 | request_table.pop(key) 110 | else: 111 | toWakeUp = request_table[key] 112 | for elem in toWakeUp: 113 | if elem in wait_events: 114 | wait_events[elem].set() 115 | wait_events.pop(elem) 116 | 117 | lockTable.release() 118 | 119 | return returnValue 120 | 121 | def lambda_func(params): 122 | global times 123 | 124 | time1 = time.time() 125 | keyIn = params["inImg"] 126 | fetch_data(keyIn, params["myId"], params["tMerge"]) 127 | time4 = time.time() 128 | 129 | times.append(time4-time1) 130 | return {"Image":"rotated"} 131 | 132 | def EnforceActivityWindow(start_time, end_time, instance_events): 133 | events_iit = [] 134 | events_abs = [0] + instance_events 135 | event_times = [sum(events_abs[:i]) for i in range(1, len(events_abs) + 1)] 136 | event_times = [e for e in event_times if (e > start_time)and(e < end_time)] 137 | 
try: 138 | events_iit = [event_times[0]] + [event_times[i]-event_times[i-1] 139 | for i in range(1, len(event_times))] 140 | except: 141 | pass 142 | return events_iit 143 | 144 | tMerges = [1, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.001, 0.0005, 0] 145 | prints1 = [] 146 | prints2 = [] 147 | 148 | for tMerge in tMerges: 149 | 150 | times = [] 151 | 152 | request_table = {} 153 | wait_events = {} 154 | return_values = {} 155 | lockTable = threading.Lock() 156 | numIssued = 0 157 | numWait = 0 158 | 159 | to_issue = [] 160 | to_inform = {} 161 | 162 | issueTimes = [] 163 | 164 | duration = 5 165 | seed = 100 166 | rate = 500 167 | 168 | # generate Poisson's distribution of events 169 | inter_arrivals = [] 170 | np.random.seed(seed) 171 | beta = 1.0/rate 172 | oversampling_factor = 2 173 | inter_arrivals = list(np.random.exponential(scale=beta, size=int(oversampling_factor*duration*rate))) 174 | instance_events = EnforceActivityWindow(0,duration,inter_arrivals) 175 | threads = [] 176 | after_time, before_time = 0, 0 177 | 178 | st = 0 179 | for t in instance_events: 180 | st = st + t - (after_time - before_time) 181 | before_time = time.time() 182 | if st > 0: 183 | time.sleep(st) 184 | inImg = myImages[random.randint(0,len(myImages)-1)] 185 | keyArg = {"inImg":inImg, "outImg":"jovan_photo_rot.jpg", "myId":len(threads), "tMerge": tMerge} 186 | threadToAdd = threading.Thread(target=lambda_func, args=(keyArg,)) 187 | threads.append(threadToAdd) 188 | threadToAdd.start() 189 | after_time = time.time() 190 | 191 | for thread in threads: 192 | thread.join() 193 | 194 | prints1.append(float(numWait/(numWait+numIssued))) 195 | prints2.append(np.percentile(times, 95)) 196 | 197 | print("Tmerge values = ", tMerges) 198 | print("Percentage of merged I/Os = ", prints1) 199 | print("Tail Latencies = ", prints2) -------------------------------------------------------------------------------- /pythonAction/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 19 | 20 | # Python 3 OpenWhisk Runtime Container 21 | 22 | ## 1.0.3 23 | Changes: 24 | - Update base image to openwhisk/dockerskeleton:1.3.3 25 | 26 | ## 1.0.2 27 | Changes: 28 | - Update base image to openwhisk/dockerskeleton:1.3.2 29 | 30 | ## 1.0.1 31 | Changes: 32 | - Update base image to openwhisk/dockerskeleton:1.3.1 33 | 34 | ## 1.0.0 35 | Initial release. 
36 | 37 | Python version = 3.6.1 38 | 39 | - asn1crypto (0.23.0) 40 | - attrs (17.2.0) 41 | - Automat (0.6.0) 42 | - beautifulsoup4 (4.5.3) 43 | - cffi (1.11.1) 44 | - click (6.7) 45 | - constantly (15.1.0) 46 | - cryptography (2.0.3) 47 | - cssselect (1.0.1) 48 | - Flask (0.12) 49 | - gevent (1.2.1) 50 | - greenlet (0.4.12) 51 | - httplib2 (0.10.3) 52 | - idna (2.6) 53 | - incremental (17.5.0) 54 | - itsdangerous (0.24) 55 | - Jinja2 (2.9.6) 56 | - kafka-python (1.3.4) 57 | - lxml (3.7.3) 58 | - MarkupSafe (1.0) 59 | - parsel (1.2.0) 60 | - pip (9.0.1) 61 | - pyasn1 (0.3.7) 62 | - pyasn1-modules (0.1.4) 63 | - pycparser (2.18) 64 | - PyDispatcher (2.0.5) 65 | - pyOpenSSL (17.3.0) 66 | - python-dateutil (2.6.0) 67 | - queuelib (1.4.2) 68 | - requests (2.13.0) 69 | - Scrapy (1.3.3) 70 | - service-identity (17.0.0) 71 | - setuptools (36.5.0) 72 | - simplejson (3.10.0) 73 | - six (1.11.0) 74 | - Twisted (17.1.0) 75 | - virtualenv (15.1.0) 76 | - w3lib (1.18.0) 77 | - Werkzeug (0.12.2) 78 | - wheel (0.29.0) 79 | - zope.interface (4.4.3) 80 | -------------------------------------------------------------------------------- /pythonAction/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | 18 | # Dockerfile for python actions, overrides and extends ActionRunner from actionProxy 19 | FROM whisk/dockerskeleton:vJovan 20 | 21 | RUN apk update 22 | 23 | ENV PACKAGES="\ 24 | dumb-init \ 25 | musl \ 26 | libc6-compat \ 27 | linux-headers \ 28 | build-base \ 29 | bash \ 30 | git \ 31 | ca-certificates \ 32 | freetype \ 33 | libgfortran \ 34 | libgcc \ 35 | libstdc++ \ 36 | openblas \ 37 | tcl \ 38 | tk \ 39 | gcc \ 40 | make \ 41 | curl \ 42 | bzip2-dev \ 43 | libssl1.1 \ 44 | libc-dev \ 45 | libxslt-dev \ 46 | libxml2-dev \ 47 | libffi-dev \ 48 | openssl-dev \ 49 | g++ \ 50 | gfortran \ 51 | vim \ 52 | musl-dev \ 53 | " 54 | ENV PYTHON_PACKAGES="\ 55 | beautifulsoup4 \ 56 | httplib2 \ 57 | lxml \ 58 | python-dateutil \ 59 | requests \ 60 | scrapy \ 61 | simplejson \ 62 | virtualenv \ 63 | twisted \ 64 | psutil \ 65 | joblib \ 66 | minio \ 67 | numpy \ 68 | matplotlib \ 69 | scipy \ 70 | scikit-learn \ 71 | pandas \ 72 | nltk \ 73 | opencv-python \ 74 | " 75 | RUN apk --update --upgrade add gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev 76 | 77 | RUN apk add --no-cache --virtual build-dependencies python3 \ 78 | && apk add --virtual build-runtime \ 79 | build-base python3-dev openblas-dev freetype-dev pkgconfig gfortran \ 80 | && ln -s /usr/include/locale.h /usr/include/xlocale.h \ 81 | && python3 -m ensurepip \ 82 | && rm -r /usr/lib/python*/ensurepip \ 83 | && pip3 install --upgrade pip setuptools \ 84 | && ln -sf /usr/bin/python3 /usr/bin/python \ 85 | && ln -sf pip3 /usr/bin/pip \ 86 | && rm -r /root/.cache \ 87 | && pip install --no-cache-dir $PYTHON_PACKAGES \ 88 | && apk del build-runtime \ 89 | && apk add --no-cache --virtual build-dependencies $PACKAGES \ 90 | && rm -rf /var/cache/apk/* 91 | 92 | ENV FLASK_PROXY_PORT 8080 93 | 94 | RUN mkdir -p /pythonAction 95 | ADD init.py /pythonAction/ 96 | ADD runner.py /pythonAction/ 97 | ADD test.txt /pythonAction 98 | RUN rm -rf /action 99 | RUN mkdir /action 100 | 101 | CMD ["/bin/bash", "-c", "cd pythonAction && python3 -u init.py"] 102 | -------------------------------------------------------------------------------- /pythonAction/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | ext.dockerImageName = 'python3action' 19 | apply from: '../../gradle/docker.gradle' 20 | -------------------------------------------------------------------------------- /pythonAction/init.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import json 3 | import os 4 | import socket 5 | import sys 6 | import signal 7 | import runner 8 | 9 | def signal_handler(sig, frame): 10 | serverSocket.close() 11 | sys.exit(0) 12 | 13 | signal.signal(signal.SIGINT, signal_handler) 14 | 15 | class PrintHook: 16 | def __init__(self,out=1): 17 | self.func = None 18 | self.origOut = None 19 | self.out = out 20 | 21 | def TestHook(self,text): 22 | f = open('hook_log.txt','a') 23 | f.write(text) 24 | f.close() 25 | return 0,0,text 26 | 27 | def Start(self,func=None): 28 | if self.out: 29 | sys.stdout = self 30 | self.origOut = sys.__stdout__ 31 | else: 32 | sys.stderr= self 33 | self.origOut = sys.__stderr__ 34 | 35 | if func: 36 | self.func = func 37 | else: 38 | self.func = self.TestHook 39 | 40 | def Stop(self): 41 | self.origOut.flush() 42 | if self.out: 43 | sys.stdout = sys.__stdout__ 44 | else: 45 | sys.stderr = sys.__stderr__ 46 | self.func = None 47 | 48 | def flush(self): 49 | self.origOut.flush() 50 | 51 | def write(self,text): 52 | proceed = 1 53 | lineNo = 0 54 | addText = '' 55 | if self.func != None: 56 | proceed,lineNo,newText = self.func(text) 57 | if proceed: 58 | if text.split() == []: 59 | self.origOut.write(text) 60 | else: 61 | if self.out: 62 | if lineNo: 63 | try: 64 | raise "Dummy" 65 | except: 66 | codeObject = sys.exc_info()[2].tb_frame.f_back.f_code 67 | fileName = codeObject.co_filename 68 | funcName = codeObject.co_name 69 | self.origOut.write(newText) 70 | 71 | def MyHookOut(text): 72 | return 1,1,' -- pid -- '+ str(os.getpid()) + ' ' + text 73 | 74 | phOut = PrintHook() 75 | phOut.Start(MyHookOut) 76 | 77 | myHost = '0.0.0.0' 78 | myPort = 8080 79 | 80 | serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 81 | serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 82 | serverSocket.bind((myHost, myPort)) 83 | serverSocket.listen(1) 84 | 85 | (clientSocket, address) = serverSocket.accept() 86 | clientAddress = address[0] 87 | 88 | data = clientSocket.recv(102400) 89 | dataStr = data.decode('UTF-8') 90 | dataStrList = dataStr.splitlines() 91 | message = json.loads(dataStrList[-1]) 92 | value = message['value'] 93 | env = value['env'] 94 | code = value['code'] 95 | numCores = env['numCores'] 96 | print(code, file=open("/pythonAction/actionToExec.py","w")) 97 | 98 | msg = "OK" 99 | 100 | response_headers = { 101 | 'Content-Type': 'text/html; encoding=utf8', 102 | 'Content-Length': len(msg), 103 | 'Connection': 'close', 104 | } 105 | 106 | response_headers_raw = ''.join('%s: %s\r\n' % (k, v) for k, v in response_headers.items()) 107 | 108 | response_proto = 'HTTP/1.1' 109 | response_status = '200' 110 | response_status_text = 'OK' # this can be random 111 | 112 | # sending all this stuff 113 | r = '%s %s %s\r\n' % (response_proto, response_status, response_status_text) 114 | 115 | clientSocket.send(r.encode(encoding="utf-8")) 116 | clientSocket.send(response_headers_raw.encode(encoding="utf-8")) 117 | clientSocket.send('\r\n'.encode(encoding="utf-8")) # to separate headers from body 118 | clientSocket.send(msg.encode(encoding="utf-8")) 119 | clientSocket.close() 120 | 121 | runner.run(env,serverSocket,numCores) 
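For orientation, the sequence that init.py implements can be driven end to end with two raw HTTP POSTs: the first carries the function code and environment (init.py json-loads the last line of the request), and every later request is an invocation handled by the runner. The sketch below is illustrative and not part of the artifact; the container address, the sample function, and the numCores value are made-up placeholders, and the run-request handling assumed here follows runner_old.py further down:

import json
import socket

HOST = "172.17.0.2"  # placeholder: the container's IP, e.g. from `docker inspect`

def http_post(body):
    # init.py parses the last line of the raw request as JSON, so a
    # hand-rolled HTTP/1.1 POST without a trailing newline is sufficient.
    req = ("POST / HTTP/1.1\r\n" "Host: " + HOST + "\r\n"
           "Content-Type: application/json\r\n"
           "Content-Length: " + str(len(body)) + "\r\n\r\n" + body)
    with socket.create_connection((HOST, 8080)) as s:
        s.sendall(req.encode("utf-8"))
        return s.recv(4096).decode("utf-8")

code = 'def main(args):\n    return {"greeting": "hello " + str(args.get("name"))}\n'
# 1) initialization: ship the code and env; expect an HTTP 200 with body "OK"
print(http_post(json.dumps({"value": {"code": code, "env": {"numCores": "2"}}})))
# 2) invocation: the runner forks a child that executes main(args)
print(http_post(json.dumps({"value": {"name": "world"}})))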
-------------------------------------------------------------------------------- /pythonAction/mylib.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include <stdio.h> 3 | #include <stdlib.h> 4 | #include <string.h> 5 | #include <math.h> 6 | #include <unistd.h> 7 | #include <fcntl.h> 8 | #include <dlfcn.h> 9 | #include <sys/types.h> 10 | #include <sys/stat.h> 11 | #include <sys/syscall.h> 12 | #include <sys/socket.h> 13 | 14 | void rand_str(char *dest, size_t length) { 15 | char charset[] = "0123456789" 16 | "abcdefghijklmnopqrstuvwxyz" 17 | "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; 18 | *dest++ = ' '; 19 | *dest++ = '-'; 20 | *dest++ = ' '; 21 | length -= 3; 22 | while (length-- > 0) { 23 | size_t index = (double) rand() / RAND_MAX * (sizeof charset - 1); 24 | *dest++ = charset[index]; 25 | } 26 | *dest++ = '\n'; *dest = '\0'; /* NUL-terminate so the strcat below stays in bounds */ 27 | } 28 | 29 | ssize_t recvfrom(int socket, void *restrict buffer, size_t length, int flags, struct sockaddr *restrict address, socklen_t *restrict address_len){ 30 | ssize_t (*lrecvfrom)(int, void *restrict, size_t, int, struct sockaddr *restrict, socklen_t *restrict) = dlsym(RTLD_NEXT, "recvfrom"); 31 | pid_t tid = syscall(SYS_gettid); 32 | pid_t pid = getpid(); 33 | 34 | //inform runner that thread/process is blocked 35 | const char* str1 = "\nblocked - "; 36 | char *num; 37 | int fd; 38 | char * myfifo = "/tmp/blocked"; 39 | mkfifo(myfifo, 0666); 40 | fd = open(myfifo, O_WRONLY); 41 | char mybuffer[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 42 | asprintf(&num, "%d", tid); 43 | strcat(strcpy(mybuffer, str1), num); 44 | write(fd, mybuffer, (strlen(mybuffer) + 1)); 45 | close(fd); 46 | 47 | ssize_t toReturnValue = lrecvfrom(socket, buffer, length, flags, address, address_len); 48 | //inform runner that it is unblocked 49 | 50 | fd = open(myfifo, O_WRONLY); 51 | const char* str1_new = "\nunblocked - "; 52 | char *num_new; 53 | char mybuffer_new[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 54 | asprintf(&num_new, "%d", tid); 55 | strcat(strcpy(mybuffer_new, str1_new), num_new); 56 | char mybuffer_newest[(int)((ceil(log10(tid))+1)*sizeof(char)) + 100]; 57 | char str2[12] = {0}; 58 | rand_str(str2, sizeof(str2) - 2); 59 | strcat(strcpy(mybuffer_newest, mybuffer_new), str2); 60 | write(fd, mybuffer_newest, (strlen(mybuffer_newest) + 1)); 61 | close(fd); 62 | 63 | //wait for confirmation 64 | int fd_conf; 65 | char arr1[3]; 66 | char * myfifo_conf = "/tmp/myfifo"; 67 | mkfifo(myfifo_conf, 0666); 68 | fd_conf = open(myfifo_conf, O_RDONLY); 69 | read(fd_conf, arr1, sizeof(arr1)); 70 | printf("Read\n"); 71 | close(fd_conf); 72 | printf("Send response\n"); 73 | //send response 74 | return toReturnValue; 75 | } -------------------------------------------------------------------------------- /pythonAction/mylib.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/pythonAction/mylib.so -------------------------------------------------------------------------------- /pythonAction/mylibSocket.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include <stdio.h> 3 | #include <stdlib.h> 4 | #include <string.h> 5 | #include <strings.h> 6 | #include <math.h> 7 | #include <unistd.h> 8 | #include <fcntl.h> 9 | #include <dlfcn.h> 10 | #include <sys/types.h> 11 | #include <sys/stat.h> 12 | #include <sys/syscall.h> 13 | #include <sys/socket.h> 14 | #include <netinet/in.h> 15 | #include <arpa/inet.h> 16 | 17 | #define PORT 3333 18 | 19 | ssize_t recvfrom(int socketm, void *restrict buffer, size_t length, int flags, struct sockaddr *restrict address, socklen_t *restrict address_len){ 20 | ssize_t (*lrecvfrom)(int, void *restrict, size_t, int,
struct sockaddr *restrict, socklen_t *restrict) = dlsym(RTLD_NEXT, "recvfrom"); 21 | pid_t tid = syscall(SYS_gettid); 22 | pid_t pid = getpid(); 23 | printf("Catch this recvfrom!\n"); 24 | int sock = 0, valread, client_fd; 25 | struct sockaddr_in serv_addr; 26 | 27 | if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { 28 | printf("\n Socket creation error \n"); 29 | return -1; 30 | } 31 | 32 | bzero(&serv_addr, sizeof(serv_addr)); 33 | serv_addr.sin_family = AF_INET; 34 | serv_addr.sin_port = htons(PORT); 35 | 36 | if (inet_pton(AF_INET, "0.0.0.0", &serv_addr.sin_addr)<= 0) { 37 | printf("\nInvalid address/ Address not supported \n"); 38 | return -1; 39 | } 40 | 41 | if ((client_fd = connect(sock, (struct sockaddr*)&serv_addr, sizeof(serv_addr))) < 0) { 42 | printf("\nConnection Failed \n"); 43 | return -1; 44 | } 45 | 46 | { 47 | char* num; 48 | const char* str1 = "\nblocked - "; 49 | char mybuffer[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 50 | bzero(mybuffer, sizeof(mybuffer)); 51 | asprintf(&num, "%d", tid); 52 | strcat(strcpy(mybuffer, str1), num); 53 | write(sock, mybuffer, sizeof(mybuffer)); 54 | } 55 | ssize_t toReturnValue = lrecvfrom(socketm, buffer, length, flags, address, address_len); 56 | { 57 | char* num; 58 | const char* str1 = "\nunblocked - "; 59 | char mybuffer[(int)((ceil(log10(tid))+1)*sizeof(char)) + 20]; 60 | bzero(mybuffer, sizeof(mybuffer)); 61 | asprintf(&num, "%d", tid); 62 | strcat(strcpy(mybuffer, str1), num); 63 | write(sock, mybuffer, sizeof(mybuffer)); 64 | bzero(mybuffer, sizeof(mybuffer)); 65 | valread = read(sock, mybuffer, sizeof(mybuffer)); 66 | //close(client_fd); 67 | } 68 | return toReturnValue; 69 | } -------------------------------------------------------------------------------- /pythonAction/mylibSocket.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/pythonAction/mylibSocket.so -------------------------------------------------------------------------------- /pythonAction/myprogram: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/pythonAction/myprogram -------------------------------------------------------------------------------- /pythonAction/myprogram.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <curl/curl.h> 3 | 4 | int main(void) 5 | { 6 | CURL *curl; 7 | CURLcode res; 8 | 9 | /* In windows, this will init the winsock stuff */ 10 | curl_global_init(CURL_GLOBAL_ALL); 11 | 12 | /* get a curl handle */ 13 | curl = curl_easy_init(); 14 | if(curl) { 15 | /* First set the URL that is about to receive our POST. This URL can 16 | just as well be a https:// URL if that is what should receive the 17 | data.
*/ 18 | curl_easy_setopt(curl, CURLOPT_URL, "http://google.com"); 19 | /* Now specify the POST data */ 20 | curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel&project=curl"); 21 | 22 | /* Perform the request, res will get the return code */ 23 | res = curl_easy_perform(curl); 24 | /* Check for errors */ 25 | if(res != CURLE_OK) 26 | fprintf(stderr, "curl_easy_perform() failed: %s\n", 27 | curl_easy_strerror(res)); 28 | 29 | /* always cleanup */ 30 | curl_easy_cleanup(curl); 31 | } 32 | curl_global_cleanup(); 33 | return 0; 34 | } -------------------------------------------------------------------------------- /pythonAction/runner_old.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 4 | import signal 5 | from multiprocessing import Process 6 | 7 | import psutil 8 | 9 | def signal_handler(sig, frame): 10 | serverSocket_.close() 11 | sys.exit(0) 12 | 13 | class PrintHook: 14 | def __init__(self,out=1): 15 | self.func = None 16 | self.origOut = None 17 | self.out = out 18 | 19 | def TestHook(self,text): 20 | f = open('hook_log.txt','a') 21 | f.write(text) 22 | f.close() 23 | return 0,0,text 24 | 25 | def Start(self,func=None): 26 | if self.out: 27 | sys.stdout = self 28 | self.origOut = sys.__stdout__ 29 | else: 30 | sys.stderr= self 31 | self.origOut = sys.__stderr__ 32 | 33 | if func: 34 | self.func = func 35 | else: 36 | self.func = self.TestHook 37 | 38 | def Stop(self): 39 | self.origOut.flush() 40 | if self.out: 41 | sys.stdout = sys.__stdout__ 42 | else: 43 | sys.stderr = sys.__stderr__ 44 | self.func = None 45 | 46 | def flush(self): 47 | self.origOut.flush() 48 | 49 | def write(self,text): 50 | proceed = 1 51 | lineNo = 0 52 | addText = '' 53 | if self.func != None: 54 | proceed,lineNo,newText = self.func(text) 55 | if proceed: 56 | if text.split() == []: 57 | self.origOut.write(text) 58 | else: 59 | if self.out: 60 | if lineNo: 61 | try: 62 | raise "Dummy" 63 | except: 64 | codeObject = sys.exc_info()[2].tb_frame.f_back.f_code 65 | fileName = codeObject.co_filename 66 | funcName = codeObject.co_name 67 | self.origOut.write(newText) 68 | 69 | def MyHookOut(text): 70 | return 1,1,' -- pid -- '+ str(os.getpid()) + ' ' + text 71 | 72 | serverSocket_ = None 73 | actionModule = None 74 | 75 | def myFunction(clientSocket_): 76 | global actionModule 77 | data_ = clientSocket_.recv(1024) 78 | dataStr = data_.decode('UTF-8') 79 | dataStrList = dataStr.splitlines() 80 | message = json.loads(dataStrList[-1]) 81 | args = message['value'] 82 | result = actionModule.main(args) 83 | result["myPID"] = os.getpid() 84 | msg = json.dumps(result) 85 | 86 | response_headers = { 87 | 'Content-Type': 'text/html; encoding=utf8', 88 | 'Content-Length': len(msg), 89 | 'Connection': 'close', 90 | } 91 | 92 | response_headers_raw = ''.join('%s: %s\r\n' % (k, v) for k, v in response_headers.items()) 93 | 94 | response_proto = 'HTTP/1.1' 95 | response_status = '200' 96 | response_status_text = 'OK' # this can be random 97 | 98 | # sending all this stuff 99 | r = '%s %s %s\r\n' % (response_proto, response_status, response_status_text) 100 | 101 | clientSocket_.send(r.encode(encoding="utf-8")) 102 | clientSocket_.send(response_headers_raw.encode(encoding="utf-8")) 103 | clientSocket_.send('\r\n'.encode(encoding="utf-8")) # to separate headers from body 104 | clientSocket_.send(msg.encode(encoding="utf-8")) 105 | 106 | clientSocket_.close() 107 | 108 | def run(env, serverSocket): 109 | global serverSocket_ 110 | global actionModule 
111 | serverSocket_ = serverSocket 112 | import actionToExec 113 | actionModule = actionToExec 114 | os.environ = env 115 | signal.signal(signal.SIGINT, signal_handler) 116 | phOut = PrintHook() 117 | phOut.Start(MyHookOut) 118 | myChildren = [] 119 | 120 | while(True): 121 | (clientSocket, address) = serverSocket.accept() 122 | print("My Children!") 123 | for child in myChildren: 124 | print(psutil.Process(child).status()) 125 | childProcess = os.fork() 126 | if childProcess == 0: 127 | myFunction(clientSocket) 128 | os._exit(os.EX_OK) 129 | else: 130 | myChildren.append(childProcess) 131 | clientSocket.close() 132 | -------------------------------------------------------------------------------- /pythonAction/scheduler.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import requests 3 | import threading 4 | import pickle 5 | 6 | class Message: 7 | def __init__(self): 8 | self.url_func = "" 9 | self.parameters = {} 10 | self.authentication = ("", "") 11 | self.arguments = {} 12 | 13 | def myFunction(clientSocket): 14 | data = clientSocket.recv(1024) 15 | mReceived = pickle.loads(data) 16 | future = requests.post(mReceived.url_func, params=mReceived.parameters, auth=mReceived.authentication, json=mReceived.arguments, verify=False) 17 | clientSocket.sendall(str(future.content).encode('utf-8')) 18 | clientSocket.close() 19 | 20 | def sendRequest(url, params={}, auth=("",""), args={}): 21 | myHost = '0.0.0.0' 22 | myPort = 1234 23 | clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 24 | clientSocket.connect((myHost, myPort)) 25 | message = Message() 26 | message.url_func = url 27 | message.parameters = params 28 | message.authentication = auth 29 | message.arguments = args 30 | mToSend = pickle.dumps(message) 31 | clientSocket.sendall(mToSend) 32 | data = clientSocket.recv(1024) 33 | data = data.decode("utf-8")[2:-1] 34 | print(data) 35 | return data 36 | 37 | if __name__ == "__main__": 38 | myHost = '0.0.0.0' 39 | myPort = 1234 40 | 41 | serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 42 | serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 43 | serverSocket.bind((myHost, myPort)) 44 | serverSocket.listen(1) 45 | 46 | while True: 47 | (clientSocket, address) = serverSocket.accept() 48 | thread = threading.Thread(target=myFunction, args=(clientSocket,)) 49 | thread.start() 50 | -------------------------------------------------------------------------------- /pythonAction/start.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | 4 | subprocess.Popen(["python3","scheduler.py"]) 5 | subprocess.Popen(["python3","init.py"]) 6 | 7 | while True: 8 | time.sleep(100) -------------------------------------------------------------------------------- /pythonAction/test.txt: -------------------------------------------------------------------------------- 1 | Hello I am Jovan 2 | I am from Serbia 3 | I am PhD student at UIUC -------------------------------------------------------------------------------- /pythonAction/testHTTP.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import os 3 | import threading 4 | import pipes 5 | import socket 6 | import json 7 | 8 | print("MY TID = ", threading.get_native_id()) 9 | print("MY PID = ", os.getpid()) 10 | 11 | def threadDo(clientSocket_): 12 | while True: 13 | try: 14 | data_ = clientSocket_.recv(1024) 15 |
if not data_: 16 | break 17 | dataStr = data_.decode('UTF-8') 18 | dataStrLines = dataStr.splitlines() 19 | for line in dataStrLines: 20 | if ("unblocked" in line): 21 | print("Thread id unblocked = " + line.split(" - ")[-1]) 22 | elif ("blocked" in line): 23 | print("Thread id blocked = " + line.split(" - ")[-1]) 24 | print("Message = " + dataStr) 25 | if "unblocked" in dataStr: 26 | result = "ok" 27 | clientSocket_.send(result.encode(encoding="utf-8")) 28 | break 29 | except: 30 | break 31 | print("Thread done") 32 | 33 | 34 | def threadCheck(): 35 | myHost = '0.0.0.0' 36 | myPort = 3333 37 | 38 | serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 39 | serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 40 | serverSocket.bind((myHost, myPort)) 41 | serverSocket.listen(1) 42 | 43 | while True: 44 | (clientSocket, _) = serverSocket.accept() 45 | print("Spawn new thread") 46 | doThread = threading.Thread(target=threadDo, args=(clientSocket,)) 47 | doThread.start() 48 | 49 | #threading.Thread(target=threadCheck).start() 50 | data_run = {"action_name":"/guest/funcB","action_version":"0.0.1","activation_id":"8cc0d938952e437e80d938952e637e9d","deadline":"1645662489031","namespace":"guest","transaction_id":"aYtvu7ZYIOBRi9FU3zuyGBSqu5mYDy3b","value":{"password":123,"username":"jovan"}} 51 | #future = requests.post('http://192.168.0.1:8080/run', json=data_run) 52 | future = requests.get("http://google.com") 53 | print(future.text) 54 | print(type(threading.get_native_id())) 55 | #while(True): 56 | # pass -------------------------------------------------------------------------------- /pythonAction/testHTTP1.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import requests 3 | libc = ctypes.CDLL('libc.so.6') 4 | 5 | def mywrapper(): 6 | print("hello") 7 | 8 | orig_recvfrom = libc.recvfrom 9 | orig_open = libc.open 10 | libc.recvfrom = mywrapper 11 | libc.open = mywrapper 12 | print(libc.recvfrom) 13 | f = open("filename.txt", "r") -------------------------------------------------------------------------------- /pythonAction/vid1.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jovans2/MXFaaS_Artifact/999e8388e6694c5519ce28dd99ef0cfee88eba39/pythonAction/vid1.mp4 -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | sudo apt update 2 | sudo apt upgrade -y 3 | sudo apt-get install \ 4 | ca-certificates \ 5 | curl \ 6 | gnupg \ 7 | lsb-release htop -y 8 | sudo mkdir -p /etc/apt/keyrings 9 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 10 | echo \ 11 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 12 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 13 | sudo apt-get update 14 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin -y 15 | sudo groupadd docker 16 | sudo usermod -aG docker $USER 17 | 18 | mkdir /tmp/114514 19 | cd /tmp/114514 20 | 21 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 22 | sudo install minikube-linux-amd64 /usr/local/bin/minikube 23 | 24 | 25 | # This part was commented out because Kubernetes suggests a new way to install kubectl 26 | # See
https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ 27 | 28 | #sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg 29 | #echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list 30 | 31 | #sudo apt-get update 32 | #sudo apt-get install -y kubectl 33 | 34 | 35 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 36 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 37 | 38 | wget https://github.com/knative/client/releases/download/knative-v1.8.1/kn-linux-amd64 39 | chmod +x kn-linux-amd64 40 | sudo mv kn-linux-amd64 /usr/local/bin/kn 41 | 42 | wget https://github.com/knative-sandbox/kn-plugin-quickstart/releases/download/knative-v1.8.1/kn-quickstart-linux-amd64 43 | chmod +x kn-quickstart-linux-amd64 44 | sudo mv kn-quickstart-linux-amd64 /usr/local/bin/kn-quickstart 45 | 46 | sudo chmod 666 /var/run/docker.sock 47 | 48 | minikube start --nodes 1 -p minikube 49 | 50 | # Install the required custom resources by running the command: 51 | kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.9.1/serving-crds.yaml 52 | 53 | # Install the core components of Knative Serving by running the command: 54 | kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.9.1/serving-core.yaml 55 | 56 | # Install the Knative Kourier controller by running the command: 57 | kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.9.1/kourier.yaml 58 | 59 | # Configure Knative Serving to use Kourier by default by running the command: 60 | kubectl patch configmap/config-network \ 61 | --namespace knative-serving \ 62 | --type merge \ 63 | --patch '{"data":{"ingress-class":"kourier.ingress.networking.knative.dev"}}' 64 | 65 | # Configure DNS 66 | kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.9.1/serving-default-domain.yaml 67 | 68 | 69 | sudo apt-get install python3-pip -y 70 | pip3 install docker 71 | pip3 install numpy # Sometimes numpy needs to be pinned to a specific version 72 | pip3 install torch torchvision torchaudio 73 | --------------------------------------------------------------------------------
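After setup.sh finishes, one quick sanity check before deploying the benchmark services is to verify that every pod in the knative-serving namespace reached the Running state. A small sketch (not part of the artifact; it only assumes kubectl is on the PATH, as installed above):

import subprocess

# List the Knative Serving control-plane pods and flag any that are not Running.
out = subprocess.check_output(
    ["kubectl", "get", "pods", "--namespace", "knative-serving", "--no-headers"]
).decode("utf-8")
pending = [line for line in out.splitlines() if " Running " not in " " + line + " "]
if pending:
    print("Not ready yet:\n" + "\n".join(pending))
else:
    print("knative-serving is up; the prototype services can be deployed next")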