├── log └── test.txt ├── backend_server ├── log │ └── test.txt ├── backend_globals.py ├── grpc_config │ ├── protos │ │ └── msg_transfer.proto │ ├── msg_transfer_pb2_grpc.py │ └── msg_transfer_pb2.py ├── model_controller.py └── rpc_server.py ├── info_store ├── handled_result │ └── test.txt └── temporary_file │ └── dog.jpg ├── DOC └── smarteye.png ├── cv_model ├── fasterrcnn_resnet50_fpn_coco-258fb6c6.pth ├── maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth ├── retinanet_resnet50_fpn_coco-eeacb38b.pth ├── fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth └── fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth ├── edge_globals.py ├── .gitattributes ├── tools ├── read_config.py ├── make_request.py ├── transfer_files_tool.py └── video_handle_tool.py ├── frontend_server ├── monitor.py ├── dispatch_policy.py ├── frontend_globals.py ├── offloading.py ├── forwarding_server.py └── grpc_interface.py ├── config ├── config.ini └── model_info.py ├── local ├── video_reader.py ├── preprocessor.py ├── sys_info.py ├── local_store.py └── decision_engine.py ├── camera └── virtual_camera.py ├── model_manager ├── model_cache.py ├── image_classification.py ├── object_detection.py └── imagenet_classes.txt ├── test ├── load_model │ └── load_model_test.py └── model │ └── test.py ├── edge_main.py ├── edge_worker.py └── README.md /log/test.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend_server/log/test.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /info_store/handled_result/test.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend_server/backend_globals.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | global loaded_model 4 | -------------------------------------------------------------------------------- /DOC/smarteye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MSNLAB/SmartEye/HEAD/DOC/smarteye.png -------------------------------------------------------------------------------- /info_store/temporary_file/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MSNLAB/SmartEye/HEAD/info_store/temporary_file/dog.jpg -------------------------------------------------------------------------------- /cv_model/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:258fb6c638b15964ddcdd1ae0748c5eef1be9e732750120cc857feed3faac384 3 | size 167502836 4 | -------------------------------------------------------------------------------- /cv_model/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:bf2d0c1efbc936eeee2bc95a48e80ebc86b891f61b0106485937fc29f9315fc0 3 | size 178090079 4 | -------------------------------------------------------------------------------- /cv_model/retinanet_resnet50_fpn_coco-eeacb38b.pth: -------------------------------------------------------------------------------- 1 | version 
https://git-lfs.github.com/spec/v1 2 | oid sha256:eeacb38b7cec8cf93c57867e05eaab621047f19b0d2ec5accaa405f690da15b7 3 | size 136595076 4 | -------------------------------------------------------------------------------- /cv_model/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:fb6a3cc702b1df54c18a44b26708cd083614211062d0c36d2ca7bf9270df3533 3 | size 77844807 4 | -------------------------------------------------------------------------------- /cv_model/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:907ea3f91ff92242bc1baea8049276a3e76bca48ce7560bd268cc029f37977b5 3 | size 77844807 4 | -------------------------------------------------------------------------------- /edge_globals.py: -------------------------------------------------------------------------------- 1 | IMAGE_TYPE = 0 2 | VIDEO_TYPE = 1 3 | 4 | IMAGE_CLASSIFICATION = 0 5 | OBJECT_DETECTION = 1 6 | 7 | LOCAL = 0 8 | OFFLOAD = 1 9 | 10 | global loaded_model 11 | global sys_info 12 | global datastore 13 | global thread 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | cv_model/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth filter=lfs diff=lfs merge=lfs -text 2 | cv_model/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth filter=lfs diff=lfs merge=lfs -text 3 | cv_model/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth filter=lfs diff=lfs merge=lfs -text 4 | cv_model/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth filter=lfs diff=lfs merge=lfs -text 5 | cv_model/retinanet_resnet50_fpn_coco-eeacb38b.pth filter=lfs diff=lfs merge=lfs -text 6 | -------------------------------------------------------------------------------- /tools/read_config.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import os 3 | 4 | 5 | def read_config(section, key=None): 6 | """Read configure file. 7 | 8 | Read configure file to get configure information. 9 | 10 | :param: section: the section of ini file 11 | :param: key in section of ini file 12 | :return: configure information 13 | """ 14 | root_path = os.path.abspath(os.path.dirname(__file__)) 15 | config = configparser.ConfigParser() 16 | config.read(root_path + "/../config/config.ini") 17 | if key is None: 18 | items = config.items(section) 19 | models = [] 20 | for item in items: 21 | models.append(item[1]) 22 | return models 23 | else: 24 | value = config.get(section, key) 25 | return value 26 | 27 | -------------------------------------------------------------------------------- /frontend_server/monitor.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | import frontend_globals 3 | from frontend_server.grpc_interface import get_server_utilization 4 | 5 | 6 | def server_monitor(): 7 | """Update the cpu usage list and memory usage list every ten seconds. 
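(The actual period is whatever monitor_interval in config/config.ini is set to; the sample configuration uses 30 seconds.)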
8 | 9 | :return: None 10 | """ 11 | frontend_globals.cpu_usage = [] 12 | frontend_globals.memory_usage = [] 13 | for grpc_server in frontend_globals.grpc_servers: 14 | new_cpu_usage, new_memory_usage = get_server_utilization(grpc_server) 15 | frontend_globals.cpu_usage.append(new_cpu_usage) 16 | frontend_globals.memory_usage.append(new_memory_usage) 17 | logger.info("cpu_usage:" + str(frontend_globals.cpu_usage)) 18 | logger.info("memory_usage:" + str(frontend_globals.memory_usage)) 19 | 20 | -------------------------------------------------------------------------------- /config/config.ini: -------------------------------------------------------------------------------- 1 | [grpc-url] 2 | url0=127.0.0.1:50051 3 | url1=127.0.0.1:50051 4 | 5 | [flask-url] 6 | video_frame_url=http://127.0.0.1:5000/image_handler 7 | 8 | [camera-info] 9 | account=your account 10 | password=your password 11 | ip_address=your camera ip 12 | channel=1 13 | 14 | [monitor] 15 | monitor_interval=30 16 | 17 | [edge-setting] 18 | control_policy=threshold_offload_policy 19 | ;always_local_fastest_model 20 | ;always_cloud_lowest_delay 21 | ;threshold_offload_policy 22 | queue_maxsize=10 23 | worker_number=5 24 | 25 | 26 | [object-detection] 27 | model4=maskrcnn_resnet50_fpn 28 | model5=retinanet_resnet50_fpn 29 | model2=fasterrcnn_mobilenet_v3_large_fpn 30 | model3=fasterrcnn_resnet50_fpn 31 | model1=fasterrcnn_mobilenet_v3_large_320_fpn 32 | 33 | [image-classification] 34 | model1=alexnet 35 | model2=densenet121 36 | model6=googlenet 37 | model7=inception_v3 38 | model8=mnasnet0_5 39 | model10=mobilenet_v2 40 | model13=resnet101 41 | model18=resnext101_32x8d 42 | model20=shufflenet_v2_x0_5 43 | model22=squeezenet1_0 44 | model24=vgg11 45 | model32=wide_resnet101_2 46 | 47 | -------------------------------------------------------------------------------- /frontend_server/dispatch_policy.py: -------------------------------------------------------------------------------- 1 | import random 2 | import frontend_globals 3 | 4 | 5 | def random_policy(): 6 | """Choose a random server from all of the servers and return it. 7 | 8 | :return: server url 9 | """ 10 | rand = random.randint(0, len(frontend_globals.grpc_servers) - 1) 11 | key = frontend_globals.grpc_servers[rand] 12 | return key 13 | 14 | 15 | def shortest_queue(): 16 | """Choose the grpc server whose task queue is the shortest. 17 | 18 | :return: server url 19 | """ 20 | tasks_number_list = list(frontend_globals.tasks_number.values())  # dict_values has no .index(), so build a list; tasks_number is created by iterating grpc_servers, so the indices line up 21 | selected_server = tasks_number_list.index(min(tasks_number_list)) 22 | key = frontend_globals.grpc_servers[selected_server] 23 | return key 24 | 25 | 26 | def lowest_cpu_utilization(): 27 | """Get the url of the server with the lowest cpu utilization.
28 | 29 | :return: server url 30 | """ 31 | selected_server = frontend_globals.cpu_usage.index(min(frontend_globals.cpu_usage)) 32 | key = frontend_globals.grpc_servers[selected_server] 33 | return key 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /local/video_reader.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from tools.read_config import read_config 3 | 4 | 5 | class VideoReader: 6 | def __init__(self, input_source=None, rtsp_camera=False): 7 | self.input_source = None 8 | if input_source is not None: 9 | self.input_source = input_source 10 | elif rtsp_camera is True: 11 | account = read_config("camera-info", "account") 12 | password = read_config("camera-info", "password") 13 | ip_address = read_config("camera-info", "ip_address") 14 | channel = int(read_config("camera-info", "channel")) 15 | self.input_source = "rtsp://%s:%s@%s/cam/realmonitor?channel=%d&subtype=0" \ 16 | % (account, password, ip_address, channel) 17 | self.cap = cv2.VideoCapture(self.input_source) 18 | 19 | def read_frame(self): 20 | if self.cap.isOpened(): 21 | ret, frame = self.cap.read() 22 | if ret: 23 | return frame 24 | self.cap.release() 25 | return None 26 | return None 27 | -------------------------------------------------------------------------------- /frontend_server/frontend_globals.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import sys 3 | sys.path.append("../") 4 | from tools.read_config import read_config 5 | 6 | 7 | def init(): 8 | """Some global variables 9 | 10 | :param grpc_servers: grpc server url list 11 | :param cpu_usage: cpu_usage list of all servers 12 | :param memory_usage: memory_usage list of all servers 13 | :param distribution_function: three distribution functions of selecting a grpc server 14 | :param tasks_number: processing tasks number of all the grpc servers 15 | """ 16 | global grpc_servers 17 | global cpu_usage 18 | global memory_usage 19 | 20 | grpc_servers = read_config("grpc-url") 21 | cpu_usage = [] 22 | memory_usage = [] 23 | 24 | global distribution_function 25 | distribution_function = { 26 | 'random': 'random_policy', 27 | 'tasks_queue': 'shortest_queue', 28 | 'cpu_usage': 'lowest_cpu_utilization' 29 | } 30 | 31 | global tasks_number 32 | tasks_number = {} 33 | for grpc_server in grpc_servers: 34 | tasks_number[grpc_server] = 0 35 | 36 | 37 | -------------------------------------------------------------------------------- /backend_server/grpc_config/protos/msg_transfer.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | service MsgTransfer { 4 | 5 | rpc image_processor (MsgRequest) returns (MsgReply) {} 6 | rpc get_server_utilization(Server_Utilization_Request) returns (Server_Utilization_Reply) {} 7 | rpc get_loaded_models_name (Loaded_Model_Name_Request) returns (Loaded_Model_Name_Reply) {} 8 | rpc load_specified_model (Load_Specified_Model_Request) returns (Load_Specified_Model_Reply) {} 9 | 10 | } 11 | 12 | message MsgRequest { 13 | 14 | string model = 1; 15 | string frame = 2; 16 | string frame_shape = 3; 17 | 18 | } 19 | 20 | message MsgReply { 21 | 22 | string result = 2; 23 | string frame_shape = 3; 24 | 25 | } 26 | 27 | message Server_Utilization_Request {} 28 | 29 | message Server_Utilization_Reply { 30 | 31 | float cpu_usage = 1; 32 | float memory_usage = 2; 33 | 34 | } 35 | 36 | message 
Loaded_Model_Name_Request {} 37 | 38 | message Loaded_Model_Name_Reply { 39 | 40 | string loaded_model_name = 1; 41 | 42 | } 43 | 44 | message Load_Specified_Model_Request { 45 | 46 | string specified_model = 1; 47 | 48 | } 49 | 50 | message Load_Specified_Model_Reply {} -------------------------------------------------------------------------------- /local/preprocessor.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import os 3 | import random 4 | import numpy as np 5 | 6 | 7 | def video_frame_resize(frame, new_height): 8 | """Resize the frame to new_height while keeping its aspect ratio""" 9 | frame = Image.fromarray(frame) 10 | hpercent = (new_height / float(frame.size[1])) 11 | wsize = int((float(frame.size[0]) * float(hpercent))) 12 | frame = frame.resize((wsize, new_height), Image.ANTIALIAS) 13 | frame = np.asarray(frame) 14 | return frame 15 | 16 | 17 | def video_frame_change_qp(frame, qp): 18 | """Lower the image quality by re-encoding the frame as a JPEG with the given quality value (qp)""" 19 | image = Image.fromarray(frame) 20 | temporary_store = os.path.join(os.path.dirname(__file__), "../info_store/temporary_file")  # repo-relative temporary folder (the original pointed at a machine-specific Downloads path) 21 | n = random.randrange(0, 1000) 22 | file_path = os.path.join(temporary_store, 'temporary_' + str(n) + '.jpg') 23 | image.save(file_path, quality=qp) 24 | img = Image.open(file_path) 25 | frame = np.array(img) 26 | os.remove(file_path) 27 | return frame 28 | 29 | 30 | def preprocess(task): 31 | """Preprocess the video frame based on the resolution and quality decided by the decision engine""" 32 | if task.new_size is not None: 33 | task.frame = video_frame_resize(task.frame, task.new_size) 34 | 35 | if task.new_qp is not None: 36 | task.frame = video_frame_change_qp(task.frame, task.new_qp) 37 | 38 | return task 39 | 40 | 41 | -------------------------------------------------------------------------------- /tools/make_request.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | from urllib import request, parse 4 | import time 5 | from loguru import logger 6 | 7 | 8 | def make_request(url, **msg_dict): 9 | """Send info to server.
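The fields in msg_dict are urlencoded and sent as an HTTP POST; the call also times the round trip so callers can estimate the network delay to the forwarding server.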
10 | 11 | :param url: server url 12 | :param msg_dict: form fields to POST to the server 13 | :return: result dict, start time, processing delay, and estimated one-way transfer delay 14 | """ 15 | headers = { 16 | "User-Agent": "Mozilla", 17 | # 'content-type': 'application/json' 18 | } 19 | data = parse.urlencode(msg_dict).encode('utf8') 20 | t1 = time.time() 21 | req = request.Request(url=url, data=data, headers=headers, method='POST') 22 | try: 23 | response = request.urlopen(req) 24 | t2 = time.time() 25 | result = response.read().decode('utf-8') 26 | except Exception:  # a bare except would also swallow KeyboardInterrupt/SystemExit 27 | logger.exception("Error requesting server!") 28 | else: 29 | result_dict = json.loads(result) 30 | try: 31 | processing_delay = t2 - t1 32 | arrive_transfer_server_time = (processing_delay - result_dict["process_time"]) / 2 33 | assert processing_delay != 0 34 | assert arrive_transfer_server_time != 0 35 | except AssertionError: 36 | logger.error("processing_delay or arrive_transfer_server_time is 0!") 37 | else: 38 | # logger.debug("make request well!") 39 | return result_dict, t1, processing_delay, arrive_transfer_server_time 40 | 41 | 42 | -------------------------------------------------------------------------------- /tools/transfer_files_tool.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import os 3 | import numpy as np 4 | 5 | 6 | def transfer_file_to_str(file_path): 7 | """Transfer an image file to a string 8 | 9 | :param file_path: file path 10 | :return: msg_dict including the file name and the file content as a base64 string 11 | """ 12 | file_name = os.path.basename(file_path) 13 | with open(file_path, 'rb') as f: 14 | img_byte = base64.b64encode(f.read())  # read the file as bytes and encode it with base64 15 | img_str = img_byte.decode('ascii') 16 | msg_dict = { 17 | 'file_name': file_name, 18 | 'file_str': img_str 19 | } 20 | 21 | return msg_dict 22 | 23 | 24 | def transfer_array_and_str(frame, way): 25 | """Transfer between base64 format and numpy.ndarray 26 | 27 | :param frame: the image str or ndarray to be transferred 28 | :param way: if way is 'up', transfer numpy.ndarray format to base64 format, 29 | else if way is 'down', transfer base64 format back to numpy.ndarray. 30 |
:return: image ndarray or image str 31 | """ 32 | if way == 'up':  # use ==, not 'is': identity comparison of strings is unreliable 33 | binary_frame = frame.tobytes() 34 | img_byte = base64.b64encode(binary_frame) 35 | img_str = img_byte.decode('ascii') 36 | return img_str 37 | else: 38 | img_decode_ = frame.encode('ascii') 39 | img_decode = base64.b64decode(img_decode_) 40 | nprr = np.frombuffer(img_decode, np.uint8)  # np.fromstring is deprecated; frombuffer is the supported equivalent 41 | return nprr 42 | 43 | 44 | -------------------------------------------------------------------------------- /tools/video_handle_tool.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import subprocess 4 | 5 | save_folder_path = '../' 6 | 7 | 8 | def extract_frames(input_file): 9 | """ 10 | Sample the video at 5 frames per second (ffmpeg -r 5) and write the frames as images 11 | :param input_file: video path 12 | :return: pictures stored path 13 | """ 14 | folder_pre_path = os.path.dirname(input_file) 15 | folder_name = os.path.basename(input_file).split(".")[0] 16 | folder_path = folder_pre_path + "/" + folder_name 17 | if not os.path.isdir(folder_path): 18 | os.mkdir(folder_path, 0o777)  # the mode must be octal; plain 777 set unintended permission bits 19 | # pass 20 | cmd = ("ffmpeg -i " + input_file + " -r 5 -f image2 " 21 | + folder_path + "/" + folder_name + "_%05d.jpg") 22 | p = subprocess.Popen(cmd, shell=True)  # the command is a single string, so it needs shell=True 23 | return folder_path 24 | 25 | 26 | def compose_video(picture_folder_path, video_path): 27 | """ 28 | compose pictures to a video 29 | :param picture_folder_path: picture path 30 | :param video_path: video save path 31 | :return: video save path 32 | """ 33 | folder_pre_path = os.path.dirname(video_path) 34 | folder_name = os.path.basename(video_path).split(".")[0] 35 | suffix = os.path.basename(video_path).split(".")[1] 36 | video_name = os.path.join(folder_pre_path, folder_name + '_processed.' + suffix)  # the original concatenation dropped the path separator and the extension dot 37 | # audio or not 38 | cmd = "ffmpeg -loop 1 -f image2 -i " + picture_folder_path + " -vcodec libx264 -r 5 -t 10 " + video_name 39 | subprocess.Popen(cmd, shell=True) 40 | return video_name 41 | 42 | -------------------------------------------------------------------------------- /local/sys_info.py: -------------------------------------------------------------------------------- 1 | import time 2 | import psutil 3 | import collections 4 | from loguru import logger 5 | 6 | 7 | Data = collections.namedtuple('Data', ['time', 'value']) 8 | 9 | 10 | def get_local_utilization(): 11 | """Get the CPU usage and memory usage""" 12 | cpu_usage = psutil.cpu_percent() 13 | memory_usage = psutil.virtual_memory().percent 14 | return cpu_usage, memory_usage 15 | 16 | 17 | class SysInfo: 18 | """Stores the system information of the edge device and the server""" 19 | def __init__(self): 20 | 21 | self.cpu_usage = [] 22 | self.memory_usage = [] 23 | self.local_delay = [] 24 | self.offload_delay = [] 25 | self.bandwidth = [] 26 | self.local_pending_task = 0 27 | 28 | def update_local_utilization(self): 29 | """Update local utilization including cpu usage and memory usage""" 30 | t = time.time() 31 | 32 | cpu_usage, memory_usage = get_local_utilization() 33 | self.cpu_usage.append(Data(t, cpu_usage)) 34 | self.memory_usage.append(Data(t, memory_usage)) 35 | 36 | def append_local_delay(self, cur_time, delay): 37 | 38 | data = Data(cur_time, delay) 39 | self.local_delay.append(data) 40 | 41 | def append_offload_delay(self, cur_time, delay): 42 | 43 | data = Data(cur_time, delay) 44 | self.offload_delay.append(data) 45 | 46 | def append_bandwidth(self, cur_time, bandwidth): 47 | data = Data(cur_time, bandwidth) 48 | self.bandwidth.append(data) 49 | 50 | 51 | 52 | 53 | 54 |
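These two base64 helpers, together with the frame_shape string that accompanies every request, are what let a raw numpy frame survive the HTTP form and gRPC hops between edge and cloud. A minimal, self-contained round trip as a sketch (the all-zero array is a made-up stand-in for a real frame; the shape-string parsing mirrors what rpc_server.py does on the receiving side):

import numpy as np
from tools.transfer_files_tool import transfer_array_and_str

frame = np.zeros((4, 6, 3), dtype=np.uint8)      # stand-in for a decoded video frame
frame_shape = str(frame.shape)                   # shipped alongside the payload, e.g. "(4, 6, 3)"
img_str = transfer_array_and_str(frame, 'up')    # ndarray -> raw bytes -> base64 text
# receiving side: parse the shape string back into a tuple, then decode and reshape
shape = tuple(int(s) for s in frame_shape[1:-1].split(","))
restored = transfer_array_and_str(img_str, 'down').reshape(shape)
assert (restored == frame).all()

Shipping the shape separately is necessary because the base64 text only preserves the flat uint8 buffer, not the array's dimensions.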
-------------------------------------------------------------------------------- /config/model_info.py: -------------------------------------------------------------------------------- 1 | model_lib = { 2 | 'fasterrcnn_mobilenet_v3_large_320_fpn': { 3 | 'model_path': 'fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth', 4 | 'tx2_delay': 0.18, 5 | 'cloud_delay': 0.024, 6 | 'precision': None, 7 | 'service_type': 'object_detection' 8 | }, 9 | 'fasterrcnn_mobilenet_v3_large_fpn': { 10 | 'model_path': 'fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth', 11 | 'tx2_delay': 0.39, 12 | 'cloud_delay': 0.026, 13 | 'precision': None, 14 | 'service_type': 'object_detection' 15 | }, 16 | 'fasterrcnn_resnet50_fpn': { 17 | 'model_path': 'fasterrcnn_resnet50_fpn_coco-258fb6c6.pth', 18 | 'tx2_delay': 1.57, 19 | 'cloud_delay': 0.058, 20 | 'precision': None, 21 | 'service_type': 'object_detection' 22 | }, 23 | 'maskrcnn_resnet50_fpn': { 24 | 'model_path': 'maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth', 25 | 'tx2_delay': 1.65, 26 | 'cloud_delay': 0.064, 27 | 'precision': None, 28 | 'service_type': 'object_detection' 29 | }, 30 | 'retinanet_resnet50_fpn': { 31 | 'model_path': 'retinanet_resnet50_fpn_coco-eeacb38b.pth', 32 | 'tx2_delay': 1.77, 33 | 'cloud_delay': 0.063, 34 | 'precision': None, 35 | 'service_type': 'object_detection' 36 | }, 37 | } 38 | 39 | edge_object_detection_model = ( 40 | 'fasterrcnn_mobilenet_v3_large_320_fpn', 41 | 'fasterrcnn_mobilenet_v3_large_fpn', 42 | 'fasterrcnn_resnet50_fpn' 43 | ) 44 | 45 | cloud_object_detection_model = ( 46 | 'fasterrcnn_mobilenet_v3_large_320_fpn', 47 | 'fasterrcnn_mobilenet_v3_large_fpn', 48 | 'fasterrcnn_resnet50_fpn', 49 | 'maskrcnn_resnet50_fpn', 50 | 'retinanet_resnet50_fpn' 51 | ) 52 | 53 | 54 | -------------------------------------------------------------------------------- /camera/virtual_camera.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: Xuezhi Wang 5 | @license: 6 | @contact: 1050642597@qq.com 7 | @software: pycharm 8 | @file: virtual_camera.py 9 | @time: 2021/4/9 下午3:19 10 | @desc: 11 | ''' 12 | import pyvirtualcam 13 | # import numpy as np 14 | import cv2 15 | # from PIL import Image 16 | import subprocess 17 | # import os 18 | # import json 19 | # import time 20 | 21 | 22 | class VirtualCamera: 23 | """This is a virtual camera class. 24 | 25 | Virtual camera class, works like a real camera. Read a video file and output the image frame. 26 | """ 27 | def __init__(self, video_name): 28 | 29 | try: 30 | subprocess.call(['modprobe', 'v4l2loopback', 'device=1'], stdout=subprocess.PIPE, timeout=5) 31 | except Exception as err: 32 | print("error:", err) 33 | self.cap = cv2.VideoCapture(video_name) 34 | self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 35 | self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 36 | self.fps = self.cap.get(cv2.CAP_PROP_FPS) 37 | 38 | def send_video_to_virtual_camera(self): 39 | """Send video file to virtual camera. 40 | 41 | Create a pyvirtualcam.Camera object to collect video frames. 
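Note that the constructor shells out to modprobe v4l2loopback, so this class assumes a Linux host with the v4l2loopback kernel module available.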
42 | 43 | :return: None 44 | """ 45 | with pyvirtualcam.Camera(width=self.width, height=self.height, fps=self.fps) as cam: 46 | while self.cap.isOpened(): 47 | _, frame = self.cap.read() 48 | if frame is None: 49 | break 50 | cam.send(frame) 51 | cam.sleep_until_next_frame() 52 | self.cap.release() 53 | 54 | 55 | if __name__ == "__main__": 56 | 57 | video_path = '/home/wxz/Desktop/20200827153531.mp4' 58 | vircam = VirtualCamera(video_path) 59 | vircam.send_video_to_virtual_camera() 60 | -------------------------------------------------------------------------------- /frontend_server/offloading.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: XuezhiWang 5 | @license: 6 | @contact: 1050642597@qq.com 7 | @software: pycharm 8 | @file: offloading.py 9 | @time: 2021/4/16 下午2:37 10 | @desc: 11 | """ 12 | from tools import make_request 13 | from tools.transfer_files_tool import transfer_array_and_str 14 | from loguru import logger 15 | 16 | 17 | def send_frame(url, frame, selected_model): 18 | """Send the image frame to the transfer server. 19 | 20 | Send the image frame to the transfer server, and get the result of server. 21 | At the same time, calculate the time of total processing and arrive transfer server delay. 22 | 23 | :param url: transfer server's url 24 | :param frame: image frame to send to server 25 | :param selected_model: model name to send to server 26 | :return: result_dict: result dict returned from server 27 | start_time: the start time of calculating the time 28 | processing_delay: total processing time 29 | arrive_transfer_server_time: the delay between client and transfer server 30 | """ 31 | frame_shape = frame.shape 32 | img_str = transfer_array_and_str(frame, "up") 33 | msg_dict = { 34 | "selected_model": selected_model, 35 | "frame_shape": frame_shape, 36 | "frame": img_str 37 | } 38 | try: 39 | result_dict, start_time, processing_delay, arrive_transfer_server_time = make_request.make_request(url, **msg_dict) 40 | except Exception as err: 41 | logger.exception("servers return nothing") 42 | else: 43 | return result_dict, start_time, processing_delay, arrive_transfer_server_time 44 | 45 | 46 | # video file interface 47 | def process_video_file(url, input_file): 48 | 49 | response = make_request.make_request(url) 50 | video = response.read().decode('utf-8') 51 | # if selected_model == "image classification": 52 | # print(video) 53 | # else: 54 | # save_file(video, input_file) 55 | -------------------------------------------------------------------------------- /model_manager/model_cache.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | from loguru import logger 5 | from config.model_info import model_lib 6 | from torchvision.models import * 7 | from torchvision.models.detection import * 8 | 9 | 10 | def load_models(model_list): 11 | loaded_model = {} 12 | weight_folder = os.path.join(os.path.dirname(__file__), "../cv_model") 13 | 14 | for model_name in model_list: 15 | if model_name in model_lib.keys(): 16 | weight_files_path = os.path.join(weight_folder, model_lib[model_name]['model_path']) 17 | # load the weight file 18 | file_load = torch.load(weight_files_path) 19 | model = eval(model_name)(pretrained_backbone=False, pretrained=False) 20 | model.load_state_dict(file_load, False) 21 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 22 | model.to(device) 23 | 
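# eval() switches the network to inference mode: dropout is disabled and batch-norm uses its running statistics; torchvision detection models also require eval mode to run without training targets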
model.eval() 24 | loaded_model[model_name] = model 25 | else: 26 | logger.error('model does not exist') 27 | sys.exit() 28 | return loaded_model 29 | 30 | 31 | def get_fastest_model(model_list): 32 | fast_model = None 33 | min_delay = float('inf') 34 | for model in model_list: 35 | if model in model_lib.keys(): 36 | delay = model_lib[model]["tx2_delay"] 37 | if delay < min_delay: 38 | fast_model = model 39 | min_delay = delay 40 | return fast_model 41 | 42 | 43 | def get_most_precise_model(model_list): 44 | # the 'precision' entries in model_lib are not filled in yet (all None), so fall back to retinanet_resnet50_fpn, empirically the most precise of the bundled detectors, whenever no precision data is available 45 | precise_model = None 46 | max_precision = float('-inf') 47 | for model in model_list: 48 | if model in model_lib.keys(): 49 | precision = model_lib[model]['precision'] 50 | if precision is not None and precision > max_precision: 51 | precise_model = model 52 | max_precision = precision 53 | return precise_model if precise_model is not None else "retinanet_resnet50_fpn" 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /local/local_store.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: XuezhiWang 5 | @license: 6 | @contact: 1050642597@qq.com 7 | @software: pycharm 8 | @file: local_store.py 9 | @time: 2021/4/16 2:25 PM 10 | @desc: 11 | ''' 12 | import os 13 | import cv2 14 | import datetime 15 | import edge_globals 16 | from loguru import logger 17 | 18 | 19 | class DataStore: 20 | """Store results locally. 21 | 22 | According to the requirements, DataStore stores the input video frames 23 | as images or a video in a local directory. 24 | """ 25 | 26 | def __init__(self, store_type=None): 27 | time = datetime.datetime.now() 28 | store_path = os.path.join(os.path.dirname(__file__), "../info_store/handled_result") 29 | self.n = 0 30 | self.result_store_location = os.path.join( 31 | store_path, time.strftime('%a%b%d%H%M') 32 | ) 33 | if store_type == edge_globals.VIDEO_TYPE: 34 | video_name = os.path.join(store_path, time.strftime('%a%b%d%H%M') + ".mp4")  # write into the result folder instead of the working directory 35 | self.out = cv2.VideoWriter( 36 | video_name, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (640, 480)  # 'XVID' is an AVI codec; 'mp4v' matches the .mp4 container 37 | ) 38 | 39 | def store_image(self, frame): 40 | """Store the frame as an image 41 | 42 | :param frame: image which will be stored, type numpy.ndarray 43 | :return: None 44 | """ 45 | if not os.path.exists(self.result_store_location): 46 | os.mkdir(self.result_store_location) 47 | try: 48 | image_path = os.path.join(self.result_store_location, "out"+str(self.n)+".png") 49 | cv2.imwrite(image_path, frame) 50 | self.n += 1 51 | except Exception as err: 52 | print("save image fail:", err) 53 | 54 | def store_video(self, frame): 55 | """Write an image frame into a video file.
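Only valid when this DataStore was created with store_type=edge_globals.VIDEO_TYPE, since that is the only case in which the cv2.VideoWriter (self.out) is created.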
56 | 57 | :param frame: image which will be written, type numpy.ndarray 58 | :return: None 59 | """ 60 | try: 61 | self.out.write(frame) 62 | except Exception as err: 63 | print("write frame into video fail", err) 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /test/load_model/load_model_test.py: -------------------------------------------------------------------------------- 1 | from tools.read_config import read_config 2 | import torch 3 | import os 4 | from torchvision import transforms 5 | from PIL import Image 6 | import time 7 | 8 | 9 | transform = transforms.Compose([ 10 | transforms.Resize(256), 11 | transforms.CenterCrop(224), 12 | transforms.ToTensor(), 13 | transforms.Normalize( 14 | mean=[0.485, 0.456, 0.406], 15 | std=[0.229, 0.224, 0.225] 16 | ) 17 | ]) 18 | 19 | 20 | def load_model(selected_model): 21 | """ 22 | load the weight file of model 23 | :param selected_model: model is loaded 24 | :return: model 25 | """ 26 | weight_folder = read_config("models-path", "path") 27 | try: 28 | for file in os.listdir(weight_folder): 29 | if selected_model in file: 30 | file_name = file 31 | break 32 | assert file_name is not None 33 | except AssertionError: 34 | print("there is no matched file!") 35 | # print(selected_model) 36 | weight_files_path = os.path.join(weight_folder, file_name) 37 | model = eval(selected_model)() 38 | model.load_state_dict(torch.load(weight_files_path), False) 39 | model.eval() 40 | 41 | return model 42 | 43 | 44 | def image_classification(img, selected_model): 45 | 46 | img = Image.open(img) 47 | # img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 48 | img_t = transform(img) 49 | batch_t = torch.unsqueeze(img_t, 0) 50 | model = load_model(selected_model) 51 | with torch.no_grad(): 52 | out = model(batch_t) 53 | 54 | classes_file = read_config("classes-file", "classes_file") 55 | with open(classes_file) as f: 56 | classes = [line.strip() for line in f.readlines()] 57 | _, index = torch.max(out, 1) 58 | percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100 59 | result = classes[index[0]], percentage[index[0]].item() 60 | 61 | return result[0] 62 | 63 | 64 | if __name__ == '__main__': 65 | 66 | # alexnet mnasnet0_5 mnasnet1_0 mobilenet_v2 resnet101 shufflenet_v2_x0_5 squeezenet1_0 vgg11 wide_resnet101_2 67 | # "googlenet inception_v3" 68 | # densenet121, densenet161 69 | # print(result_dict.keys()) 70 | image_path = '../../info_store/handled_result/dog.jpg' 71 | selected_model = 'mnasnet0_5' 72 | # print(result_dict[selected_model]) 73 | t1 = time.time() 74 | result = image_classification(image_path, selected_model) 75 | t2 = time.time() 76 | print(t2 - t1) 77 | print(result) 78 | -------------------------------------------------------------------------------- /local/decision_engine.py: -------------------------------------------------------------------------------- 1 | import edge_globals 2 | from config.model_info import edge_object_detection_model 3 | from config.model_info import cloud_object_detection_model 4 | from model_manager.model_cache import get_fastest_model, get_most_precise_model 5 | from loguru import logger 6 | 7 | 8 | resolution_list = [240, 360, 480, 720, 1080] 9 | qp_value = [30, 40, 50, 60, 70, 80, 90] 10 | 11 | 12 | # the video frame will be always processed on edge 13 | def always_local_fastest_model(task): 14 | task.location = edge_globals.LOCAL 15 | if task.serv_type == edge_globals.OBJECT_DETECTION: 16 | task.selected_model = 
get_fastest_model(edge_object_detection_model) 17 | # logger.debug(task.selected_model) 18 | return task 19 | 20 | 21 | # the video frame will be always processed on the cloud 22 | def always_cloud_lowest_delay(task): 23 | 24 | task.location = edge_globals.OFFLOAD 25 | task.new_size = resolution_list[0] 26 | 27 | if task.serv_type == edge_globals.OBJECT_DETECTION: 28 | #task.selected_model = get_fastest_model(cloud_object_detection_model) 29 | task.selected_model = 'retinanet_resnet50_fpn' 30 | return task 31 | 32 | 33 | def threshold_offload_policy(task): 34 | 35 | if edge_globals.sys_info.local_pending_task <= 2: 36 | task.location = edge_globals.LOCAL 37 | if task.serv_type == edge_globals.OBJECT_DETECTION: 38 | task.selected_model = get_fastest_model(edge_object_detection_model) 39 | else: 40 | task.location = edge_globals.OFFLOAD 41 | task.new_size = resolution_list[0] 42 | if task.serv_type == edge_globals.OBJECT_DETECTION: 43 | task.selected_model = get_most_precise_model(cloud_object_detection_model) 44 | 45 | return task 46 | 47 | 48 | def delay_precision_tradeoff(task): 49 | pass 50 | 51 | 52 | class DecisionEngine: 53 | 54 | def __init__(self, sys_info): 55 | self.sys_info = sys_info 56 | self.policy_set = { 57 | "always_local_fastest_model": always_local_fastest_model, 58 | "always_cloud_lowest_delay": always_cloud_lowest_delay, 59 | "threshold_offload_policy": threshold_offload_policy 60 | } 61 | self.last_frame = None 62 | 63 | def get_decision(self, policy, task): 64 | # task = threshold_offload_policy(task) 65 | # make decision based on the policy 66 | task = self.policy_set[policy](task) 67 | # save the frame for differencing in the next time 68 | self.last_frame = task.frame 69 | return task 70 | 71 | 72 | -------------------------------------------------------------------------------- /frontend_server/forwarding_server.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from dispatch_policy import random_policy, shortest_queue, lowest_cpu_utilization 3 | from frontend_server.grpc_interface import get_grpc_reply 4 | import frontend_globals 5 | from frontend_server.monitor import server_monitor 6 | sys.path.append("../") 7 | from tools.read_config import read_config 8 | from loguru import logger 9 | 10 | from flask import Flask, request, jsonify 11 | import time 12 | from apscheduler.schedulers.background import BackgroundScheduler 13 | 14 | 15 | app = Flask(__name__) 16 | sched = BackgroundScheduler(daemon=True) 17 | sched.add_job(server_monitor, 'interval', seconds=int(read_config("monitor", "monitor_interval"))) 18 | sched.start() 19 | 20 | logger.add("log/transfer-server_{time}.log") 21 | 22 | 23 | @app.route('/image_handler', methods=['GET', 'POST']) 24 | def image_handler(): 25 | """Images transfer interface. 26 | 27 | 28 | Get info sent from local and transfer it to processing servers, 29 | then collect the processing result returned from grpc server 30 | and return the result to the client. 
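The per-server counter in frontend_globals.tasks_number is incremented while the request is in flight; the shortest_queue dispatch policy reads these counters.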
31 | 32 | :return: return_dict 33 | """ 34 | info_dict = request.form 35 | server_url = rpc_server_selection("random") 36 | frontend_globals.tasks_number[server_url] += 1 37 | t1 = time.time() 38 | msg_reply = None  # so the failure check below does not hit an unbound name 39 | try: 40 | msg_reply = get_grpc_reply(server_url, **info_dict) 41 | except Exception: 42 | logger.exception("Get result error!") 43 | frontend_globals.tasks_number[server_url] -= 1 44 | t2 = time.time() 45 | if msg_reply is None: 46 | return jsonify({"error": "no reply from the grpc server"}), 500  # a Flask view must not return None 47 | if msg_reply.frame_shape == "": 48 | return_dict = { 49 | "prediction": msg_reply.result, 50 | "process_time": t2 - t1} 51 | return jsonify(return_dict) 52 | else: 53 | return_dict = { 54 | "frame_shape": msg_reply.frame_shape, 55 | "result": msg_reply.result, 56 | "process_time": t2 - t1} 57 | return jsonify(return_dict) 58 | 59 | def rpc_server_selection(policy): 60 | """Select the grpc server to which the info will be sent. 61 | 62 | :param policy: the policy used to select a grpc server 63 | :return: server url 64 | """ 65 | if policy == 'random': 66 | grpc_server = random_policy() 67 | elif policy == 'tasks_queue': 68 | grpc_server = shortest_queue() 69 | else: 70 | grpc_server = lowest_cpu_utilization() 71 | return grpc_server 72 | 73 | 74 | if __name__ == '__main__': 75 | 76 | frontend_globals.init() 77 | app.run(host='0.0.0.0', port=5000, debug=True, threaded=True) -------------------------------------------------------------------------------- /model_manager/image_classification.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | from torchvision.models import * 4 | import torch 5 | from torchvision import transforms 6 | from PIL import Image 7 | from loguru import logger 8 | 9 | 10 | def preprocess(img): 11 | """Resize, crop, and normalize the image, returning a 4-D batch tensor 12 | 13 | :param img: image 14 | :return: preprocessed image batch 15 | """ 16 | transform = transforms.Compose([ 17 | transforms.Resize(256), 18 | transforms.CenterCrop(224), 19 | transforms.ToTensor(), 20 | transforms.Normalize( 21 | mean=[0.485, 0.456, 0.406], 22 | std=[0.229, 0.224, 0.225] 23 | ) 24 | ]) 25 | # img = Image.open(img) 26 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 27 | img_t = transform(img) 28 | batch_t = torch.unsqueeze(img_t, 0) 29 | return batch_t 30 | 31 | 32 | def load_model(selected_model): 33 | """Load the weight file of the selected model. 34 | 35 | :param selected_model: The name of the model to load 36 | :return: model: loaded model 37 | """ 38 | 39 | weight_folder = os.path.join(os.path.dirname(__file__), "../cv_model")  # repo-relative (the original pointed at a machine-specific rootfs path) 40 | file_name = None 41 | try: 42 | for file in os.listdir(weight_folder): 43 | if selected_model in file: 44 | file_name = file 45 | break 46 | assert file_name is not None 47 | except AssertionError: 48 | print("there is no matched file!") 49 | raise 50 | weight_files_path = os.path.join(weight_folder, file_name) 51 | model = eval(selected_model)(pretrained=False) 52 | model.load_state_dict(torch.load(weight_files_path)) 53 | model.eval() 54 | return model 55 | 56 | def image_classification(img, model): 57 | """Image prediction. 58 | 59 | Predict the class of the image and return the result. 60 | 61 | :param img: image frame 62 | :param model: loaded model 63 | :return: predict result.
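(img follows the OpenCV BGR layout and is converted to RGB before the torchvision transform is applied.)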
64 | """ 65 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 66 | batch_t = preprocess(img) 67 | batch_t_gpu = batch_t.to(device) 68 | out = model(batch_t_gpu) 69 | classes_file = os.path.join(os.path.dirname(__file__), "imagenet_classes.txt") 70 | 71 | with open(classes_file) as f: 72 | classes = [line.strip() for line in f.readlines()] 73 | 74 | _, index = torch.max(out, 1) 75 | percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100 76 | result = classes[index[0]], percentage[index[0]].item() 77 | return result[0] 78 | 79 | 80 | if __name__ == '__main__': 81 | 82 | image_path = '../info_store/handled_result/dog.jpg' 83 | selected_model = 'resnet101' 84 | result = image_classification(image_path, selected_model) 85 | # print(dir(models)) 86 | print(result) 87 | -------------------------------------------------------------------------------- /model_manager/object_detection.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from torchvision import transforms as T 4 | import cv2 5 | from torchvision.models.detection import * 6 | from loguru import logger 7 | import numpy as np 8 | import pickle 9 | 10 | 11 | COCO_INSTANCE_CATEGORY_NAMES = [ 12 | '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 13 | 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign', 14 | 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 15 | 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A', 16 | 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 17 | 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 18 | 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 19 | 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 20 | 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 21 | 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 22 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 23 | 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' 24 | ] 25 | 26 | 27 | def get_prediction(img, threshold, model): 28 | 29 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 30 | transform = T.Compose([T.ToTensor()]) 31 | img = transform(img) 32 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 33 | img = img.to(device) 34 | pred = model([img]) 35 | 36 | if torch.cuda.is_available(): 37 | pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].cuda().data.cpu().numpy())] 38 | pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())] 39 | pred_score = list(pred[0]['scores'].detach().cpu().numpy()) 40 | else: 41 | pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())] 42 | pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())] 43 | pred_score = list(pred[0]['scores'].detach().numpy()) 44 | try: 45 | pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1] 46 | except IndexError: 47 | return None, None 48 | else: 49 | pred_boxes = pred_boxes[:pred_t+1] 50 | pred_class = pred_class[:pred_t+1] 51 | return pred_boxes, pred_class 52 | 53 | 54 | def object_detection_api(img, model, rect_th=15, text_th=7, text_size=5, threshold=0.8): 55 
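# Run get_prediction and draw the surviving boxes and class labels onto the frame with cv2; the input img is annotated in place and returned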
| 56 | boxes, pred_cls = get_prediction(img, threshold, model) 57 | if boxes is None and pred_cls is None: 58 | return img 59 | for i in range(len(boxes)): 60 | # Draw a rectangle with the box coordinates 61 | cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th) 62 | # Write the predicted class 63 | cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th) 64 | return img 65 | 66 | -------------------------------------------------------------------------------- /frontend_server/grpc_interface.py: -------------------------------------------------------------------------------- 1 | import grpc 2 | from backend_server.grpc_config import msg_transfer_pb2_grpc, msg_transfer_pb2 3 | from loguru import logger 4 | 5 | 6 | def get_grpc_reply(server_url, **info_dict): 7 | """Send frame to server and get result. 8 | 9 | Send the frame to the processing server identified by server_url, 10 | and get the result. 11 | 12 | :param server_url: processing server's url 13 | :param info_dict: info sent from client, including selected_model, frame_shape and frame 14 | :return: msg_reply: a data structure of grpc 15 | """ 16 | 17 | options = [('grpc.max_receive_message_length', 256 * 1024 * 1024)] 18 | channel = grpc.insecure_channel(server_url, options=options) 19 | stub = msg_transfer_pb2_grpc.MsgTransferStub(channel) 20 | msg_request = msg_transfer_pb2.MsgRequest( 21 | model=info_dict["selected_model"], frame=info_dict["frame"], frame_shape=info_dict["frame_shape"] 22 | ) 23 | try: 24 | msg_reply = stub.image_processor(msg_request) 25 | except grpc.RpcError:  # catch the concrete gRPC error instead of a bare except 26 | logger.exception("Error: GRPC reply error!") 27 | else: 28 | return msg_reply 29 | 30 | 31 | def get_server_utilization(grpc_server): 32 | """Get the cpu usage and memory usage of a grpc server 33 | 34 | :param grpc_server: server's url, including port 35 | :return: the server's cpu usage and memory usage 36 | """ 37 | channel = grpc.insecure_channel(grpc_server) 38 | stub = msg_transfer_pb2_grpc.MsgTransferStub(channel) 39 | server_utilization_request = msg_transfer_pb2.Server_Utilization_Request() 40 | try: 41 | server_utilization_reply = stub.get_server_utilization(server_utilization_request) 42 | except grpc.RpcError: 43 | logger.exception("Get server utilization error!") 44 | raise  # the reply does not exist at this point, so re-raise instead of falling through 45 | return server_utilization_reply.cpu_usage, server_utilization_reply.memory_usage 46 | 47 | 48 | def load_specified_model(grpc_server, model_name): 49 | """Request a specified grpc server to load a specified model 50 | 51 | :param grpc_server: server url 52 | :param model_name: model name 53 | :return: None 54 | """ 55 | channel = grpc.insecure_channel(grpc_server) 56 | stub = msg_transfer_pb2_grpc.MsgTransferStub(channel) 57 | load_specified_model_request = msg_transfer_pb2.Load_Specified_Model_Request(  # must match the message name Load_Specified_Model_Request in msg_transfer.proto 58 | specified_model=model_name) 59 | try: 60 | stub.load_specified_model(load_specified_model_request, timeout=10) 61 | except Exception as err: 62 | logger.exception("Load specified model error!") 63 | 64 | 65 | def get_loaded_models(grpc_server): 66 | """Get the loaded models' names from the server 67 | 68 | :param grpc_server: server url 69 | :return: loaded model list 70 | """ 71 | channel = grpc.insecure_channel(grpc_server) 72 | stub = msg_transfer_pb2_grpc.MsgTransferStub(channel) 73 | loaded_model_name_request = msg_transfer_pb2.Loaded_Model_Name_Request() 74 | try: 75 | loaded_model_name_reply = stub.get_loaded_models_name(loaded_model_name_request, timeout=10) 76 | except Exception as err: 77 |
logger.exception("Get loaded model error!") 78 | raise  # loaded_model_name_reply is unbound here, so propagate instead of crashing with a NameError below 79 | loaded_model_name = loaded_model_name_reply.loaded_model_name 80 | return loaded_model_name -------------------------------------------------------------------------------- /backend_server/model_controller.py: -------------------------------------------------------------------------------- 1 | import os 2 | import psutil 3 | import torch 4 | import backend_globals 5 | from backend_server.grpc_config import msg_transfer_pb2 6 | from tools.read_config import read_config 7 | from torchvision.models.detection import * 8 | from torchvision.models import * 9 | from loguru import logger 10 | 11 | 12 | def load_model_files_advance(): 13 | """Load model files into memory when the server starts. 14 | 15 | When the server starts, this function gets the preload model names from the configuration file (a [preload-models] section in config/config.ini, which the sample configuration does not ship), 16 | and loads these models one by one. 17 | 18 | :return: None 19 | """ 20 | weight_folder = os.path.join(os.path.dirname(__file__), "../cv_model") 21 | preload_models = read_config("preload-models") 22 | 23 | for model in preload_models: 24 | file_name = None  # reset for every model so a stale match is not reused 25 | try: 26 | for file in os.listdir(weight_folder): 27 | if model in file: 28 | file_name = file 29 | break 30 | assert file_name is not None 31 | except AssertionError: 32 | logger.exception("there is no matched file!") 33 | continue 34 | weight_files_path = os.path.join(weight_folder, file_name) 35 | file_load = torch.load(weight_files_path) 36 | backend_globals.loaded_model[model] = file_load 37 | 38 | def load_a_model(selected_model): 39 | """Load the weight file of the selected model. 40 | 41 | There are two cases: 42 | 1. the model has been loaded in advance (when the server started) 43 | 2. the model has not been loaded yet 44 | For the first case, the cached weights are used directly; 45 | for the second case, the weight file is located, loaded, and cached. 46 | 47 | :param selected_model: The name of the model to load 48 | :return: model: loaded model 49 | """ 50 | loaded_models = backend_globals.loaded_model.keys() 51 | 52 | if selected_model in loaded_models: 53 | model = eval(selected_model)() 54 | model.load_state_dict(backend_globals.loaded_model[selected_model], False) 55 | else: 56 | weight_folder = os.path.join(os.path.dirname(__file__), "../cv_model") 57 | file_name = None 58 | try: 59 | for file in os.listdir(weight_folder): 60 | if selected_model in file: 61 | file_name = file 62 | break 63 | assert file_name is not None 64 | except AssertionError: 65 | logger.exception("there is no matched file!") 66 | raise 67 | weight_files_path = os.path.join(weight_folder, file_name) 68 | file_load = torch.load(weight_files_path) 69 | backend_globals.loaded_model[selected_model] = file_load 70 | model = eval(selected_model)() 71 | model.load_state_dict(file_load, False) 72 | model.eval() 73 | return model 74 | 75 | 76 | def unload_model(model_name): 77 | """Unload the model which is specified by model_name. 78 | 79 | :param model_name: The name of the model to unload 80 | :return: None 81 | """ 82 | del backend_globals.loaded_model[model_name] 83 | 84 | 85 | def get_server_utilization(): 86 | """Get the cpu usage and memory usage of the device. 87 | 88 | Gets the cpu usage and memory usage of the device, then wraps the two values in a Server_Utilization_Reply.
87 | 88 | :return: server_utilization_reply, a data structure defined in grpc 89 | """ 90 | cpu_usage = psutil.cpu_percent() 91 | memory_usage = psutil.virtual_memory().percent 92 | server_utilization_reply = msg_transfer_pb2.Server_Utilization_Reply(cpu_usage=cpu_usage, memory_usage=memory_usage) 93 | return server_utilization_reply 94 | -------------------------------------------------------------------------------- /edge_main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import argparse 5 | import queue 6 | import threading 7 | import edge_globals 8 | import numpy as np 9 | from loguru import logger 10 | from local.sys_info import SysInfo 11 | from tools.read_config import read_config 12 | from local.local_store import DataStore 13 | from local.video_reader import VideoReader 14 | from local.decision_engine import DecisionEngine 15 | from model_manager.model_cache import load_models 16 | from config.model_info import edge_object_detection_model 17 | from edge_worker import local_worker, offload_worker, Task, id_gen, ThreadPoolExecutorWithQueueSizeLimit 18 | 19 | if __name__ == '__main__': 20 | parser = argparse.ArgumentParser() 21 | group = parser.add_mutually_exclusive_group() 22 | group.add_argument('-f', '--file', help="input video file or local camera") 23 | group.add_argument('-r', '--rtsp', help="RTSP camera", action='store_true') 24 | parser.add_argument( 25 | '-s', '--serv', type=int, default=1, 26 | help="input service demand, 1 for OBJECT_DETECTION", 27 | ) 28 | parser.add_argument('-i', '--interval', type=int, help="interval between reading two frames in ms", required=True) 29 | args = parser.parse_args() 30 | 31 | logger.add("log/client_{time}.log", level="INFO") 32 | 33 | file_type = edge_globals.IMAGE_TYPE 34 | serv_type = 1 # args.serv 35 | INTERVAL = args.interval / 1000.0 # convert into seconds 36 | input_file = args.file 37 | if input_file is not None: 38 | if os.path.isfile(input_file) is False and input_file.isdigit() is False: 39 | logger.error("input video file or local camera does not exist") 40 | sys.exit() 41 | elif input_file.isdigit(): 42 | input_file = int(input_file) 43 | 44 | if input_file is None and args.rtsp is False: 45 | logger.error("select either video file or RTSP camera") 46 | sys.exit() 47 | 48 | # obtain the control policy from the configuration file 49 | edge_policy = read_config("edge-setting", "control_policy") 50 | # load the video analytics models into memory) 51 | if edge_policy != "always_cloud_lowest_delay": 52 | logger.info("local models are loading...") 53 | edge_globals.loaded_model = load_models(edge_object_detection_model) 54 | logger.info("local models have loaded!") 55 | # create the objects for video reading, decision making, and information management 56 | reader = VideoReader(input_file, args.rtsp) 57 | edge_globals.sys_info = SysInfo() 58 | decision_engine = DecisionEngine(edge_globals.sys_info) 59 | edge_globals.datastore = DataStore() 60 | # start the thread pool for processing offloading requests 61 | WORKER_NUM = int(read_config("edge-setting", "worker_number")) 62 | executor = ThreadPoolExecutorWithQueueSizeLimit(max_workers=WORKER_NUM) 63 | 64 | # the queue for local processing task passing 65 | task_queue = queue.Queue(int(read_config("edge-setting", "queue_maxsize"))) 66 | # start the thread for local inference 67 | local_processor = threading.Thread(target=local_worker, args=(task_queue,)) 68 | local_processor.start() 69 | 70 | # n 
= 0 71 | # read frames from video file or camera in loop 72 | while True: 73 | 74 | frame = reader.read_frame() 75 | if frame is None: 76 | executor.shutdown(wait=True) 77 | local_processor.join(timeout=20) 78 | cloud_average_process_delay = np.average([p.value for p in edge_globals.sys_info.offload_delay]) 79 | logger.info("Service come over!") 80 | sys.exit() 81 | 82 | # obtain the CPU and memory usage 83 | edge_globals.sys_info.update_local_utilization() 84 | 85 | # create the inference as a task 86 | task_id = id_gen() 87 | t_start = time.time() 88 | task = Task(task_id, frame, serv_type, t_start) 89 | 90 | # make decision on video frame processing 91 | task = decision_engine.get_decision(edge_policy, task) 92 | 93 | # local processing on the edge 94 | if task.location == edge_globals.LOCAL: 95 | 96 | task_queue.put(task, block=True) 97 | edge_globals.sys_info.local_pending_task += 1 98 | 99 | # offload to the cloud for processing 100 | elif task.location == edge_globals.OFFLOAD: 101 | executor.submit(offload_worker, task) 102 | t_end = time.time() 103 | 104 | if t_end - t_start < INTERVAL: 105 | dur = INTERVAL - (t_end - t_start) 106 | time.sleep(dur) 107 | -------------------------------------------------------------------------------- /edge_worker.py: -------------------------------------------------------------------------------- 1 | import queue 2 | import sys 3 | import time 4 | import string 5 | import random 6 | import numpy as np 7 | from loguru import logger 8 | from concurrent import futures 9 | 10 | import edge_globals 11 | 12 | from tools.read_config import read_config 13 | from local.preprocessor import preprocess 14 | from frontend_server.offloading import send_frame 15 | from tools.transfer_files_tool import transfer_array_and_str 16 | from model_manager import object_detection, image_classification 17 | 18 | 19 | # the video frame handler of the forwarding server 20 | frame_handler = read_config("flask-url", "video_frame_url") 21 | 22 | 23 | # generate the id for a task 24 | def id_gen(size=6, chars=string.ascii_uppercase + string.digits): 25 | return ''.join(random.choice(chars) for _ in range(size)) 26 | 27 | 28 | class ThreadPoolExecutorWithQueueSizeLimit(futures.ThreadPoolExecutor): 29 | def __init__(self, maxsize=50, *args, **kwargs): 30 | super().__init__(*args, **kwargs) 31 | self._work_queue = queue.Queue(maxsize=maxsize) 32 | 33 | 34 | class Task: 35 | 36 | def __init__(self, task_id, frame, serv_type, t_start): 37 | self.task_id = task_id 38 | self.frame = frame 39 | self.serv_type = serv_type 40 | self.t_start = t_start 41 | self.selected_model = None 42 | self.location = None 43 | self.new_size = None 44 | self.new_qp = None 45 | 46 | 47 | def local_inference(task): 48 | """local inference for a video frame""" 49 | 50 | model = edge_globals.loaded_model[task.selected_model] 51 | if task.serv_type == edge_globals.OBJECT_DETECTION: 52 | result = object_detection.object_detection_api(task.frame, model, threshold=0.8) 53 | return result 54 | if task.serv_type == edge_globals.IMAGE_CLASSIFICATION: 55 | result = image_classification.image_classification(task.frame, model) 56 | return result 57 | 58 | 59 | def local_worker(task_queue): 60 | 61 | while True: 62 | 63 | # get a task from the queue 64 | try: 65 | task = task_queue.get(block=True) 66 | edge_globals.sys_info.local_pending_task -= 1 67 | except Exception: 68 | average_local_delay = np.average([p.value for p in edge_globals.sys_info.local_delay]) 69 | # logger.info("average local 
delay:"+str(average_local_delay)) 70 | sys.exit() 71 | else: 72 | # locally process the task 73 | t_start = task.t_start 74 | result = local_inference(task) 75 | t_end = time.time() 76 | processing_delay = t_end - t_start 77 | 78 | # logger.info("local_processing_delay:"+str(processing_delay)) 79 | # record the processing delay 80 | edge_globals.sys_info.append_local_delay(t_start, processing_delay) 81 | 82 | if task.serv_type == edge_globals.IMAGE_CLASSIFICATION: 83 | logger.info("image classification result:"+result) 84 | elif task.serv_type == edge_globals.OBJECT_DETECTION: 85 | logger.info("object detection works well! please go to info_store/handled_result to check.") 86 | edge_globals.datastore.store_image(result) 87 | 88 | 89 | def offload_worker(task): 90 | task = preprocess(task) 91 | file_size = sys.getsizeof(task.frame) 92 | 93 | # send the video frame to the server 94 | 95 | try: 96 | result_dict, start_time, processing_delay, arrive_transfer_server_time = \ 97 | send_frame(frame_handler, task.frame, task.selected_model) 98 | t_end = time.time() 99 | except Exception as err: 100 | logger.exception("offloading error") 101 | else: 102 | total_processing_delay = t_end - task.t_start 103 | # record the bandwidth and the processing delay 104 | bandwidth = file_size / arrive_transfer_server_time 105 | edge_globals.sys_info.append_bandwidth(task.t_start, bandwidth) 106 | edge_globals.sys_info.append_offload_delay(task.t_start, total_processing_delay) 107 | 108 | if task.serv_type == edge_globals.IMAGE_CLASSIFICATION: 109 | result = result_dict["prediction"] 110 | logger.info("offload:"+result) 111 | 112 | elif task.serv_type == edge_globals.OBJECT_DETECTION: 113 | 114 | frame_shape = tuple(int(s) for s in result_dict["frame_shape"][1:-1].split(",")) 115 | frame_handled = transfer_array_and_str(result_dict["result"], 'down').reshape(frame_shape) 116 | edge_globals.datastore.store_image(frame_handled) 117 | logger.info("cloud process image well!") 118 | -------------------------------------------------------------------------------- /backend_server/rpc_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | from concurrent import futures 3 | import grpc 4 | import sys 5 | 6 | import torch 7 | import backend_globals 8 | sys.path.append("../") 9 | from backend_server.model_controller import load_a_model, get_server_utilization, load_model_files_advance 10 | from model_manager import object_detection, image_classification 11 | from backend_server.grpc_config import msg_transfer_pb2_grpc, msg_transfer_pb2 12 | from tools.transfer_files_tool import transfer_array_and_str 13 | from tools.read_config import read_config 14 | from loguru import logger 15 | from config.model_info import cloud_object_detection_model 16 | from model_manager.model_cache import load_models 17 | 18 | 19 | object_detection_models = read_config("object-detection") 20 | image_classification_models = read_config("image-classification") 21 | 22 | logger.add("log/grpc-server_{time}.log") 23 | 24 | # if torch.cuda.is_available(): 25 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0" 26 | 27 | 28 | class MsgTransferServer(msg_transfer_pb2_grpc.MsgTransferServicer): 29 | """gRPC server stub. 30 | 31 | This is a gRPC server stub. Get the request from the gRPC client stub, 32 | process it and return the result, including image frame, load model request, system info and so on. 33 | """ 34 | 35 | def image_processor(self, request, context): 36 | """Image process interface. 
37 | 38 | Get the image process request from the client, process it and return the result. 39 | 40 | """ 41 | selected_model = request.model 42 | frame = request.frame 43 | frame_shape = tuple(int(s) for s in request.frame_shape[1:-1].split(",")) 44 | model = backend_globals.loaded_model[selected_model] 45 | img = transfer_array_and_str(frame, 'down').reshape(frame_shape) 46 | msg_reply = image_handler(img, model, selected_model) 47 | 48 | return msg_reply 49 | 50 | def get_server_utilization(self, request, context): 51 | """Server utilization interface 52 | 53 | Return server utilization to the client. 54 | """ 55 | server_utilization_reply = get_server_utilization() 56 | 57 | return server_utilization_reply 58 | 59 | def get_loaded_models_name(self, request, context): 60 | """Loaded model name interface 61 | 62 | Return the loaded model names in the server. 63 | """ 64 | loaded_model_name_reply = msg_transfer_pb2.Loaded_Model_Name_Reply( 65 | loaded_model_name=str(backend_globals.loaded_model.keys()) 66 | ) 67 | return loaded_model_name_reply 68 | 69 | def load_specified_model(self, request, context): 70 | """Load specified model interface. 71 | 72 | Load the specified model as the client request. 73 | """ 74 | 75 | specified_model = request.specified_model 76 | load_a_model(specified_model) 77 | load_specified_model_reply = msg_transfer_pb2.load_specified_model_Reply() 78 | return load_specified_model_reply 79 | 80 | 81 | def image_handler(img, model, selected_model): 82 | """Image process function 83 | 84 | :param img: image frame 85 | :param model: loaded model 86 | :param selected_model: loaded model name 87 | :return: processed result 88 | """ 89 | 90 | if selected_model in object_detection_models: 91 | frame_handled = object_detection.object_detection_api(img, model, threshold=0.8) 92 | frame_handled_shape = str(frame_handled.shape) 93 | img_str = transfer_array_and_str(frame_handled, 'up') 94 | msg_reply = msg_transfer_pb2.MsgReply( 95 | result=img_str, frame_shape=frame_handled_shape 96 | ) 97 | return msg_reply 98 | else: 99 | result = image_classification.image_classification(img, model) 100 | msg_reply = msg_transfer_pb2.MsgReply( 101 | result=result, frame_shape="" 102 | ) 103 | return msg_reply 104 | 105 | 106 | def serve(): 107 | 108 | logger.info("grpc server loading...") 109 | backend_globals.loaded_model = load_models(cloud_object_detection_model) 110 | logger.info("server models have loaded!") 111 | MAX_MESSAGE_LENGTH = 256 * 1024 * 1024 112 | server = grpc.server( 113 | futures.ThreadPoolExecutor(max_workers=1), 114 | maximum_concurrent_rpcs=10, 115 | options=[ 116 | ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), 117 | ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH), 118 | ] 119 | ) 120 | msg_transfer_pb2_grpc.add_MsgTransferServicer_to_server( 121 | MsgTransferServer(), server) 122 | server.add_insecure_port('[::]:50051') 123 | server.start() 124 | logger.info("server start!") 125 | server.wait_for_termination() 126 | 127 | 128 | if __name__ == '__main__': 129 | 130 | serve() 131 | 132 | -------------------------------------------------------------------------------- /test/model/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import torchvision 5 | from PIL import Image 6 | from torchvision import transforms as T 7 | import cv2 8 | from torchvision.models.detection import * 9 | from loguru import logger 10 | 11 | 12 | COCO_INSTANCE_CATEGORY_NAMES = [ 13 | 
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 14 | 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign', 15 | 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 16 | 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A', 17 | 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 18 | 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 19 | 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 20 | 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 21 | 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 22 | 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 23 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 24 | 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' 25 | ] 26 | 27 | n = 0 28 | 29 | def load_model(selected_model): 30 | file_name = None  # avoid a NameError below when no weight file matches 31 | weight_folder = os.path.join(os.path.dirname(__file__), "cv_model") 32 | try: 33 | for file in os.listdir(weight_folder): 34 | if selected_model in file: 35 | file_name = file 36 | break 37 | assert file_name is not None 38 | except AssertionError: 39 | 40 | logger.exception("there is no matched weight file!") 41 | else: 42 | 43 | weight_files_path = os.path.join(weight_folder, file_name) 44 | file_load = torch.load(weight_files_path) 45 | model = eval(selected_model)() 46 | model.load_state_dict(file_load, strict=False) 47 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 48 | model.eval() 49 | model.to(device) 50 | return model 51 | 52 | 53 | def get_prediction(img_path, model, threshold): 54 | 55 | img = Image.fromarray(cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)) 56 | #img = Image.open(img_path) 57 | transform = T.Compose([T.ToTensor()]) 58 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 59 | logger.debug(device) 60 | img = transform(img) 61 | img = img.to(device) 62 | pred = model([img]) 63 | # move predictions to the CPU before converting to numpy (works with or without CUDA) 64 | pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].detach().cpu().numpy())] 65 | pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())] 66 | pred_score = list(pred[0]['scores'].detach().cpu().numpy()) 67 | #else: 68 | # pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())] 69 | # pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())] 70 | # pred_score = list(pred[0]['scores'].detach().numpy()) 71 | pred_t = max([pred_score.index(x) for x in pred_score if x > threshold], default=-1)  # -1 when nothing passes the threshold 72 | pred_boxes = pred_boxes[:pred_t + 1] 73 | pred_class = pred_class[:pred_t + 1] 74 | return pred_boxes, pred_class 75 | 76 | 77 | 78 | def object_detection_api(img_path, model, threshold=0.5, rect_th=3, text_size=3, text_th=3): 79 | t6 = time.time() 80 | boxes, pred_cls = get_prediction(img_path, model, threshold) 81 | t7 = time.time() 82 | logger.debug("get coordinate:"+str(t7-t6)) 83 | 84 | # Get predictions 85 | # img = cv2.imread(img_path) 86 | # Read image with cv2 87 | img = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB) 88 | # Convert to RGB 89 | #t4 = time.time() 90 | global n 91 | for i in range(len(boxes)): 92 | # Draw Rectangle with the coordinates 93 | cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th) 94 | # Write the
prediction class 95 | cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), 96 | thickness=text_th) 97 | cv2.imwrite("./result/result"+str(n) +".jpg", img) 98 | #t5 = time.time() 99 | # logger.debug("draw time:"+str(t5-t4)) 100 | n += 1 101 | return img 102 | 103 | 104 | def reader(cap): 105 | 106 | if cap.isOpened(): 107 | ret, frame = cap.read() 108 | if ret: 109 | return frame 110 | else: 111 | cap.release() 112 | return None 113 | 114 | 115 | 116 | if __name__ == "__main__": 117 | 118 | 119 | selected_model = "fasterrcnn_mobilenet_v3_large_fpn" 120 | video = "VIRAT_S_000200_00_000100_000171.mp4" 121 | # video = "people.jpg" 122 | cap = cv2.VideoCapture(video) 123 | 124 | t1 = time.time() 125 | model = load_model(selected_model) 126 | t2 = time.time() 127 | frame_number = 0 128 | total_prediction_time = 0 129 | while True: 130 | 131 | frame = reader(cap) 132 | 133 | if frame is None: 134 | break 135 | 136 | frame_number += 1 137 | #"retinanet_resnet50_fpn" 138 | # "maskrcnn_resnet50_fpn" 139 | t3 = time.time() 140 | object_detection_api(frame, model, threshold=0.8) 141 | t4 = time.time() 142 | prediction_time = t4 - t3 143 | logger.debug("prediction time:"+str(t4-t3)) 144 | total_prediction_time += prediction_time 145 | #logger.debug("load model time:"+str(t2-t1)) 146 | #/logger.debug("prediction:"+str(t3-t2)) 147 | logger.debug("load model time:"+str(t2-t1)) 148 | logger.debug("average prediction time:"+str(total_prediction_time/frame_number)) 149 | -------------------------------------------------------------------------------- /backend_server/grpc_config/msg_transfer_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | from backend_server.grpc_config import msg_transfer_pb2 as server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2 6 | 7 | 8 | class MsgTransferStub(object): 9 | """Missing associated documentation comment in .proto file.""" 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 
16 | """ 17 | self.image_processor = channel.unary_unary( 18 | '/MsgTransfer/image_processor', 19 | request_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgRequest.SerializeToString, 20 | response_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgReply.FromString, 21 | ) 22 | self.get_server_utilization = channel.unary_unary( 23 | '/MsgTransfer/get_server_utilization', 24 | request_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Request.SerializeToString, 25 | response_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Reply.FromString, 26 | ) 27 | self.get_loaded_models_name = channel.unary_unary( 28 | '/MsgTransfer/get_loaded_models_name', 29 | request_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Request.SerializeToString, 30 | response_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Reply.FromString, 31 | ) 32 | self.load_specified_model = channel.unary_unary( 33 | '/MsgTransfer/load_specified_model', 34 | request_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Request.SerializeToString, 35 | response_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Reply.FromString, 36 | ) 37 | 38 | 39 | class MsgTransferServicer(object): 40 | """Missing associated documentation comment in .proto file.""" 41 | 42 | def image_processor(self, request, context): 43 | """Missing associated documentation comment in .proto file.""" 44 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 45 | context.set_details('Method not implemented!') 46 | raise NotImplementedError('Method not implemented!') 47 | 48 | def get_server_utilization(self, request, context): 49 | """Missing associated documentation comment in .proto file.""" 50 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 51 | context.set_details('Method not implemented!') 52 | raise NotImplementedError('Method not implemented!') 53 | 54 | def get_loaded_models_name(self, request, context): 55 | """Missing associated documentation comment in .proto file.""" 56 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 57 | context.set_details('Method not implemented!') 58 | raise NotImplementedError('Method not implemented!') 59 | 60 | def load_specified_model(self, request, context): 61 | """Missing associated documentation comment in .proto file.""" 62 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 63 | context.set_details('Method not implemented!') 64 | raise NotImplementedError('Method not implemented!') 65 | 66 | 67 | def add_MsgTransferServicer_to_server(servicer, server): 68 | rpc_method_handlers = { 69 | 'image_processor': grpc.unary_unary_rpc_method_handler( 70 | servicer.image_processor, 71 | request_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgRequest.FromString, 72 | response_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgReply.SerializeToString, 73 | ), 74 | 'get_server_utilization': grpc.unary_unary_rpc_method_handler( 75 | servicer.get_server_utilization, 76 | request_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Request.FromString, 77 | response_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Reply.SerializeToString, 78 | ), 79 | 'get_loaded_models_name': grpc.unary_unary_rpc_method_handler( 80 | 
servicer.get_loaded_models_name, 81 | request_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Request.FromString, 82 | response_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Reply.SerializeToString, 83 | ), 84 | 'load_specified_model': grpc.unary_unary_rpc_method_handler( 85 | servicer.load_specified_model, 86 | request_deserializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Request.FromString, 87 | response_serializer=server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Reply.SerializeToString, 88 | ), 89 | } 90 | generic_handler = grpc.method_handlers_generic_handler( 91 | 'MsgTransfer', rpc_method_handlers) 92 | server.add_generic_rpc_handlers((generic_handler,)) 93 | 94 | 95 | # This class is part of an EXPERIMENTAL API. 96 | class MsgTransfer(object): 97 | """Missing associated documentation comment in .proto file.""" 98 | 99 | @staticmethod 100 | def image_processor(request, 101 | target, 102 | options=(), 103 | channel_credentials=None, 104 | call_credentials=None, 105 | insecure=False, 106 | compression=None, 107 | wait_for_ready=None, 108 | timeout=None, 109 | metadata=None): 110 | return grpc.experimental.unary_unary(request, target, '/MsgTransfer/image_processor', 111 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgRequest.SerializeToString, 112 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.MsgReply.FromString, 113 | options, channel_credentials, 114 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 115 | 116 | @staticmethod 117 | def get_server_utilization(request, 118 | target, 119 | options=(), 120 | channel_credentials=None, 121 | call_credentials=None, 122 | insecure=False, 123 | compression=None, 124 | wait_for_ready=None, 125 | timeout=None, 126 | metadata=None): 127 | return grpc.experimental.unary_unary(request, target, '/MsgTransfer/get_server_utilization', 128 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Request.SerializeToString, 129 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Server_Utilization_Reply.FromString, 130 | options, channel_credentials, 131 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 132 | 133 | @staticmethod 134 | def get_loaded_models_name(request, 135 | target, 136 | options=(), 137 | channel_credentials=None, 138 | call_credentials=None, 139 | insecure=False, 140 | compression=None, 141 | wait_for_ready=None, 142 | timeout=None, 143 | metadata=None): 144 | return grpc.experimental.unary_unary(request, target, '/MsgTransfer/get_loaded_models_name', 145 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Request.SerializeToString, 146 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Loaded_Model_Name_Reply.FromString, 147 | options, channel_credentials, 148 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 149 | 150 | @staticmethod 151 | def load_specified_model(request, 152 | target, 153 | options=(), 154 | channel_credentials=None, 155 | call_credentials=None, 156 | insecure=False, 157 | compression=None, 158 | wait_for_ready=None, 159 | timeout=None, 160 | metadata=None): 161 | return grpc.experimental.unary_unary(request, target, '/MsgTransfer/load_specified_model', 162 | server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Request.SerializeToString, 163 | 
server_dot_grpc__config_dot_protos_dot_msg__transfer__pb2.Load_Specified_Model_Reply.FromString, 164 | options, channel_credentials, 165 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 166 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## I. Overview 2 | **SmartEye is an open source framework for real-time video analytics that leverages edge-cloud collaboration.** The system consists of 1) an on-edge processing layer which enables video preprocessing, model selection, on-edge inference, and task offloading; 2) a request forwarding layer which serves as a gateway of the cloud and forwards the offloaded tasks to backend workers; and 3) a backend inference layer that processes the offloaded tasks with specified DNN models. 3 | 4 | 5 | 6 | **- The Edge Node** performs video frame reading, video preprocessing, local inference, offloading, and decision making. The edge node reads video frames from the camera or video files and preprocesses the video frames. After preprocessing, the inference for a video frame can be performed on the edge node or offloaded to the cloud. The decision engine makes video preprocessing and offloading decisions based on the control policies. 7 | 8 | **- The Forwarding Server** serves as the cloud's gateway to respond to the edge node's offloading requests. The edge node submits an HTTP request to the forwarding server by attaching the video frame. The forwarding server dispatches the inference requests among the backend servers based on the forwarding policy. The forwarding server monitors the backend servers and uses the status information to make dispatch decisions. 9 | 10 | **- The Inference Servers** are provisioned in the cloud to conduct video analytics inferences. Each inference server loads several DNN models for video analytics. The inference servers receive the offloaded tasks from the forwarding server and make inferences with the specified models. 11 | 12 | ## II. Installation 13 | 14 | System Requirements 15 | 16 | * [ubuntu 18.04](http://releases.ubuntu.com/18.04/) 17 | * [Python 3.6.9](https://www.python.org/downloads/release/python-369/) 18 | * [Jetpack 4.5](https://developer.nvidia.com/jetpack-sdk-45-archive) 19 | * [cuda 11.0](https://developer.nvidia.com/cuda-11.0-update1-download-archive?target_os=Linux&target_arch=x86_64) 20 | * [pytorch 1.8.0](https://pytorch.org/) 21 | 22 | Please follow the above links to install the dependent software. 23 | 24 | You may also need to install some missing libraries, e.g., opencv, torchvision, torch, psutil, grpc, flask. 25 | There are three parts in this installation: 26 | 27 | Part 1: install libraries in the edge node. 28 | ```bash 29 | sudo apt update 30 | sudo apt install libopencv-dev 31 | pip3 install loguru 32 | pip3 install psutil 33 | pip3 install configparser 34 | ``` 35 | 36 | Part 2: install libraries in the forwarding server. 37 | ```bash 38 | pip3 install grpcio 39 | pip3 install grpcio-tools googleapis-common-protos 40 | pip3 install flask 41 | pip3 install apscheduler 42 | pip3 install loguru 43 | ``` 44 | 45 | Part 3: install libraries in the backend inference server. 46 | ```bash 47 | sudo apt update 48 | sudo apt install libopencv-dev 49 | pip3 install loguru 50 | pip3 install psutil 51 | pip3 install grpcio 52 | pip3 install grpcio-tools googleapis-common-protos 53 | 54 | ``` 55 | 56 | ### 1. Clone the code from GitHub. 57 | You need to install the Git Large File Storage (LFS) to clone the large DNN models from this repository. 58 | For Debian-based Linux users, you can download and install it with the following commands. 59 | ```bash 60 | curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash 61 | sudo apt-get install git-lfs 62 | git init 63 | git lfs install 64 | ``` 65 | 66 | After installing Git LFS, you can clone the source code of this project from GitHub. 67 | 68 | ```bash 69 | git clone https://github.com/MSNLAB/SmartEye.git 70 | ``` 71 | ### 2. Revise the configuration file 72 | 73 | Revise the configuration file **SmartEye/config/config.ini**. 74 | 75 | The edge node, forwarding server, and backend inference servers read the configuration from **config/config.ini**. 76 | Make sure each item is set to an appropriate value according to your system configuration. 77 | 78 | #### Configure the IP addresses of the backend inference servers. 79 | Replace 'server_1_ip' with the real IP address of your server, and leave the port number unchanged. 80 | Under the [grpc-url] section, you can add as many lines in this format as you have backend inference servers. 81 | 82 | ```bash 83 | 84 | [grpc-url] 85 | url0=server_1_ip:50051 86 | url1=server_2_ip:50051 87 | 88 | ``` 89 | 90 | #### Configure the IP address of the forwarding server. 91 | Replace forwarding_server_ip with the real IP address of your forwarding server, and leave the port number and the other parts unchanged. 92 | 93 | ```bash 94 | 95 | [flask-url] 96 | video_frame_url=http://forwarding_server_ip:5000/image_handler 97 | 98 | ```
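Both the servers and the edge node look these settings up at runtime through tools/read_config.py. The snippet below is a minimal sketch of that lookup, assuming it is run from the repository root against a filled-in **config/config.ini** (the printed values are placeholders):

```python
from tools.read_config import read_config

# a (section, key) lookup returns that key's value as a string
frame_handler = read_config("flask-url", "video_frame_url")
print(frame_handler)  # http://forwarding_server_ip:5000/image_handler

# a section-only lookup returns the values of all keys in the section,
# e.g. the backend gRPC endpoints configured above
grpc_urls = read_config("grpc-url")
print(grpc_urls)      # ['server_1_ip:50051', 'server_2_ip:50051']
```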
99 | 100 | 101 | ### 3. Deployment 102 | 103 | You can copy the configured source code to the edge node, the forwarding server, and the backend inference servers for deployment. You can use the scp command to do it. 104 | ```bash 105 | scp -r SmartEye/ server_account@server_ip_address:target_path 106 | ``` 107 | 108 | ## III. Usage 109 | 110 | There are three steps to start the service: 111 | 112 | **Step 1:** start every backend inference server. If you have more than one inference server, start them one by one. The GPU used by the inference server can be specified in the command line. 113 | ```bash 114 | cd ~/SmartEye/backend_server/ 115 | CUDA_VISIBLE_DEVICES=gpu_device_number python3 rpc_server.py 116 | ``` 117 | 118 | If you are using servers without a GPU, you can start the server with the following command line. 119 | ```bash 120 | cd ~/SmartEye/backend_server/ 121 | nohup python3 rpc_server.py > error_msg & 122 | ``` 123 | 124 | **Step 2:** start the forwarding server. 125 | ```bash 126 | cd ~/SmartEye/frontend_server/ 127 | nohup python3 forwarding_server.py > server.log 2>&1 & 128 | ``` 129 | 130 | **Step 3:** start the edge node. 131 | 132 | #### Use the following command for reading video frames from a local video file 133 | ```bash 134 | cd ~/SmartEye 135 | python3 edge_main.py -f your_video_path -i 50 136 | ``` 137 | 138 | -f, --file: the path of a local video file 139 | -i, --interval: type int, interval between reading two frames in milliseconds (ms) 140 | 141 | #### If the edge node reads video frames from an RTSP camera, you need to first configure **config/config.ini**. 142 | ```bash 143 | [camera-info] 144 | account=your_account 145 | password=your_password 146 | ip_address=camera_ip 147 | channel=1 148 | ``` 149 | Then you can use the following command for reading from the specified camera. 150 | ```bash 151 | cd ~/SmartEye 152 | python3 edge_main.py --rtsp -i 50 153 | ``` 154 | -r, --rtsp: use the RTSP camera 155 | -i, --interval: type int, interval between reading two frames in ms 156 | 157 | #### If the edge node reads from a local physically connected camera, use the following command 158 | Replace device_no with your real camera device number, e.g., 0. 159 | ```bash 160 | cd ~/SmartEye 161 | python3 edge_main.py -f device_no -i 50 162 | ``` 163 | -f, --file: the device number of a local camera 164 | -i, --interval: type int, interval between reading two frames in milliseconds (ms) 165 | 166 | #### You can visually check the object detection results under the folder info_store/handled_result. 167 | 168 | ## IV. Policy Configuration or Customization 169 | 170 | You can choose one of the following three policies by changing the value of **control_policy** under the **edge-setting** section in **config/config.ini**. 171 | 172 | There are three policies you can use to process your video: 173 | 1. always_local_fastest_model 174 | 2. always_cloud_lowest_delay 175 | 3. threshold_offload_policy 176 | 177 | **always_local_fastest_model:** the inference is only conducted on the edge with the fastest model without preprocessing. 178 | **always_cloud_lowest_delay:** the inference is conducted only in the cloud with the most accurate model, and video frames are downsized before offloading. 179 | **threshold_offload_policy:** if the number of local pending tasks is less than a threshold (i.e., 2), the inference for the next video frame will be conducted on the edge node with the fastest model; otherwise, the next video frame will be first downsized and then offloaded to the cloud for inference with the most accurate model. 180 | 181 | **If you want to customize the control policy of the edge node, you can proceed with the following steps:** 182 | 1. define a **policy function** in **decision_engine.py**, and implement the policies for video frame preprocessing, model selection, and offloading (see the sketch after this list). 183 | 2. add the function name into **self.policy_set** of the __init__() function of the class **DecisionEngine**. 184 | 3. change the value of **control_policy** to the name of your policy function under the **edge-setting** section in **config/config.ini**.
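As an illustration, the following is a minimal sketch of such a policy function, modeled on the threshold_offload_policy described above. The method name my_threshold_policy, the registration style in __init__(), and the chosen model names (taken from the weight files under **cv_model/**) are assumptions for this example; the class body is condensed and is not the real **DecisionEngine** implementation:

```python
import edge_globals


class DecisionEngine:

    def __init__(self, sys_info):
        self.sys_info = sys_info
        # step 2: register the policy function's name (hypothetical style)
        self.policy_set = ["my_threshold_policy"]

    def my_threshold_policy(self, task):
        """Offload to the cloud once too many local tasks are pending."""
        if self.sys_info.local_pending_task < 2:
            # few pending tasks: process on the edge with a fast local model
            task.location = edge_globals.LOCAL
            task.selected_model = "fasterrcnn_mobilenet_v3_large_320_fpn"
        else:
            # backlog building up: offload to the cloud's most accurate model
            task.location = edge_globals.OFFLOAD
            task.selected_model = "fasterrcnn_resnet50_fpn"
        return task
```

A policy receives a Task (see **edge_worker.py**) and must return it with task.location set to edge_globals.LOCAL or edge_globals.OFFLOAD and task.selected_model set to the name of a loaded model, since local_worker and offload_worker read exactly these fields.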
185 | 186 | 187 | ## V. License 188 | THIS SOFTWARE IS RELEASED UNDER THE MIT LICENSE (MIT) 189 | Copyright (c), 2021, NUST SCE 190 | 191 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 192 | 193 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 194 | 195 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 196 | 197 | ## VI. Contact 198 | If you find any problem with this software, please feel free to contact us, your feedback is appreciated. 199 | 200 | Email: guanyugao@gmail.com; gygao@njust.edu.cn 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | -------------------------------------------------------------------------------- /backend_server/grpc_config/msg_transfer_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: server/grpc_config/protos/msg_transfer.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import message as _message 7 | from google.protobuf import reflection as _reflection 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor.FileDescriptor( 17 | name='server/grpc_config/protos/msg_transfer.proto', 18 | package='', 19 | syntax='proto3', 20 | serialized_options=None, 21 | create_key=_descriptor._internal_create_key, 22 | serialized_pb=b'\n,server/grpc_config/protos/msg_transfer.proto\"?\n\nMsgRequest\x12\r\n\x05model\x18\x01 \x01(\t\x12\r\n\x05\x66rame\x18\x02 \x01(\t\x12\x13\n\x0b\x66rame_shape\x18\x03 \x01(\t\"/\n\x08MsgReply\x12\x0e\n\x06result\x18\x02 \x01(\t\x12\x13\n\x0b\x66rame_shape\x18\x03 \x01(\t\"\x1c\n\x1aServer_Utilization_Request\"C\n\x18Server_Utilization_Reply\x12\x11\n\tcpu_usage\x18\x01 \x01(\x02\x12\x14\n\x0cmemory_usage\x18\x02 \x01(\x02\"\x1b\n\x19Loaded_Model_Name_Request\"4\n\x17Loaded_Model_Name_Reply\x12\x19\n\x11loaded_model_name\x18\x01 \x01(\t\"7\n\x1cLoad_Specified_Model_Request\x12\x17\n\x0fspecified_model\x18\x01 \x01(\t\"\x1c\n\x1aLoad_Specified_Model_Reply2\xb6\x02\n\x0bMsgTransfer\x12+\n\x0fimage_processor\x12\x0b.MsgRequest\x1a\t.MsgReply\"\x00\x12R\n\x16get_server_utilization\x12\x1b.Server_Utilization_Request\x1a\x19.Server_Utilization_Reply\"\x00\x12P\n\x16get_loaded_models_name\x12\x1a.Loaded_Model_Name_Request\x1a\x18.Loaded_Model_Name_Reply\"\x00\x12T\n\x14load_specified_model\x12\x1d.Load_Specified_Model_Request\x1a\x1b.Load_Specified_Model_Reply\"\x00\x62\x06proto3' 23 | ) 24 | 25 | 26 | 27 | 28 | _MSGREQUEST = _descriptor.Descriptor( 29 | name='MsgRequest', 30 | full_name='MsgRequest', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | create_key=_descriptor._internal_create_key, 35 | fields=[ 36 | _descriptor.FieldDescriptor( 37 | name='model', full_name='MsgRequest.model', index=0, 38 | number=1, type=9, cpp_type=9, label=1, 39 | has_default_value=False, default_value=b"".decode('utf-8'), 40 | message_type=None, enum_type=None, containing_type=None, 41 | is_extension=False, extension_scope=None, 42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 43 | _descriptor.FieldDescriptor( 44 | name='frame', full_name='MsgRequest.frame', index=1, 45 | number=2, type=9, cpp_type=9, label=1, 46 | has_default_value=False, default_value=b"".decode('utf-8'), 47 | message_type=None, enum_type=None, containing_type=None, 48 | is_extension=False, 
extension_scope=None, 49 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 50 | _descriptor.FieldDescriptor( 51 | name='frame_shape', full_name='MsgRequest.frame_shape', index=2, 52 | number=3, type=9, cpp_type=9, label=1, 53 | has_default_value=False, default_value=b"".decode('utf-8'), 54 | message_type=None, enum_type=None, containing_type=None, 55 | is_extension=False, extension_scope=None, 56 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 57 | ], 58 | extensions=[ 59 | ], 60 | nested_types=[], 61 | enum_types=[ 62 | ], 63 | serialized_options=None, 64 | is_extendable=False, 65 | syntax='proto3', 66 | extension_ranges=[], 67 | oneofs=[ 68 | ], 69 | serialized_start=48, 70 | serialized_end=111, 71 | ) 72 | 73 | 74 | _MSGREPLY = _descriptor.Descriptor( 75 | name='MsgReply', 76 | full_name='MsgReply', 77 | filename=None, 78 | file=DESCRIPTOR, 79 | containing_type=None, 80 | create_key=_descriptor._internal_create_key, 81 | fields=[ 82 | _descriptor.FieldDescriptor( 83 | name='result', full_name='MsgReply.result', index=0, 84 | number=2, type=9, cpp_type=9, label=1, 85 | has_default_value=False, default_value=b"".decode('utf-8'), 86 | message_type=None, enum_type=None, containing_type=None, 87 | is_extension=False, extension_scope=None, 88 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 89 | _descriptor.FieldDescriptor( 90 | name='frame_shape', full_name='MsgReply.frame_shape', index=1, 91 | number=3, type=9, cpp_type=9, label=1, 92 | has_default_value=False, default_value=b"".decode('utf-8'), 93 | message_type=None, enum_type=None, containing_type=None, 94 | is_extension=False, extension_scope=None, 95 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 96 | ], 97 | extensions=[ 98 | ], 99 | nested_types=[], 100 | enum_types=[ 101 | ], 102 | serialized_options=None, 103 | is_extendable=False, 104 | syntax='proto3', 105 | extension_ranges=[], 106 | oneofs=[ 107 | ], 108 | serialized_start=113, 109 | serialized_end=160, 110 | ) 111 | 112 | 113 | _SERVER_UTILIZATION_REQUEST = _descriptor.Descriptor( 114 | name='Server_Utilization_Request', 115 | full_name='Server_Utilization_Request', 116 | filename=None, 117 | file=DESCRIPTOR, 118 | containing_type=None, 119 | create_key=_descriptor._internal_create_key, 120 | fields=[ 121 | ], 122 | extensions=[ 123 | ], 124 | nested_types=[], 125 | enum_types=[ 126 | ], 127 | serialized_options=None, 128 | is_extendable=False, 129 | syntax='proto3', 130 | extension_ranges=[], 131 | oneofs=[ 132 | ], 133 | serialized_start=162, 134 | serialized_end=190, 135 | ) 136 | 137 | 138 | _SERVER_UTILIZATION_REPLY = _descriptor.Descriptor( 139 | name='Server_Utilization_Reply', 140 | full_name='Server_Utilization_Reply', 141 | filename=None, 142 | file=DESCRIPTOR, 143 | containing_type=None, 144 | create_key=_descriptor._internal_create_key, 145 | fields=[ 146 | _descriptor.FieldDescriptor( 147 | name='cpu_usage', full_name='Server_Utilization_Reply.cpu_usage', index=0, 148 | number=1, type=2, cpp_type=6, label=1, 149 | has_default_value=False, default_value=float(0), 150 | message_type=None, enum_type=None, containing_type=None, 151 | is_extension=False, extension_scope=None, 152 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 153 | _descriptor.FieldDescriptor( 154 | name='memory_usage', full_name='Server_Utilization_Reply.memory_usage', index=1, 155 | number=2, 
type=2, cpp_type=6, label=1, 156 | has_default_value=False, default_value=float(0), 157 | message_type=None, enum_type=None, containing_type=None, 158 | is_extension=False, extension_scope=None, 159 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 160 | ], 161 | extensions=[ 162 | ], 163 | nested_types=[], 164 | enum_types=[ 165 | ], 166 | serialized_options=None, 167 | is_extendable=False, 168 | syntax='proto3', 169 | extension_ranges=[], 170 | oneofs=[ 171 | ], 172 | serialized_start=192, 173 | serialized_end=259, 174 | ) 175 | 176 | 177 | _LOADED_MODEL_NAME_REQUEST = _descriptor.Descriptor( 178 | name='Loaded_Model_Name_Request', 179 | full_name='Loaded_Model_Name_Request', 180 | filename=None, 181 | file=DESCRIPTOR, 182 | containing_type=None, 183 | create_key=_descriptor._internal_create_key, 184 | fields=[ 185 | ], 186 | extensions=[ 187 | ], 188 | nested_types=[], 189 | enum_types=[ 190 | ], 191 | serialized_options=None, 192 | is_extendable=False, 193 | syntax='proto3', 194 | extension_ranges=[], 195 | oneofs=[ 196 | ], 197 | serialized_start=261, 198 | serialized_end=288, 199 | ) 200 | 201 | 202 | _LOADED_MODEL_NAME_REPLY = _descriptor.Descriptor( 203 | name='Loaded_Model_Name_Reply', 204 | full_name='Loaded_Model_Name_Reply', 205 | filename=None, 206 | file=DESCRIPTOR, 207 | containing_type=None, 208 | create_key=_descriptor._internal_create_key, 209 | fields=[ 210 | _descriptor.FieldDescriptor( 211 | name='loaded_model_name', full_name='Loaded_Model_Name_Reply.loaded_model_name', index=0, 212 | number=1, type=9, cpp_type=9, label=1, 213 | has_default_value=False, default_value=b"".decode('utf-8'), 214 | message_type=None, enum_type=None, containing_type=None, 215 | is_extension=False, extension_scope=None, 216 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 217 | ], 218 | extensions=[ 219 | ], 220 | nested_types=[], 221 | enum_types=[ 222 | ], 223 | serialized_options=None, 224 | is_extendable=False, 225 | syntax='proto3', 226 | extension_ranges=[], 227 | oneofs=[ 228 | ], 229 | serialized_start=290, 230 | serialized_end=342, 231 | ) 232 | 233 | 234 | _LOAD_SPECIFIED_MODEL_REQUEST = _descriptor.Descriptor( 235 | name='Load_Specified_Model_Request', 236 | full_name='Load_Specified_Model_Request', 237 | filename=None, 238 | file=DESCRIPTOR, 239 | containing_type=None, 240 | create_key=_descriptor._internal_create_key, 241 | fields=[ 242 | _descriptor.FieldDescriptor( 243 | name='specified_model', full_name='Load_Specified_Model_Request.specified_model', index=0, 244 | number=1, type=9, cpp_type=9, label=1, 245 | has_default_value=False, default_value=b"".decode('utf-8'), 246 | message_type=None, enum_type=None, containing_type=None, 247 | is_extension=False, extension_scope=None, 248 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 249 | ], 250 | extensions=[ 251 | ], 252 | nested_types=[], 253 | enum_types=[ 254 | ], 255 | serialized_options=None, 256 | is_extendable=False, 257 | syntax='proto3', 258 | extension_ranges=[], 259 | oneofs=[ 260 | ], 261 | serialized_start=344, 262 | serialized_end=399, 263 | ) 264 | 265 | 266 | _LOAD_SPECIFIED_MODEL_REPLY = _descriptor.Descriptor( 267 | name='Load_Specified_Model_Reply', 268 | full_name='Load_Specified_Model_Reply', 269 | filename=None, 270 | file=DESCRIPTOR, 271 | containing_type=None, 272 | create_key=_descriptor._internal_create_key, 273 | fields=[ 274 | ], 275 | extensions=[ 276 | ], 277 | nested_types=[], 
278 | enum_types=[ 279 | ], 280 | serialized_options=None, 281 | is_extendable=False, 282 | syntax='proto3', 283 | extension_ranges=[], 284 | oneofs=[ 285 | ], 286 | serialized_start=401, 287 | serialized_end=429, 288 | ) 289 | 290 | DESCRIPTOR.message_types_by_name['MsgRequest'] = _MSGREQUEST 291 | DESCRIPTOR.message_types_by_name['MsgReply'] = _MSGREPLY 292 | DESCRIPTOR.message_types_by_name['Server_Utilization_Request'] = _SERVER_UTILIZATION_REQUEST 293 | DESCRIPTOR.message_types_by_name['Server_Utilization_Reply'] = _SERVER_UTILIZATION_REPLY 294 | DESCRIPTOR.message_types_by_name['Loaded_Model_Name_Request'] = _LOADED_MODEL_NAME_REQUEST 295 | DESCRIPTOR.message_types_by_name['Loaded_Model_Name_Reply'] = _LOADED_MODEL_NAME_REPLY 296 | DESCRIPTOR.message_types_by_name['Load_Specified_Model_Request'] = _LOAD_SPECIFIED_MODEL_REQUEST 297 | DESCRIPTOR.message_types_by_name['Load_Specified_Model_Reply'] = _LOAD_SPECIFIED_MODEL_REPLY 298 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 299 | 300 | MsgRequest = _reflection.GeneratedProtocolMessageType('MsgRequest', (_message.Message,), { 301 | 'DESCRIPTOR' : _MSGREQUEST, 302 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 303 | # @@protoc_insertion_point(class_scope:MsgRequest) 304 | }) 305 | _sym_db.RegisterMessage(MsgRequest) 306 | 307 | MsgReply = _reflection.GeneratedProtocolMessageType('MsgReply', (_message.Message,), { 308 | 'DESCRIPTOR' : _MSGREPLY, 309 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 310 | # @@protoc_insertion_point(class_scope:MsgReply) 311 | }) 312 | _sym_db.RegisterMessage(MsgReply) 313 | 314 | Server_Utilization_Request = _reflection.GeneratedProtocolMessageType('Server_Utilization_Request', (_message.Message,), { 315 | 'DESCRIPTOR' : _SERVER_UTILIZATION_REQUEST, 316 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 317 | # @@protoc_insertion_point(class_scope:Server_Utilization_Request) 318 | }) 319 | _sym_db.RegisterMessage(Server_Utilization_Request) 320 | 321 | Server_Utilization_Reply = _reflection.GeneratedProtocolMessageType('Server_Utilization_Reply', (_message.Message,), { 322 | 'DESCRIPTOR' : _SERVER_UTILIZATION_REPLY, 323 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 324 | # @@protoc_insertion_point(class_scope:Server_Utilization_Reply) 325 | }) 326 | _sym_db.RegisterMessage(Server_Utilization_Reply) 327 | 328 | Loaded_Model_Name_Request = _reflection.GeneratedProtocolMessageType('Loaded_Model_Name_Request', (_message.Message,), { 329 | 'DESCRIPTOR' : _LOADED_MODEL_NAME_REQUEST, 330 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 331 | # @@protoc_insertion_point(class_scope:Loaded_Model_Name_Request) 332 | }) 333 | _sym_db.RegisterMessage(Loaded_Model_Name_Request) 334 | 335 | Loaded_Model_Name_Reply = _reflection.GeneratedProtocolMessageType('Loaded_Model_Name_Reply', (_message.Message,), { 336 | 'DESCRIPTOR' : _LOADED_MODEL_NAME_REPLY, 337 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 338 | # @@protoc_insertion_point(class_scope:Loaded_Model_Name_Reply) 339 | }) 340 | _sym_db.RegisterMessage(Loaded_Model_Name_Reply) 341 | 342 | Load_Specified_Model_Request = _reflection.GeneratedProtocolMessageType('Load_Specified_Model_Request', (_message.Message,), { 343 | 'DESCRIPTOR' : _LOAD_SPECIFIED_MODEL_REQUEST, 344 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 345 | # @@protoc_insertion_point(class_scope:Load_Specified_Model_Request) 346 | }) 347 | _sym_db.RegisterMessage(Load_Specified_Model_Request) 348 | 349 | 
Load_Specified_Model_Reply = _reflection.GeneratedProtocolMessageType('Load_Specified_Model_Reply', (_message.Message,), { 350 | 'DESCRIPTOR' : _LOAD_SPECIFIED_MODEL_REPLY, 351 | '__module__' : 'server.grpc_config.protos.msg_transfer_pb2' 352 | # @@protoc_insertion_point(class_scope:Load_Specified_Model_Reply) 353 | }) 354 | _sym_db.RegisterMessage(Load_Specified_Model_Reply) 355 | 356 | 357 | 358 | _MSGTRANSFER = _descriptor.ServiceDescriptor( 359 | name='MsgTransfer', 360 | full_name='MsgTransfer', 361 | file=DESCRIPTOR, 362 | index=0, 363 | serialized_options=None, 364 | create_key=_descriptor._internal_create_key, 365 | serialized_start=432, 366 | serialized_end=742, 367 | methods=[ 368 | _descriptor.MethodDescriptor( 369 | name='image_processor', 370 | full_name='MsgTransfer.image_processor', 371 | index=0, 372 | containing_service=None, 373 | input_type=_MSGREQUEST, 374 | output_type=_MSGREPLY, 375 | serialized_options=None, 376 | create_key=_descriptor._internal_create_key, 377 | ), 378 | _descriptor.MethodDescriptor( 379 | name='get_server_utilization', 380 | full_name='MsgTransfer.get_server_utilization', 381 | index=1, 382 | containing_service=None, 383 | input_type=_SERVER_UTILIZATION_REQUEST, 384 | output_type=_SERVER_UTILIZATION_REPLY, 385 | serialized_options=None, 386 | create_key=_descriptor._internal_create_key, 387 | ), 388 | _descriptor.MethodDescriptor( 389 | name='get_loaded_models_name', 390 | full_name='MsgTransfer.get_loaded_models_name', 391 | index=2, 392 | containing_service=None, 393 | input_type=_LOADED_MODEL_NAME_REQUEST, 394 | output_type=_LOADED_MODEL_NAME_REPLY, 395 | serialized_options=None, 396 | create_key=_descriptor._internal_create_key, 397 | ), 398 | _descriptor.MethodDescriptor( 399 | name='load_specified_model', 400 | full_name='MsgTransfer.load_specified_model', 401 | index=3, 402 | containing_service=None, 403 | input_type=_LOAD_SPECIFIED_MODEL_REQUEST, 404 | output_type=_LOAD_SPECIFIED_MODEL_REPLY, 405 | serialized_options=None, 406 | create_key=_descriptor._internal_create_key, 407 | ), 408 | ]) 409 | _sym_db.RegisterServiceDescriptor(_MSGTRANSFER) 410 | 411 | DESCRIPTOR.services_by_name['MsgTransfer'] = _MSGTRANSFER 412 | 413 | # @@protoc_insertion_point(module_scope) 414 | -------------------------------------------------------------------------------- /model_manager/imagenet_classes.txt: -------------------------------------------------------------------------------- 1 | 0, tench 2 | 1, goldfish 3 | 2, great_white_shark 4 | 3, tiger_shark 5 | 4, hammerhead 6 | 5, electric_ray 7 | 6, stingray 8 | 7, cock 9 | 8, hen 10 | 9, ostrich 11 | 10, brambling 12 | 11, goldfinch 13 | 12, house_finch 14 | 13, junco 15 | 14, indigo_bunting 16 | 15, robin 17 | 16, bulbul 18 | 17, jay 19 | 18, magpie 20 | 19, chickadee 21 | 20, water_ouzel 22 | 21, kite 23 | 22, bald_eagle 24 | 23, vulture 25 | 24, great_grey_owl 26 | 25, European_fire_salamander 27 | 26, common_newt 28 | 27, eft 29 | 28, spotted_salamander 30 | 29, axolotl 31 | 30, bullfrog 32 | 31, tree_frog 33 | 32, tailed_frog 34 | 33, loggerhead 35 | 34, leatherback_turtle 36 | 35, mud_turtle 37 | 36, terrapin 38 | 37, box_turtle 39 | 38, banded_gecko 40 | 39, common_iguana 41 | 40, American_chameleon 42 | 41, whiptail 43 | 42, agama 44 | 43, frilled_lizard 45 | 44, alligator_lizard 46 | 45, Gila_monster 47 | 46, green_lizard 48 | 47, African_chameleon 49 | 48, Komodo_dragon 50 | 49, African_crocodile 51 | 50, American_alligator 52 | 51, triceratops 53 | 52, thunder_snake 54 | 53, ringneck_snake 55 | 
54, hognose_snake 56 | 55, green_snake 57 | 56, king_snake 58 | 57, garter_snake 59 | 58, water_snake 60 | 59, vine_snake 61 | 60, night_snake 62 | 61, boa_constrictor 63 | 62, rock_python 64 | 63, Indian_cobra 65 | 64, green_mamba 66 | 65, sea_snake 67 | 66, horned_viper 68 | 67, diamondback 69 | 68, sidewinder 70 | 69, trilobite 71 | 70, harvestman 72 | 71, scorpion 73 | 72, black_and_gold_garden_spider 74 | 73, barn_spider 75 | 74, garden_spider 76 | 75, black_widow 77 | 76, tarantula 78 | 77, wolf_spider 79 | 78, tick 80 | 79, centipede 81 | 80, black_grouse 82 | 81, ptarmigan 83 | 82, ruffed_grouse 84 | 83, prairie_chicken 85 | 84, peacock 86 | 85, quail 87 | 86, partridge 88 | 87, African_grey 89 | 88, macaw 90 | 89, sulphur-crested_cockatoo 91 | 90, lorikeet 92 | 91, coucal 93 | 92, bee_eater 94 | 93, hornbill 95 | 94, hummingbird 96 | 95, jacamar 97 | 96, toucan 98 | 97, drake 99 | 98, red-breasted_merganser 100 | 99, goose 101 | 100, black_swan 102 | 101, tusker 103 | 102, echidna 104 | 103, platypus 105 | 104, wallaby 106 | 105, koala 107 | 106, wombat 108 | 107, jellyfish 109 | 108, sea_anemone 110 | 109, brain_coral 111 | 110, flatworm 112 | 111, nematode 113 | 112, conch 114 | 113, snail 115 | 114, slug 116 | 115, sea_slug 117 | 116, chiton 118 | 117, chambered_nautilus 119 | 118, Dungeness_crab 120 | 119, rock_crab 121 | 120, fiddler_crab 122 | 121, king_crab 123 | 122, American_lobster 124 | 123, spiny_lobster 125 | 124, crayfish 126 | 125, hermit_crab 127 | 126, isopod 128 | 127, white_stork 129 | 128, black_stork 130 | 129, spoonbill 131 | 130, flamingo 132 | 131, little_blue_heron 133 | 132, American_egret 134 | 133, bittern 135 | 134, crane 136 | 135, limpkin 137 | 136, European_gallinule 138 | 137, American_coot 139 | 138, bustard 140 | 139, ruddy_turnstone 141 | 140, red-backed_sandpiper 142 | 141, redshank 143 | 142, dowitcher 144 | 143, oystercatcher 145 | 144, pelican 146 | 145, king_penguin 147 | 146, albatross 148 | 147, grey_whale 149 | 148, killer_whale 150 | 149, dugong 151 | 150, sea_lion 152 | 151, Chihuahua 153 | 152, Japanese_spaniel 154 | 153, Maltese_dog 155 | 154, Pekinese 156 | 155, Shih-Tzu 157 | 156, Blenheim_spaniel 158 | 157, papillon 159 | 158, toy_terrier 160 | 159, Rhodesian_ridgeback 161 | 160, Afghan_hound 162 | 161, basset 163 | 162, beagle 164 | 163, bloodhound 165 | 164, bluetick 166 | 165, black-and-tan_coonhound 167 | 166, Walker_hound 168 | 167, English_foxhound 169 | 168, redbone 170 | 169, borzoi 171 | 170, Irish_wolfhound 172 | 171, Italian_greyhound 173 | 172, whippet 174 | 173, Ibizan_hound 175 | 174, Norwegian_elkhound 176 | 175, otterhound 177 | 176, Saluki 178 | 177, Scottish_deerhound 179 | 178, Weimaraner 180 | 179, Staffordshire_bullterrier 181 | 180, American_Staffordshire_terrier 182 | 181, Bedlington_terrier 183 | 182, Border_terrier 184 | 183, Kerry_blue_terrier 185 | 184, Irish_terrier 186 | 185, Norfolk_terrier 187 | 186, Norwich_terrier 188 | 187, Yorkshire_terrier 189 | 188, wire-haired_fox_terrier 190 | 189, Lakeland_terrier 191 | 190, Sealyham_terrier 192 | 191, Airedale 193 | 192, cairn 194 | 193, Australian_terrier 195 | 194, Dandie_Dinmont 196 | 195, Boston_bull 197 | 196, miniature_schnauzer 198 | 197, giant_schnauzer 199 | 198, standard_schnauzer 200 | 199, Scotch_terrier 201 | 200, Tibetan_terrier 202 | 201, silky_terrier 203 | 202, soft-coated_wheaten_terrier 204 | 203, West_Highland_white_terrier 205 | 204, Lhasa 206 | 205, flat-coated_retriever 207 | 206, curly-coated_retriever 208 | 207, golden_retriever 209 
208, Labrador_retriever
209, Chesapeake_Bay_retriever
210, German_short-haired_pointer
211, vizsla
212, English_setter
213, Irish_setter
214, Gordon_setter
215, Brittany_spaniel
216, clumber
217, English_springer
218, Welsh_springer_spaniel
219, cocker_spaniel
220, Sussex_spaniel
221, Irish_water_spaniel
222, kuvasz
223, schipperke
224, groenendael
225, malinois
226, briard
227, kelpie
228, komondor
229, Old_English_sheepdog
230, Shetland_sheepdog
231, collie
232, Border_collie
233, Bouvier_des_Flandres
234, Rottweiler
235, German_shepherd
236, Doberman
237, miniature_pinscher
238, Greater_Swiss_Mountain_dog
239, Bernese_mountain_dog
240, Appenzeller
241, EntleBucher
242, boxer
243, bull_mastiff
244, Tibetan_mastiff
245, French_bulldog
246, Great_Dane
247, Saint_Bernard
248, Eskimo_dog
249, malamute
250, Siberian_husky
251, dalmatian
252, affenpinscher
253, basenji
254, pug
255, Leonberg
256, Newfoundland
257, Great_Pyrenees
258, Samoyed
259, Pomeranian
260, chow
261, keeshond
262, Brabancon_griffon
263, Pembroke
264, Cardigan
265, toy_poodle
266, miniature_poodle
267, standard_poodle
268, Mexican_hairless
269, timber_wolf
270, white_wolf
271, red_wolf
272, coyote
273, dingo
274, dhole
275, African_hunting_dog
276, hyena
277, red_fox
278, kit_fox
279, Arctic_fox
280, grey_fox
281, tabby
282, tiger_cat
283, Persian_cat
284, Siamese_cat
285, Egyptian_cat
286, cougar
287, lynx
288, leopard
289, snow_leopard
290, jaguar
291, lion
292, tiger
293, cheetah
294, brown_bear
295, American_black_bear
296, ice_bear
297, sloth_bear
298, mongoose
299, meerkat
300, tiger_beetle
301, ladybug
302, ground_beetle
303, long-horned_beetle
304, leaf_beetle
305, dung_beetle
306, rhinoceros_beetle
307, weevil
308, fly
309, bee
310, ant
311, grasshopper
312, cricket
313, walking_stick
314, cockroach
315, mantis
316, cicada
317, leafhopper
318, lacewing
319, dragonfly
320, damselfly
321, admiral
322, ringlet
323, monarch
324, cabbage_butterfly
325, sulphur_butterfly
326, lycaenid
327, starfish
328, sea_urchin
329, sea_cucumber
330, wood_rabbit
331, hare
332, Angora
333, hamster
334, porcupine
335, fox_squirrel
336, marmot
337, beaver
338, guinea_pig
339, sorrel
340, zebra
341, hog
342, wild_boar
343, warthog
344, hippopotamus
345, ox
346, water_buffalo
347, bison
348, ram
349, bighorn
350, ibex
351, hartebeest
352, impala
353, gazelle
354, Arabian_camel
355, llama
356, weasel
357, mink
358, polecat
359, black-footed_ferret
360, otter
361, skunk
362, badger
363, armadillo
364, three-toed_sloth
365, orangutan
366, gorilla
367, chimpanzee
368, gibbon
369, siamang
370, guenon
371, patas
372, baboon
373, macaque
374, langur
375, colobus
376, proboscis_monkey
377, marmoset
378, capuchin
379, howler_monkey
380, titi
381, spider_monkey
382, squirrel_monkey
383, Madagascar_cat
384, indri
385, Indian_elephant
386, African_elephant
387, lesser_panda
388, giant_panda
389, barracouta
390, eel
391, coho
392, rock_beauty
393, anemone_fish
394, sturgeon
395, gar
396, lionfish
397, puffer
398, abacus
399, abaya
400, academic_gown
401, accordion
402, acoustic_guitar
403, aircraft_carrier
404, airliner
405, airship
406, altar
407, ambulance
408, amphibian
409, analog_clock
410, apiary
411, apron
412, ashcan
413, assault_rifle
414, backpack
415, bakery
416, balance_beam
417, balloon
418, ballpoint
419, Band_Aid
420, banjo
421, bannister
422, barbell
423, barber_chair
424, barbershop
425, barn
426, barometer
427, barrel
428, barrow
429, baseball
430, basketball
431, bassinet
432, bassoon
433, bathing_cap
434, bath_towel
435, bathtub
436, beach_wagon
437, beacon
438, beaker
439, bearskin
440, beer_bottle
441, beer_glass
442, bell_cote
443, bib
444, bicycle-built-for-two
445, bikini
446, binder
447, binoculars
448, birdhouse
449, boathouse
450, bobsled
451, bolo_tie
452, bonnet
453, bookcase
454, bookshop
455, bottlecap
456, bow
457, bow_tie
458, brass
459, brassiere
460, breakwater
461, breastplate
462, broom
463, bucket
464, buckle
465, bulletproof_vest
466, bullet_train
467, butcher_shop
468, cab
469, caldron
470, candle
471, cannon
472, canoe
473, can_opener
474, cardigan
475, car_mirror
476, carousel
477, carpenter's_kit
478, carton
479, car_wheel
480, cash_machine
481, cassette
482, cassette_player
483, castle
484, catamaran
485, CD_player
486, cello
487, cellular_telephone
488, chain
489, chainlink_fence
490, chain_mail
491, chain_saw
492, chest
493, chiffonier
494, chime
495, china_cabinet
496, Christmas_stocking
497, church
498, cinema
499, cleaver
500, cliff_dwelling
501, cloak
502, clog
503, cocktail_shaker
504, coffee_mug
505, coffeepot
506, coil
507, combination_lock
508, computer_keyboard
509, confectionery
510, container_ship
511, convertible
512, corkscrew
513, cornet
514, cowboy_boot
515, cowboy_hat
516, cradle
517, crane
518, crash_helmet
519, crate
520, crib
521, Crock_Pot
522, croquet_ball
523, crutch
524, cuirass
525, dam
526, desk
527, desktop_computer
528, dial_telephone
529, diaper
530, digital_clock
531, digital_watch
532, dining_table
533, dishrag
534, dishwasher
535, disk_brake
536, dock
537, dogsled
538, dome
539, doormat
540, drilling_platform
541, drum
542, drumstick
543, dumbbell
544, Dutch_oven
545, electric_fan
546, electric_guitar
547, electric_locomotive
548, entertainment_center
549, envelope
550, espresso_maker
551, face_powder
552, feather_boa
553, file
554, fireboat
555, fire_engine
556, fire_screen
557, flagpole
558, flute
559, folding_chair
560, football_helmet
561, forklift
562, fountain
563, fountain_pen
564, four-poster
565, freight_car
566, French_horn
567, frying_pan
568, fur_coat
569, garbage_truck
570, gasmask
571, gas_pump
572, goblet
573, go-kart
574, golf_ball
575, golfcart
576, gondola
577, gong
578, gown
579, grand_piano
580, greenhouse
581, grille
582, grocery_store
583, guillotine
584, hair_slide
585, hair_spray
586, half_track
587, hammer
588, hamper
589, hand_blower
590, hand-held_computer
591, handkerchief
592, hard_disc
593, harmonica
594, harp
595, harvester
596, hatchet
597, holster
598, home_theater
599, honeycomb
600, hook
601, hoopskirt
602, horizontal_bar
603, horse_cart
604, hourglass
605, iPod
606, iron
607, jack-o'-lantern
608, jean
609, jeep
610, jersey
611, jigsaw_puzzle
612, jinrikisha
613, joystick
614, kimono
615, knee_pad
616, knot
617, lab_coat
618, ladle
619, lampshade
620, laptop
621, lawn_mower
622, lens_cap
623, letter_opener
624, library
625, lifeboat
626, lighter
627, limousine
628, liner
629, lipstick
630, Loafer
631, lotion
632, loudspeaker
633, loupe
634, lumbermill
635, magnetic_compass
636, mailbag
637, mailbox
638, maillot
639, maillot
640, manhole_cover
641, maraca
642, marimba
643, mask
644, matchstick
645, maypole
646, maze
647, measuring_cup
648, medicine_chest
649, megalith
650, microphone
651, microwave
652, military_uniform
653, milk_can
654, minibus
655, miniskirt
656, minivan
657, missile
658, mitten
659, mixing_bowl
660, mobile_home
661, Model_T
662, modem
663, monastery
664, monitor
665, moped
666, mortar
667, mortarboard
668, mosque
669, mosquito_net
670, motor_scooter
671, mountain_bike
672, mountain_tent
673, mouse
674, mousetrap
675, moving_van
676, muzzle
677, nail
678, neck_brace
679, necklace
680, nipple
681, notebook
682, obelisk
683, oboe
684, ocarina
685, odometer
686, oil_filter
687, organ
688, oscilloscope
689, overskirt
690, oxcart
691, oxygen_mask
692, packet
693, paddle
694, paddlewheel
695, padlock
696, paintbrush
697, pajama
698, palace
699, panpipe
700, paper_towel
701, parachute
702, parallel_bars
703, park_bench
704, parking_meter
705, passenger_car
706, patio
707, pay-phone
708, pedestal
709, pencil_box
710, pencil_sharpener
711, perfume
712, Petri_dish
713, photocopier
714, pick
715, pickelhaube
716, picket_fence
717, pickup
718, pier
719, piggy_bank
720, pill_bottle
721, pillow
722, ping-pong_ball
723, pinwheel
724, pirate
725, pitcher
726, plane
727, planetarium
728, plastic_bag
729, plate_rack
730, plow
731, plunger
732, Polaroid_camera
733, pole
734, police_van
735, poncho
736, pool_table
737, pop_bottle
738, pot
739, potter's_wheel
740, power_drill
741, prayer_rug
742, printer
743, prison
744, projectile
745, projector
746, puck
747, punching_bag
748, purse
749, quill
750, quilt
751, racer
752, racket
753, radiator
754, radio
755, radio_telescope
756, rain_barrel
757, recreational_vehicle
758, reel
759, reflex_camera
760, refrigerator
761, remote_control
762, restaurant
763, revolver
764, rifle
765, rocking_chair
766, rotisserie
767, rubber_eraser
768, rugby_ball
769, rule
770, running_shoe
771, safe
772, safety_pin
773, saltshaker
774, sandal
775, sarong
776, sax
777, scabbard
778, scale
779, school_bus
780, schooner
781, scoreboard
782, screen
783, screw
784, screwdriver
785, seat_belt
786, sewing_machine
787, shield
788, shoe_shop
789, shoji
790, shopping_basket
791, shopping_cart
792, shovel
793, shower_cap
794, shower_curtain
795, ski
796, ski_mask
797, sleeping_bag
798, slide_rule
799, sliding_door
800, slot
801, snorkel
802, snowmobile
803, snowplow
804, soap_dispenser
805, soccer_ball
806, sock
807, solar_dish
808, sombrero
809, soup_bowl
810, space_bar
811, space_heater
812, space_shuttle
813, spatula
814, speedboat
815, spider_web
816, spindle
817, sports_car
818, spotlight
819, stage
820, steam_locomotive
821, steel_arch_bridge
822, steel_drum
823, stethoscope
824, stole
825, stone_wall
826, stopwatch
827, stove
828, strainer
829, streetcar
830, stretcher
831, studio_couch
832, stupa
833, submarine
834, suit
835, sundial
836, sunglass
837, sunglasses
838, sunscreen
839, suspension_bridge
840, swab
841, sweatshirt
842, swimming_trunks
843, swing
844, switch
845, syringe
846, table_lamp
847, tank
848, tape_player
849, teapot
850, teddy
851, television
852, tennis_ball
853, thatch
854, theater_curtain
855, thimble
856, thresher
857, throne
858, tile_roof
859, toaster
860, tobacco_shop
861, toilet_seat
862, torch
863, totem_pole
864, tow_truck
865, toyshop
866, tractor
867, trailer_truck
868, tray
869, trench_coat
870, tricycle
871, trimaran
872, tripod
873, triumphal_arch
874, trolleybus
875, trombone
876, tub
877, turnstile
878, typewriter_keyboard
879, umbrella
880, unicycle
881, upright
882, vacuum
883, vase
884, vault
885, velvet
886, vending_machine
887, vestment
888, viaduct
889, violin
890, volleyball
891, waffle_iron
892, wall_clock
893, wallet
894, wardrobe
895, warplane
896, washbasin
897, washer
898, water_bottle
899, water_jug
900, water_tower
901, whiskey_jug
902, whistle
903, wig
904, window_screen
905, window_shade
906, Windsor_tie
907, wine_bottle
908, wing
909, wok
910, wooden_spoon
911, wool
912, worm_fence
913, wreck
914, yawl
915, yurt
916, web_site
917, comic_book
918, crossword_puzzle
919, street_sign
920, traffic_light
921, book_jacket
922, menu
923, plate
924, guacamole
925, consomme
926, hot_pot
927, trifle
928, ice_cream
929, ice_lolly
930, French_loaf
931, bagel
932, pretzel
933, cheeseburger
934, hotdog
935, mashed_potato
936, head_cabbage
937, broccoli
938, cauliflower
939, zucchini
940, spaghetti_squash
941, acorn_squash
942, butternut_squash
943, cucumber
944, artichoke
945, bell_pepper
946, cardoon
947, mushroom
948, Granny_Smith
949, strawberry
950, orange
951, lemon
952, fig
953, pineapple
954, banana
955, jackfruit
956, custard_apple
957, pomegranate
958, hay
959, carbonara
960, chocolate_sauce
961, dough
962, meat_loaf
963, pizza
964, potpie
965, burrito
966, red_wine
967, espresso
968, cup
969, eggnog
970, alp
971, bubble
972, cliff
973, coral_reef
974, geyser
975, lakeside
976, promontory
977, sandbar
978, seashore
979, valley
980, volcano
981, ballplayer
982, groom
983, scuba_diver
984, rapeseed
985, daisy
986, yellow_lady's_slipper
987, corn
988, acorn
989, hip
990, buckeye
991, coral_fungus
992, agaric
993, gyromitra
994, stinkhorn
995, earthstar
996, hen-of-the-woods
997, bolete
998, ear
999, toilet_tissue
--------------------------------------------------------------------------------
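Note: the list above is presumably consumed as a simple index-to-label lookup when reporting classification results. A minimal sketch of a parser for a file in this "index, label" format follows; the helper name load_imagenet_classes and its default path are illustrative assumptions, not code taken from this repository.

def load_imagenet_classes(path="model_manager/imagenet_classes.txt"):
    # Parse "index, label" pairs into a dict mapping int index -> class name.
    # Assumes one pair per line, as in the list above; blank lines are skipped.
    classes = {}
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            idx, name = line.split(",", 1)
            classes[int(idx)] = name.strip()
    return classes

# Hypothetical usage: resolve a classifier's top-1 output index to a label.
# classes = load_imagenet_classes()
# classes[208]  # -> "Labrador_retriever"
--------------------------------------------------------------------------------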