├── atlas_utils ├── __init__.py ├── lib │ ├── __init__.py │ ├── atlas200dk │ │ └── libatlasutil.so │ ├── atlasutil_so.py │ └── src │ │ ├── atlas_utils.h │ │ ├── camera.h │ │ ├── Makefile │ │ └── camera.cpp ├── presenteragent │ ├── __init__.py │ ├── presenter_message.proto │ ├── presenter_datatype.py │ ├── presenter_message.py │ ├── presenter_agent.py │ ├── socket_client.py │ ├── presenter_channel.py │ └── presenter_message_pb2.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ └── acl_resource.cpython-38.pyc ├── chanel_id_generator.py ├── acl_resource.py ├── resource_list.py ├── acl_logger.py ├── camera.py ├── README.md ├── constants.py ├── utils.py ├── acl_image.py ├── dvpp_vdec.py ├── acl_model.py ├── video.py └── acl_dvpp.py ├── data ├── bus.png ├── dog.jpg ├── 000000.png ├── 000001.png ├── 000002.png ├── 000003.png ├── 000004.png ├── 000005.png ├── 000006.png ├── 000007.png ├── 000008.png ├── 000009.png ├── 000010.png ├── 000011.png ├── 000012.png ├── 000013.png ├── 000014.png ├── 000015.png └── person.jpg ├── model ├── yolox_nano.om └── yolox_nano.onnx ├── README.md ├── script └── yolo_onnx_opt.py └── src ├── main_yolox.py └── acl_yolox.py /atlas_utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /atlas_utils/lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /data/bus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/bus.png -------------------------------------------------------------------------------- /data/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/dog.jpg -------------------------------------------------------------------------------- /data/000000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000000.png -------------------------------------------------------------------------------- /data/000001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000001.png -------------------------------------------------------------------------------- /data/000002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000002.png -------------------------------------------------------------------------------- /data/000003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000003.png -------------------------------------------------------------------------------- /data/000004.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000004.png -------------------------------------------------------------------------------- /data/000005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000005.png -------------------------------------------------------------------------------- /data/000006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000006.png -------------------------------------------------------------------------------- /data/000007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000007.png -------------------------------------------------------------------------------- /data/000008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000008.png -------------------------------------------------------------------------------- /data/000009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000009.png -------------------------------------------------------------------------------- /data/000010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000010.png -------------------------------------------------------------------------------- /data/000011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000011.png -------------------------------------------------------------------------------- /data/000012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000012.png -------------------------------------------------------------------------------- /data/000013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000013.png -------------------------------------------------------------------------------- /data/000014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000014.png -------------------------------------------------------------------------------- /data/000015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/000015.png -------------------------------------------------------------------------------- /data/person.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/data/person.jpg -------------------------------------------------------------------------------- /model/yolox_nano.om: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/model/yolox_nano.om -------------------------------------------------------------------------------- /model/yolox_nano.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/model/yolox_nano.onnx -------------------------------------------------------------------------------- /atlas_utils/lib/atlas200dk/libatlasutil.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/atlas_utils/lib/atlas200dk/libatlasutil.so -------------------------------------------------------------------------------- /atlas_utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/atlas_utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /atlas_utils/__pycache__/acl_resource.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HITSZ-NRSL/yolox_for_cann_atlas200dk/HEAD/atlas_utils/__pycache__/acl_resource.cpython-38.pyc -------------------------------------------------------------------------------- /atlas_utils/lib/atlasutil_so.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import ctypes 3 | import os 4 | import platform 5 | 6 | import acl 7 | 8 | from atlas_utils.constants import ACL_HOST, ACL_DEVICE 9 | 10 | 11 | def _load_lib_atlasutil(): 12 | run_mode, ret = acl.rt.get_run_mode() 13 | 14 | lib = None 15 | if run_mode == ACL_DEVICE: 16 | cur_dir = os.path.dirname(os.path.abspath(__file__)) 17 | so_path = os.path.join(cur_dir, 'atlas200dk/libatlasutil.so') 18 | lib=ctypes.CDLL(so_path) 19 | 20 | return lib 21 | 22 | 23 | class _AtlasutilLib(object): 24 | _instance_lock=threading.Lock() 25 | lib=_load_lib_atlasutil() 26 | 27 | def __init__(self): 28 | pass 29 | 30 | def __new__(cls, *args, **kwargs): 31 | if not hasattr(_AtlasutilLib, "_instance"): 32 | with _AtlasutilLib._instance_lock: 33 | if not hasattr(_AtlasutilLib, "_instance"): 34 | _AtlasutilLib._instance=object.__new__( 35 | cls, *args, **kwargs) 36 | return _AtlasutilLib._instance 37 | 38 | libatlas=_AtlasutilLib.lib 39 | -------------------------------------------------------------------------------- /atlas_utils/chanel_id_generator.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | 4 | class _ChannelIdGenerator(object): 5 | """Generate global unique id number, single instance mode class""" 6 | _instance_lock = threading.Lock() 7 | channel_id = 0 8 | 9 | def __init__(self): 10 | pass 11 | 12 | def __new__(cls, *args, **kwargs): 13 | if not hasattr(_ChannelIdGenerator, "_instance"): 14 | with _ChannelIdGenerator._instance_lock: 15 | if not hasattr(_ChannelIdGenerator, "_instance"): 16 | _ChannelIdGenerator._instance = object.__new__( 17 | cls, *args, **kwargs) 18 | return _ChannelIdGenerator._instance 19 | 20 | def generator_channel_id(self): 21 | """Generate global unique id number 22 | The id number is increase 23 | """ 24 | curren_channel_id = 0 25 | with _ChannelIdGenerator._instance_lock: 26 | curren_channel_id = _ChannelIdGenerator.channel_id 27 | 
_ChannelIdGenerator.channel_id += 1 28 | 29 | return curren_channel_id 30 | 31 | 32 | def gen_unique_channel_id(): 33 | """Interface of generate global unique id number""" 34 | generator = _ChannelIdGenerator() 35 | return generator.generator_channel_id() 36 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package ascend.presenter.proto; 4 | 5 | enum OpenChannelErrorCode { 6 | kOpenChannelErrorNone = 0; 7 | kOpenChannelErrorNoSuchChannel = 1; 8 | kOpenChannelErrorChannelAlreadyOpened = 2; 9 | kOpenChannelErrorOther = -1; 10 | } 11 | 12 | enum ChannelContentType { 13 | kChannelContentTypeImage = 0; 14 | kChannelContentTypeVideo = 1; 15 | } 16 | 17 | // By Protocol Buffer Style Guide, need to use underscore_separated_names 18 | // for field names 19 | message OpenChannelRequest { 20 | string channel_name = 1; 21 | ChannelContentType content_type = 2; 22 | } 23 | 24 | message OpenChannelResponse { 25 | OpenChannelErrorCode error_code = 1; 26 | string error_message = 2; 27 | } 28 | 29 | message HeartbeatMessage { 30 | 31 | } 32 | 33 | enum ImageFormat { 34 | kImageFormatJpeg = 0; 35 | } 36 | 37 | message Coordinate { 38 | uint32 x = 1; 39 | uint32 y = 2; 40 | } 41 | 42 | message Rectangle_Attr { 43 | Coordinate left_top = 1; 44 | Coordinate right_bottom = 2; 45 | string label_text = 3; 46 | } 47 | 48 | message PresentImageRequest { 49 | ImageFormat format = 1; 50 | uint32 width = 2; 51 | uint32 height = 3; 52 | bytes data = 4; 53 | repeated Rectangle_Attr rectangle_list = 5; 54 | } 55 | 56 | enum PresentDataErrorCode { 57 | kPresentDataErrorNone = 0; 58 | kPresentDataErrorUnsupportedType = 1; 59 | kPresentDataErrorUnsupportedFormat = 2; 60 | kPresentDataErrorOther = -1; 61 | } 62 | 63 | message PresentImageResponse { 64 | PresentDataErrorCode error_code = 1; 65 | string error_message = 2; 66 | } 67 | 68 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_datatype.py: -------------------------------------------------------------------------------- 1 | STATUS_DISCONNECT = 0 2 | STATUS_CONNECTED = 1 3 | STATUS_OPEN_CH_REQUEST = 2 4 | STATUS_OPENED = 3 5 | STATUS_EXITING = 4 6 | STATUS_EXITTED = 5 7 | 8 | CONTENT_TYPE_IMAGE = 0 9 | CONTENT_TYPE_VIDEO = 1 10 | 11 | STATUS_OK = 0 12 | STATUS_ERROR = 1 13 | 14 | 15 | class Point(object): 16 | """ 17 | point coordinate 18 | """ 19 | 20 | def __init__(self, x=0, y=0): 21 | self.x = x 22 | self.y = y 23 | 24 | 25 | class Box(object): 26 | """ 27 | object rectangle area 28 | """ 29 | 30 | def __init__(self, lt, rb): 31 | self.lt = Point(lt) 32 | self.rb = Point(rb) 33 | 34 | def box_valid(self): 35 | """ 36 | verify box coordinate is valid 37 | """ 38 | return ((self.lt.x >= 0) 39 | and (self.lt.y >= 0) 40 | and (self.rb.x >= self.lt.x) 41 | and (self.rb.y >= self.lt.y)) 42 | 43 | 44 | class ObjectDetectionResult(object): 45 | """ 46 | object detection information, include object position, confidence and label 47 | """ 48 | 49 | def __init__(self, ltx=0, lty=0, rbx=0, rby=0, text=None): 50 | self.object_class = 0 51 | self.confidence = 0 52 | self.box = Box((ltx, lty), (rbx, rby)) 53 | self.result_text = text 54 | 55 | def check_box_vaild(self, width, height): 56 | """ 57 | verify object position is valid 58 | """ 59 | return (self.box.box_valid() and 60 | (self.box.rb.x <= width) and 61 | 
(self.box.rb.y <= height)) 62 | 63 | 64 | class FinishMsg(object): 65 | """ 66 | the message to notify presenter agent exit 67 | """ 68 | 69 | def __init__(self, data): 70 | self.data = data 71 | -------------------------------------------------------------------------------- /atlas_utils/acl_resource.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (R) @huawei.com, all rights reserved 3 | -*- coding:utf-8 -*- 4 | CREATED: 2021-01-20 20:12:13 5 | MODIFIED: 2021-02-03 14:04:45 6 | """ 7 | import acl 8 | 9 | import atlas_utils.utils as utils 10 | from atlas_utils.resource_list import resource_list 11 | 12 | 13 | class AclResource(object): 14 | """ 15 | AclResource 16 | """ 17 | 18 | def __init__(self, device_id=0): 19 | self.device_id = device_id 20 | self.context = None 21 | self.stream = None 22 | self.run_mode = None 23 | 24 | def init(self): 25 | """ 26 | init resource 27 | """ 28 | print("init resource stage:") 29 | ret = acl.init() 30 | utils.check_ret("acl.init", ret) 31 | 32 | ret = acl.rt.set_device(self.device_id) 33 | utils.check_ret("acl.rt.set_device", ret) 34 | 35 | self.context, ret = acl.rt.create_context(self.device_id) 36 | utils.check_ret("acl.rt.create_context", ret) 37 | 38 | self.stream, ret = acl.rt.create_stream() 39 | utils.check_ret("acl.rt.create_stream", ret) 40 | 41 | self.run_mode, ret = acl.rt.get_run_mode() 42 | utils.check_ret("acl.rt.get_run_mode", ret) 43 | 44 | print("Init resource success") 45 | 46 | def __del__(self): 47 | print("acl resource release all resource") 48 | resource_list.destroy() 49 | if self.stream: 50 | print("acl resource release stream") 51 | acl.rt.destroy_stream(self.stream) 52 | 53 | if self.context: 54 | print("acl resource release context") 55 | acl.rt.destroy_context(self.context) 56 | 57 | print("Reset acl device ", self.device_id) 58 | acl.rt.reset_device(self.device_id) 59 | acl.finalize() 60 | print("Release acl resource success") 61 | -------------------------------------------------------------------------------- /atlas_utils/resource_list.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | REGISTER = 0 4 | UNREGISTER = 1 5 | 6 | 7 | class _ResourceList(object): 8 | """Acl resources of current application 9 | This class provide register inferace of acl resource, when application 10 | exit, all register resource will release befor acl.rt.reset_device to 11 | avoid program abnormal 12 | """ 13 | _instance_lock = threading.Lock() 14 | 15 | def __init__(self): 16 | self.resources = [] 17 | 18 | def __new__(cls, *args, **kwargs): 19 | if not hasattr(_ResourceList, "_instance"): 20 | with _ResourceList._instance_lock: 21 | if not hasattr(_ResourceList, "_instance"): 22 | _ResourceList._instance = object.__new__( 23 | cls, *args, **kwargs) 24 | return _ResourceList._instance 25 | 26 | def register(self, resource): 27 | """Resource register interface 28 | Args: 29 | resource: object with acl resource, the object must be has 30 | method destroy() 31 | """ 32 | item = {"resource": resource, "status": REGISTER} 33 | self.resources.append(item) 34 | 35 | def unregister(self, resource): 36 | """Resource unregister interface 37 | If registered resource release by self and no need _ResourceList 38 | release, the resource object should unregister self 39 | Args: 40 | resource: registered resource 41 | """ 42 | for item in self.resources: 43 | if resource == item["resource"]: 44 | item["status"] = UNREGISTER 45 | 46 | def 
destroy(self): 47 | """Destroy all register resource""" 48 | for item in self.resources: 49 | if item["status"] == REGISTER: 50 | item["resource"].destroy() 51 | item["status"] = UNREGISTER 52 | 53 | 54 | resource_list = _ResourceList() 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # yolox_opencv_python 2 | This is a project to deploy YOLOX on Atlas200DK using CANN. 3 | 4 | 在Atlas200dk中使用CANN部署yolox模型推理 5 | 6 | # Environments 7 | You should have set up the CANN environments on Atlas200DK, 8 | some other needed packages are as belows 9 | ``` 10 | opencv_python (>=4.3 only for opencv dnn inference) 11 | opencv_contrib_python>=4.3 (only for opencv dnn inference) 12 | numpy 13 | pyACL (CANN environments have set this) 14 | ``` 15 | 16 | # Usage 17 | ## First, git clone this code, yolox_nano.onnx has been on the 'model' dir 18 | 19 | if you want other models, you can download them on the origin repo: https://github.com/Megvii-BaseDetection/YOLOX.git 20 | 21 | and put the downloaded onnx into the ./model dir: 22 | ``` 23 | git clone https://github.com/stunback/yolox_for_cann_atlas200dk.git 24 | # if you have downloaded yolox_s.onnx 25 | cd yolox_for_cann_atlas200dk 26 | mv onnx_path model/ 27 | ``` 28 | 29 | ## Second, remove the focus layer on the onnx model 30 | change the ONNX_MODEL_PATH on ./script/yolo_onnx_opt.py 31 | 32 | then run the script: 33 | ``` 34 | cd script 35 | python yolo_onnx_opt.py 36 | ``` 37 | 38 | ## Third, use atc tool to export the onnx model into cann model 39 | Use yolox_nano_simple.onnx for example: 40 | ``` 41 | cd ../model 42 | atc --model=./yolox_nano_simple.onnx --framework=5 --output=yolox_nano_simple --input_format=NCHW --soc_version=Ascend310 43 | ``` 44 | 45 | ## At last, run the inference demo 46 | change the model path on src/acl_yolox.py, and run: 47 | ``` 48 | cd ../src 49 | python acl_yolox.py 50 | ``` 51 | 52 | ## Additional 53 | An opencv inference demo is also provided: 54 | ``` 55 | cd ../src 56 | python main_yolox.py 57 | ``` 58 | 59 | # Model Inference Speed 60 | Hardware: Atlas200dk npu 61 | 62 | yolox_nano(416) onnx=308.2ms cann=11.5ms 63 | 64 | yolox_tiny(416) onnx=763.8ms cann=12.2ms 65 | 66 | yolox_s(640) onnx=2907.3ms cann=16.5ms 67 | 68 | yolox_x(640) onnx=24268ms cann=62.8ms (4.49GFLOPs/s) 69 | 70 | # Others 71 | Blogs about yolov5, yolox and nanodet: 72 | https://blog.csdn.net/qq_41035283/article/details/119150751 -------------------------------------------------------------------------------- /atlas_utils/lib/src/atlas_utils.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2020 Huawei Technologies Co., Ltd 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | 16 | * File utils.h 17 | * Description: handle file operations 18 | */ 19 | #pragma once 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | #include "acl/acl.h" 31 | 32 | extern "C" { 33 | 34 | /** 35 | * @brief calculate YUVSP420 image size 36 | * @param [in] width: image width 37 | * @param [in] height: image height 38 | * @return bytes size of image 39 | */ 40 | #define YUV420SP_SIZE(width, height) ((width) * (height) * 3 / 2) 41 | 42 | /** 43 | * @brief Write acl error level log to host log 44 | * @param [in] fmt: the input format string 45 | * @return none 46 | */ 47 | #define ATLAS_LOG_ERROR(fmt, ...) \ 48 | do{aclAppLog(ACL_ERROR, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ 49 | fprintf(stdout, "[ERROR] " fmt "\n", ##__VA_ARGS__);}while(0) 50 | 51 | /** 52 | * @brief Write acl info level log to host log 53 | * @param [in] fmt: the input format string 54 | * @return none 55 | */ 56 | #define ATLAS_LOG_INFO(fmt, ...) \ 57 | do{aclAppLog(ACL_INFO, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ 58 | fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) 59 | 60 | /** 61 | * @brief Write acl debug level log to host log 62 | * @param [in] fmt: the input format string 63 | * @return none 64 | */ 65 | #define ATLAS_LOG_DEBUG(fmt, ...) \ 66 | do{aclAppLog(ACL_DEBUG, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__); \ 67 | fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__);}while(0) 68 | } -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_message.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import struct 4 | import socket 5 | 6 | import atlas_utils.presenteragent.presenter_message_pb2 as pb2 7 | 8 | 9 | def pack_message(msg_name, msg_data): 10 | """Pack message name and data to byte stream""" 11 | buf = msg_data.SerializeToString() 12 | msg_body_len = len(buf) 13 | msg_name_len = len(msg_name) 14 | msg_total_len = msg_name_len + msg_body_len + 5 15 | data = b'' 16 | msg_total_len = socket.htonl(msg_total_len) 17 | pack_data = struct.pack('IB', msg_total_len, msg_name_len) 18 | data += pack_data 19 | data += msg_name.encode() 20 | data += buf 21 | 22 | return data 23 | 24 | 25 | def open_channel_request(channel_name, content_type): 26 | """Create open channel request message""" 27 | request = pb2.OpenChannelRequest() 28 | request.channel_name = channel_name 29 | request.content_type = content_type 30 | 31 | return pack_message(pb2._OPENCHANNELREQUEST.full_name, request) 32 | 33 | 34 | def image_frame_request( 35 | image_width, 36 | image_height, 37 | image_data, 38 | detection_result): 39 | """Create image frame request message""" 40 | request = pb2.PresentImageRequest() 41 | request.format = 0 42 | request.width = image_width 43 | request.height = image_height 44 | request.data = image_data 45 | for i in range(0, len(detection_result)): 46 | myadd = request.rectangle_list.add() 47 | myadd.left_top.x = detection_result[i].box.lt.x 48 | myadd.left_top.y = detection_result[i].box.lt.y 49 | myadd.right_bottom.x = detection_result[i].box.rb.x 50 | myadd.right_bottom.y = detection_result[i].box.rb.y 51 | myadd.label_text = detection_result[i].result_text 52 | 53 | return pack_message(pb2._PRESENTIMAGEREQUEST.full_name, request) 54 | 55 | 56 | def heartbeat_message(): 57 | """Create headbeat message""" 58 | return 
pack_message( 59 | pb2._HEARTBEATMESSAGE.full_name, 60 | pb2.HeartbeatMessage()) 61 | 62 | 63 | def is_open_channel_response(msg_name): 64 | """Confirm the message is open channel response or not""" 65 | return (msg_name == pb2._OPENCHANNELRESPONSE.full_name) 66 | 67 | 68 | def is_image_frame_response(msg_name): 69 | """Confirm the message is image frame response or not""" 70 | return (msg_name == pb2._PRESENTIMAGERESPONSE.full_name) 71 | -------------------------------------------------------------------------------- /atlas_utils/lib/src/camera.h: -------------------------------------------------------------------------------- 1 | /** 2 | * ============================================================================ 3 | * 4 | * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are met: 8 | * 9 | * 1 Redistributions of source code must retain the above copyright notice, 10 | * this list of conditions and the following disclaimer. 11 | * 12 | * 2 Redistributions in binary form must reproduce the above copyright notice, 13 | * this list of conditions and the following disclaimer in the documentation 14 | * and/or other materials provided with the distribution. 15 | * 16 | * 3 Neither the names of the copyright holders nor the names of the 17 | * contributors may be used to endorse or promote products derived from this 18 | * software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 
31 | * ============================================================================ 32 | */ 33 | #ifndef _CAMERA_H 34 | #define _CAMERA_H 35 | 36 | #define CAMERA_NUM (2) 37 | 38 | #define CAMERA(i) (g_CameraMgr.cap[i]) 39 | 40 | const int ATLAS_OK = 0; 41 | const int ATLAS_ERROR = 1; 42 | 43 | struct CameraOutput { 44 | int size; 45 | uint8_t* data; 46 | }; 47 | 48 | struct Camera { 49 | bool inited = false; 50 | int id = 255; 51 | int fps = 0; 52 | int width = 0; 53 | int height = 0; 54 | int frameSize = 0; 55 | }; 56 | 57 | struct CameraManager { 58 | bool hwInited = 0; 59 | Camera cap[CAMERA_NUM]; 60 | }; 61 | 62 | #endif 63 | -------------------------------------------------------------------------------- /atlas_utils/lib/src/Makefile: -------------------------------------------------------------------------------- 1 | TOPDIR := $(patsubst %,%,$(CURDIR)) 2 | 3 | ifndef DDK_PATH 4 | $(error "Can not find DDK_PATH env, please set it in environment!.") 5 | endif 6 | 7 | ifeq ($(mode),) 8 | mode=AtlasDK 9 | endif 10 | 11 | ifeq ($(mode), AtlasDK) 12 | CC := aarch64-linux-gnu-g++ 13 | arch := arm 14 | OUT_DIR = ../atlas200dk/ 15 | else ifeq ($(mode), ASIC) 16 | CC := g++ 17 | 18 | arch_local=$(shell arch) 19 | $(warning "arch: "$(arch_local)) 20 | ifeq ($(arch_local), aarch64) 21 | arch := arm 22 | else 23 | arch := x86 24 | endif 25 | OUT_DIR = ../asic/$(arch) 26 | else 27 | $(error "Unsupported mode: "$(mode)", please input: AtlasDK or ASIC.") 28 | endif 29 | 30 | NPU_HOST_LIB = $(DDK_PATH)/acllib/lib64/stub/ 31 | 32 | LOCAL_MODULE_NAME := libatlasutil.so 33 | 34 | LOCAL_DIR := . 35 | OBJ_DIR = $(OUT_DIR)/obj 36 | DEPS_DIR = $(OUT_DIR)/deps 37 | LOCAL_LIBRARY=$(OUT_DIR)/$(LOCAL_MODULE_NAME) 38 | OUT_INC_DIR = $(OUT_DIR)/include 39 | 40 | INC_DIR = \ 41 | -I./ \ 42 | -I../include \ 43 | -I$(HOME)/Ascend/driver/ \ 44 | -I$(HOME)/ascend_ddk/$(arch)/include/ \ 45 | -I$(HOME)/ascend_ddk/$(arch)/include/ascenddk/ \ 46 | -I$(HOME)/ascend_ddk/$(arch)/include/ascenddk/presenter/agent/ \ 47 | -I$(DDK_PATH)/acllib/include/ \ 48 | -I$(DDK_PATH)/atc/include/protobuf 49 | 50 | CC_FLAGS := $(INC_DIR) -DENABLE_DVPP_INTERFACE -std=c++11 -fPIC -Wall -O2 51 | LNK_FLAGS := \ 52 | -Wl,-rpath-link=$(NPU_HOST_LIB) \ 53 | -Wl,-rpath-link=$(HOME)/ascend_ddk/$(arch)/lib \ 54 | -L$(NPU_HOST_LIB) \ 55 | -L$(HOME)/ascend_ddk/$(arch)/lib \ 56 | -lascendcl \ 57 | -lacl_dvpp \ 58 | -lstdc++ \ 59 | -lpthread \ 60 | -shared 61 | 62 | ifeq ($(mode), AtlasDK) 63 | LNK_FLAGS += -L$(HOME)/Ascend/driver -lmedia_mini 64 | endif 65 | 66 | SRCS_ALL := $(patsubst $(LOCAL_DIR)/%.cpp, %.cpp, $(shell find $(LOCAL_DIR) -name "*.cpp")) 67 | ifeq ($(mode), AtlasDK) 68 | SRCS := $(SRCS_ALL) 69 | else 70 | SRCS := $(subst camera.cpp, ,$(SRCS_ALL)) 71 | endif 72 | 73 | OBJS := $(addprefix $(OBJ_DIR)/, $(patsubst %.cpp, %.o,$(SRCS))) 74 | 75 | ALL_OBJS := $(OBJS) 76 | 77 | all: do_pre_build do_build 78 | 79 | do_pre_build: 80 | $(Q)echo - do [$@] 81 | $(Q)mkdir -p $(OBJ_DIR) 82 | 83 | do_build: $(LOCAL_LIBRARY) | do_pre_build 84 | $(Q)echo - do [$@] 85 | 86 | $(LOCAL_LIBRARY): $(ALL_OBJS) 87 | $(Q)echo [LD] $@ 88 | $(Q)$(CC) $(CC_FLAGS) -o $@ $^ -Wl,--whole-archive -Wl,--no-whole-archive -Wl,--start-group -Wl,--end-group -Wl,-rpath='/home/HwHiAiUser/HIAI_PROJECTS/ascend_lib' $(LNK_FLAGS) 89 | 90 | 91 | $(OBJS): $(OBJ_DIR)/%.o : %.cpp | do_pre_build 92 | $(Q)echo [CC] $@ 93 | $(Q)mkdir -p $(dir $@) 94 | $(Q)$(CC) $(CC_FLAGS) $(INC_DIR) -c -fstack-protector-all $< -o $@ 95 | 96 | 97 | clean: 98 | rm -rf $(OUT_DIR)/* 99 | 
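# Usage sketch (editor's note, not part of the original Makefile): with DDK_PATH
# exported, the library is typically built as follows.
#   make                # defaults to mode=AtlasDK: cross-compiles with
#                       # aarch64-linux-gnu-g++ and writes libatlasutil.so
#                       # into ../atlas200dk/
#   make mode=ASIC      # host build; camera.cpp is excluded from SRCS
#   make clean          # removes everything under $(OUT_DIR)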
-------------------------------------------------------------------------------- /atlas_utils/acl_logger.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | import acl 5 | 6 | 7 | _ACL_DEBUG = 0 8 | _ACL_INFO = 1 9 | _ACL_WARNING = 2 10 | _ACL_ERROR = 3 11 | 12 | 13 | def log_error(*log_msg): 14 | """Recode error level log to file 15 | Args: 16 | *log_msg: format string and args list 17 | """ 18 | log_str = [str(i) for i in log_msg] 19 | log_str = "".join(log_str) 20 | 21 | print(log_str) 22 | 23 | caller_frame = sys._getframe().f_back 24 | # caller file 25 | filename = caller_frame.f_code.co_filename 26 | # caller line no 27 | line_no = caller_frame.f_lineno 28 | # caller function 29 | func_name = caller_frame.f_code.co_name 30 | 31 | message = "[" + filename + ":" + str(line_no) + \ 32 | " " + func_name + "]" + log_str 33 | acl.app_log(_ACL_ERROR, message) 34 | 35 | 36 | def log_warning(*log_msg): 37 | """Recode warning level log to file 38 | Args: 39 | *log_msg: format string and args list 40 | """ 41 | log_str = [str(i) for i in log_msg] 42 | log_str = "".join(log_str) 43 | caller_frame = sys._getframe().f_back 44 | # caller file 45 | filename = caller_frame.f_code.co_filename 46 | # caller line no 47 | line_no = caller_frame.f_lineno 48 | # caller function 49 | func_name = caller_frame.f_code.co_name 50 | 51 | message = "[" + filename + ":" + str(line_no) + \ 52 | " " + func_name + "]" + log_str 53 | acl.app_log(_ACL_WARNING, message) 54 | 55 | 56 | def log_info(*log_msg): 57 | """Recode info level log to file 58 | Args: 59 | *log_msg: format string and args list 60 | """ 61 | log_str = [str(i) for i in log_msg] 62 | log_str = "".join(log_str) 63 | print(log_str) 64 | caller_frame = sys._getframe().f_back 65 | # caller file 66 | filename = caller_frame.f_code.co_filename 67 | # caller line no 68 | line_no = caller_frame.f_lineno 69 | # caller function 70 | func_name = caller_frame.f_code.co_name 71 | 72 | message = "[" + filename + ":" + str(line_no) + \ 73 | " " + func_name + "]" + log_str 74 | acl.app_log(_ACL_INFO, message) 75 | 76 | 77 | def log_debug(*log_msg): 78 | """Recode debug level log to file 79 | Args: 80 | *log_msg: format string and args list 81 | """ 82 | log_str = [str(i) for i in log_msg] 83 | log_str = "".join(log_str) 84 | caller_frame = sys._getframe().f_back 85 | # caller file 86 | filename = caller_frame.f_code.co_filename 87 | # caller line no 88 | line_no = caller_frame.f_lineno 89 | # caller function 90 | func_name = caller_frame.f_code.co_name 91 | 92 | message = "[" + filename + ":" + str(line_no) + \ 93 | " " + func_name + "]" + log_str 94 | 95 | acl.app_log(_ACL_DEBUG, message) 96 | -------------------------------------------------------------------------------- /atlas_utils/camera.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | # 4 | from ctypes import * 5 | import os 6 | import time 7 | import sys 8 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 9 | sys.path.append(BASE_DIR) 10 | 11 | from atlas_utils.lib.atlasutil_so import libatlas 12 | import atlas_utils.constants as const 13 | from atlas_utils.acl_image import AclImage 14 | from atlas_utils.acl_logger import log_error, log_info 15 | 16 | CAMERA_OK = 0 17 | CAMERA_ERROR = 1 18 | 19 | CAMERA_CLOSED = 0 20 | CAMERA_OPENED = 1 21 | 22 | 23 | class CameraOutputC(Structure): 24 | """Ctypes parameter object for frame data""" 25 | 
_fields_ = [ 26 | ('size', c_int), 27 | ('data', POINTER(c_ubyte)) 28 | ] 29 | 30 | 31 | class Camera(object): 32 | """Atlas200dk board camera access class""" 33 | def __init__(self, camera_id, fps=20, size=(1280, 720)): 34 | """Create camera instance 35 | Args: 36 | camera_id: camera slot 37 | fps: frame per second 38 | size: frame resolution 39 | """ 40 | self._id = camera_id 41 | self._fps = fps 42 | self._width = size[0] 43 | self._height = size[1] 44 | self._size = int(self._width * self._height * 3 / 2) 45 | self._status = CAMERA_CLOSED 46 | if CAMERA_OK == self._open(): 47 | self._status = CAMERA_OPENED 48 | else: 49 | log_error("Open camera %d failed" % (camera_id)) 50 | 51 | def _open(self): 52 | ret = libatlas.OpenCameraEx(self._id, self._fps, 53 | self._width, self._height) 54 | if (ret != CAMERA_OK): 55 | log_error("Open camera %d failed ,ret = %d" % (self._id, ret)) 56 | return CAMERA_ERROR 57 | self._status = CAMERA_OPENED 58 | return CAMERA_OK 59 | 60 | def is_opened(self): 61 | """Camera is opened or not""" 62 | return (self._status == CAMERA_OPENED) 63 | 64 | def read(self): 65 | """Read frame from camera""" 66 | frame_data = CameraOutputC() 67 | ret = libatlas.ReadCameraFrame(self._id, byref(frame_data)) 68 | if (ret != CAMERA_OK): 69 | log_error("Read camera %d failed" % (self._id)) 70 | return None 71 | 72 | return AclImage( 73 | addressof(frame_data.data.contents), 74 | self._width, 75 | self._height, 76 | self._size, 77 | const.MEMORY_DVPP) 78 | 79 | def close(self): 80 | """Close camera""" 81 | log_info("Close camera ", self._id) 82 | libatlas.CloseCameraEx(self._id) 83 | 84 | def __del__(self): 85 | self.close() 86 | 87 | 88 | if __name__ == "__main__": 89 | cap = Camera(camera_id=0, fps=20, size=(1280, 720)) 90 | 91 | start = time.time() 92 | for i in range(0, 100): 93 | image = cap.read() 94 | print("Read 100 frame exhaust ", time.time() - start) 95 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_agent.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import time 4 | from threading import Thread 5 | import sys 6 | 7 | from atlas_utils.acl_logger import log_error, log_info 8 | from atlas_utils.presenteragent.socket_client import AgentSocket 9 | import atlas_utils.presenteragent.presenter_message as pm 10 | import atlas_utils.presenteragent.presenter_datatype as datatype 11 | 12 | 13 | class PresenterAgent(object): 14 | """Message proxy to presenter server""" 15 | def __init__(self, server_ip, port): 16 | self.socket = AgentSocket(server_ip, port) 17 | self._closed = False 18 | self.heart_beat_thread = None 19 | 20 | def connect_server(self): 21 | """Connect presenter server""" 22 | return self.socket.connect() 23 | 24 | def start_heard_beat_thread(self): 25 | """Start thread that send heardbeat messages""" 26 | self.heart_beat_thread = Thread(target=self._keep_alive) 27 | self.heart_beat_thread.start() 28 | 29 | def _keep_alive(self): 30 | msg = pm.heartbeat_message() 31 | 32 | while True: 33 | if self._closed: 34 | log_error("Heard beat thread exit") 35 | break 36 | 37 | self.socket.send_msg(msg) 38 | time.sleep(2) 39 | 40 | def exit(self): 41 | """Proxy exit""" 42 | self.socket.close() 43 | self._closed = True 44 | 45 | 46 | def StartPresenterAgent( 47 | msg_queue, 48 | server_ip, 49 | port, 50 | open_status, 51 | data_respone_counter): 52 | """Startup presenter agent""" 53 | agent = 
PresenterAgent(server_ip, port) 54 | ret = agent.connect_server() 55 | if ret: 56 | log_error("Connect server failed, ret =", ret) 57 | return 58 | 59 | open_status.value = datatype.STATUS_CONNECTED 60 | 61 | while True: 62 | data = msg_queue.get() 63 | if data is None: 64 | continue 65 | 66 | if isinstance(data, datatype.FinishMsg): 67 | log_info("Receive presenter agent exit notification, queue size ", 68 | msg_queue.qsize()) 69 | time.sleep(0.1) 70 | agent.exit() 71 | break 72 | 73 | agent.socket.send_msg(data) 74 | msg_name, msg_body = agent.socket.recv_msg() 75 | if (msg_name is None) or (msg_body is None): 76 | log_error("Recv invalid message, message name ", msg_name) 77 | continue 78 | 79 | if ((open_status.value == datatype.STATUS_CONNECTED) and 80 | pm.is_open_channel_response(msg_name)): 81 | log_info("Received open channel respone") 82 | open_status.value = datatype.STATUS_OPENED 83 | agent.start_heard_beat_thread() 84 | log_info( 85 | "presenter agent change connect_status to ", 86 | open_status.value) 87 | 88 | if ((open_status.value == datatype.STATUS_OPENED) and 89 | pm.is_image_frame_response(msg_name)): 90 | data_respone_counter.value += 1 91 | #log_info("send ok ", data_respone_counter.value) 92 | -------------------------------------------------------------------------------- /atlas_utils/README.md: -------------------------------------------------------------------------------- 1 | # python atlasutil 使用说明 2 | 3 | ## 使用约束 4 | 5 | 1.本库仅供当前社区开源样例使用,不覆盖ascend平台应用开发的所有场景,不作为用户应用开发的标准库; 6 | 7 | 2.本库仅在Atlas200DK和Atlas300(x86)服务器上做了验证。 8 | 9 | ## C码库编译 10 | 11 | 本库包含Atlas200dk的板载摄像头访问接口,该接口是在C码(lib/src/目录)基础上做的python封装。在Atlas200dk设备上使用本库时,如果对这部分代码有修改,需要重新编译C码。编译依赖libmedia_mini.so, 部署方法参见[环境准备和依赖安装](../../../cplusplus/environment)Atlas200DK基础环境配置部分 12 | 13 | ### 编译步骤 14 | 15 | 1.进入lib/src目录; 16 | 17 | 2.执行编译安装命令: 18 | 19 | ``` 20 | make 21 | ``` 22 | 23 | 编译生成的libatalsutil.so在../atlas200dk/目录下。 24 | 25 | Atlas300上使用本库时不涉及该C码部分,不需要编译 26 | 27 | ## 部署方法 28 | 29 | 执行应用前需要将本库部署到运行环境。 30 | 31 | python atlasutil库依赖pyav, numpy和PIL。在运行环境中需要安装这些第三方库 32 | 33 | ### 安装pyav 34 | 35 | 1. 安装ffmpeg。因为apt-get安装的ffmpeg版本很低,所以需要采用源码编译的方式在运行环境中部署ffmpeg。ffmpeg的编译部署参考[环境准备和依赖安装](../../../cplusplus/environment)安装ffmpeg章节 36 | 37 | 2. 安装其他依赖: 38 | 39 | ``` 40 | apt-get install python3-pip 41 | pip3.6 install --upgrade pip 42 | pip3.6 install Cython 43 | apt-get install pkg-config libxcb-shm0-dev libxcb-xfixes0-dev 44 | cp /home/HwHiAiUser/ascend_ddk//lib/pkgconfig/* /usr/share/pkgconfig/ 45 | ``` 46 | 47 | 其中arch参数在Atlas200dk上使用arm, 即: 48 | 49 | `cp /home/HwHiAiUser/ascend_ddk/arm/lib/pkgconfig/* /usr/share/pkgconfig/` 50 | 51 | 在Atlas300上,根据服务器CPU是arm还是x86_64,分别取arm或者x86 52 | 53 | 3. 源码安装pyav 54 | 55 | ``` 56 | git clone https://gitee.com/mirrors/PyAV.git 57 | cd PyAV 58 | python3.6 setup.py build --ffmpeg-dir=/home/HwHiAiUser/ascend_ddk/ 59 | python3.6 setup.py install 60 | ``` 61 | 62 | arch参数的选择同上 63 | 64 | ``` 65 | 安装过程中常见报错: 66 | 67 | 错误1:apt-get报错Job for nginx.service failed because the control process exited with error code. 68 | 解决方法:将/etc/nginx/sites-enabled/default中 69 | listen 80 default_server; 70 | listen [::]:80 default_server; 71 | 改为: 72 | listen 80; 73 | #listen [::]:80 default_server; 74 | 75 | 错误2:编译PyAv报错 76 | Could not find libavdevice with pkg-config. 77 | Could not find libavfilter with pkg-config. 
78 | 解决方法: 79 | 步骤1.确认cp /home/HwHiAiUser/ascend_ddk//lib/pkgconfig/* /usr/share/pkgconfig/ 执行成功 80 | 步骤2.设置环境变量: 81 | export PKG_CONFIG_PATH=/usr/share/pkgconfig/ 82 | ``` 83 | 84 | 4. 测试pyav安装是否成功 85 | 86 | ``` 87 | cd .. 88 | python3.6 89 | import av 90 | ``` 91 | 92 | 注意:不要再PyAv目录下测试,否则报错 93 | 94 | ModuleNotFoundError: No module named 'av._core' 95 | 96 | ### 安装numpy和PIL 97 | 98 | ``` 99 | pip3.6 install numpy 100 | pip3.6 install Pillow 101 | ``` 102 | 103 | ### 安装python atlasutil库 104 | 105 | python atlasutil库以源码方式提供,安装时将atlas_utils目录拷贝到运行环境,并将该路径加入PYTHONPATH环境变量即可。例如将整个samples仓拷贝到运行环境$HOME目录下,在~/.bashrc文件中添加: 106 | 107 | ``` 108 | export PYTHONPATH=$HOME/samples/python/common/:$PYTHONPATH 109 | ``` 110 | 111 | 并保存,然后执行 112 | 113 | ``` 114 | source ~/.bashrc 115 | ``` 116 | 117 | 或者单独将atlas_utils目录拷贝到运行环境$HOME/ascend_ddk/目录下,在~/.bashrc文件中添加: 118 | 119 | ``` 120 | export PYTHONPATH=$HOME/ascend_ddk/:$PYTHONPATH 121 | ``` 122 | 123 | 并保存,然后执行 124 | 125 | ``` 126 | source ~/.bashrc 127 | ``` 128 | 129 | 在应用代码中调用atlasutil库的接口时导入,例如: 130 | 131 | ``` 132 | import atlas_utils.presenteragent.presenter_channel as presenter_channel 133 | 134 | chan = presenter_channel.open_channel(config_file) 135 | ``` 136 | 137 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/socket_client.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import sys 4 | sys.path.append("..") 5 | 6 | import threading 7 | import socket 8 | import time 9 | import struct 10 | 11 | from atlas_utils.acl_logger import log_error, log_info 12 | 13 | class AgentSocket(object): 14 | """Create socket between app and presenter server""" 15 | def __init__(self, server_ip, port): 16 | """Create socket instance 17 | Args: 18 | server_ip: presenter server ip addr 19 | port: connect port of presenter server 20 | """ 21 | self._server_address = (server_ip, port) 22 | self._sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 23 | 24 | def connect(self): 25 | """Create connect with presenter server 26 | Returns: 27 | ret: connect error code, 0 is connect success, otherwise failed 28 | """ 29 | ret = 0 30 | for i in range(0, 5): 31 | ret = self._sock_client.connect_ex(self._server_address) 32 | if ret == 0: 33 | break 34 | time.sleep(0.2) 35 | return ret 36 | 37 | def _read_socket(self, read_len): 38 | has_read_len = 0 39 | read_buf = b'' 40 | total_buf = b'' 41 | 42 | while has_read_len != read_len: 43 | try: 44 | read_buf = self._sock_client.recv(read_len - has_read_len) 45 | except socket.error: 46 | log_error("Read socket failed, error ", socket.error) 47 | return False, None 48 | if read_buf == b'': 49 | return False, None 50 | total_buf += read_buf 51 | has_read_len = len(total_buf) 52 | 53 | return True, total_buf 54 | 55 | def _read_msg_head(self, read_len): 56 | ret, msg_head = self._read_socket(read_len) 57 | if not ret: 58 | log_error("socket receive msg head null") 59 | return None, None 60 | 61 | # in Struct(), 'I' is unsigned int, 'B' is unsigned char 62 | msg_head_data = struct.Struct('IB') 63 | (msg_total_len, msg_name_len) = msg_head_data.unpack(msg_head) 64 | msg_total_len = socket.ntohl(msg_total_len) 65 | 66 | return msg_total_len, msg_name_len 67 | 68 | def _read_msg_name(self, msg_name_len): 69 | ret, msg_name = self._read_socket(msg_name_len) 70 | 71 | if not ret: 72 | log_error("Socket receive msg but name is null") 73 | return False, None 74 | try: 75 | 
msg_name = msg_name.decode("utf-8") 76 | except: 77 | log_error("Msg name decode to utf-8 error") 78 | return False, None 79 | 80 | return True, msg_name 81 | 82 | def _read_msg_body(self, msg_body_len): 83 | ret, msg_body = self._read_socket(msg_body_len) 84 | if not ret: 85 | log_error("Socket receive msg but body null") 86 | return False, None 87 | return True, msg_body 88 | 89 | def recv_msg(self): 90 | """Receive message from presenter server 91 | Returns: 92 | msg_name: received message name 93 | msg_body: received message data 94 | """ 95 | # Step1: read msg head 96 | msg_total_len, msg_name_len = self._read_msg_head(5) 97 | if msg_total_len is None: 98 | log_error("msg total len is None.") 99 | return None, None 100 | 101 | # Step2: read msg name 102 | ret, msg_name = self._read_msg_name(msg_name_len) 103 | if not ret: 104 | return None, None 105 | 106 | # Step3: read msg body 107 | msg_body_len = msg_total_len - 5 - msg_name_len 108 | if msg_body_len < 0: 109 | log_error("msg total len is 0") 110 | return None, None 111 | ret, msg_body = self._read_msg_body(msg_body_len) 112 | if not ret: 113 | return None, None 114 | 115 | return msg_name, msg_body 116 | 117 | def send_msg(self, data): 118 | """Send message to presenter server 119 | Args: 120 | data: message data 121 | Returns: 122 | 0 send success 123 | 1 send failed 124 | """ 125 | try: 126 | self._sock_client.sendall(data) 127 | except: 128 | log_error("Send msg failed") 129 | return 1 130 | return 0 131 | 132 | def close(self): 133 | """Close connect""" 134 | self._sock_client.shutdown(socket.SHUT_RDWR) 135 | self._sock_client.close() 136 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_channel.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import time 4 | import configparser 5 | from multiprocessing import Process, Queue, Manager 6 | import queue 7 | import numpy as np 8 | import sys 9 | # sys.path.append("..") 10 | 11 | import acl 12 | import atlas_utils.constants as const 13 | from atlas_utils.acl_logger import log_error, log_info 14 | from atlas_utils.acl_image import AclImage 15 | 16 | 17 | import atlas_utils.presenteragent.presenter_datatype as dtype 18 | import atlas_utils.presenteragent.presenter_agent as agent 19 | import atlas_utils.presenteragent.presenter_message as pm 20 | 21 | 22 | class PresenterChannel(object): 23 | """Communication channel between presenter agent and server""" 24 | def __init__(self, server_ip, port, name='video', 25 | content_type=dtype.CONTENT_TYPE_VIDEO): 26 | """Create instance""" 27 | self._server_ip = server_ip 28 | self._port = port 29 | self._type = content_type 30 | self._name = name 31 | self.agent_msg_queue = Queue() 32 | self.open_status = Manager().Value('i', dtype.STATUS_DISCONNECT) 33 | self.data_respone_counter = Manager().Value('i', 0) 34 | self._send_counter = 0 35 | 36 | def startup(self): 37 | """Create channel and connect with presenter server 38 | Returns: 39 | 0 connect success 40 | 1 connect failed 41 | """ 42 | agent_process = Process( 43 | target=agent.StartPresenterAgent, 44 | args=( 45 | self.agent_msg_queue, 46 | self._server_ip, 47 | self._port, 48 | self.open_status, 49 | self.data_respone_counter)) 50 | agent_process.start() 51 | time.sleep(0.5) 52 | self._send_open_channel_request(self._name, self._type) 53 | return self._wait_open_status(dtype.STATUS_OPENED) 54 | 55 | def 
_wait_open_status(self, listen_status): 56 | ret = dtype.STATUS_ERROR 57 | for i in range(0, 100): 58 | time.sleep(0.001) 59 | if self.open_status.value == listen_status: 60 | log_info("Open status is %d now" % (listen_status)) 61 | ret = dtype.STATUS_OK 62 | break 63 | return ret 64 | 65 | def send_message(self, data): 66 | """Send message to presenter server""" 67 | self.agent_msg_queue.put(data) 68 | self._send_counter += 1 69 | 70 | def _send_open_channel_request(self, channel_name, content_type): 71 | request_msg = pm.open_channel_request(channel_name, content_type) 72 | self.send_message(request_msg) 73 | 74 | def send_detection_data(self, image_width, image_height, 75 | image, detection_result): 76 | """Send image frame request to presenter server""" 77 | image_data = None 78 | if isinstance(image, AclImage): 79 | image_data = image.nparray() 80 | elif isinstance(image, np.ndarray): 81 | image_data = image 82 | else: 83 | log_error("Invalid data to send, ", image) 84 | return False 85 | 86 | request_msg = pm.image_frame_request(image_width, image_height, 87 | image_data.tobytes(), 88 | detection_result) 89 | self.send_message(request_msg) 90 | 91 | return True 92 | 93 | def send_image(self, image_width, image_height, image): 94 | """Send image frame request that only has image to presenter server""" 95 | detection_result = [] 96 | return self.send_detection_data(image_width, image_height, 97 | image, detection_result) 98 | 99 | def _send_heart_beat_message(self): 100 | msg = pm.heartbeat_message() 101 | self.send_message(msg) 102 | 103 | def close(self): 104 | """Close channel""" 105 | if self.open_status == dtype.STATUS_EXITTED: 106 | return 107 | 108 | log_info("Presenter channel close...") 109 | eos = dtype.FinishMsg("exit") 110 | self.send_message(eos) 111 | while self.agent_msg_queue.qsize() > 0: 112 | time.sleep(0.001) 113 | self.open_status = dtype.STATUS_EXITTED 114 | 115 | def __del__(self): 116 | self.close() 117 | 118 | 119 | def get_channel_config(config_file): 120 | """Get connect parameters from config file""" 121 | config = configparser.ConfigParser() 122 | config.read(config_file) 123 | presenter_server_ip = config['baseconf']['presenter_server_ip'] 124 | port = int(config['baseconf']['presenter_server_port']) 125 | channel_name = config['baseconf']['channel_name'] 126 | content_type = int(config['baseconf']['content_type']) 127 | 128 | log_info( 129 | "presenter server ip %s, port %d, channel name %s, " 130 | "type %d" % 131 | (presenter_server_ip, port, channel_name, content_type)) 132 | return presenter_server_ip, port, channel_name, content_type 133 | 134 | 135 | def open_channel(config_file): 136 | """Connect with presenter server""" 137 | server_ip, port, channel_name, content_type = get_channel_config( 138 | config_file) 139 | channel = PresenterChannel(server_ip, port, channel_name, content_type) 140 | ret = channel.startup() 141 | if ret: 142 | log_error("ERROR:Open channel failed") 143 | return None 144 | return channel 145 | -------------------------------------------------------------------------------- /script/yolo_onnx_opt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import onnx 3 | import onnxsim 4 | 5 | ONNX_MODEL_PATH = r'../model/yolox_nano.onnx' 6 | ONNX_OPT_SAVE_PATH = os.path.splitext(ONNX_MODEL_PATH)[0] + '_simple.onnx' 7 | 8 | 9 | def add_node_inputs(in_graph, node_idx, input_name=None, input_shape=None): 10 | input_info = onnx.helper.make_tensor_value_info(input_name, 
elem_type=onnx.TensorProto.FLOAT, shape=input_shape) 11 | in_graph.input.insert(0, input_info) 12 | in_graph.node[node_idx].input[0] = input_name 13 | return in_graph 14 | 15 | 16 | def add_node_outputs(in_graph, node_idx, output_name=None, output_shape=None): 17 | output_info = onnx.helper.make_tensor_value_info(output_name, elem_type=onnx.TensorProto.FLOAT, shape=output_shape) 18 | in_graph.output.insert(0, output_info) 19 | in_graph.node[node_idx].output[0] = output_name 20 | return in_graph 21 | 22 | 23 | def cut_input_nodes(in_graph, input_nodes_names=None, input_names=None): 24 | nodes = in_graph.node 25 | graph_input_num = len(in_graph.input) 26 | # 重新设置graph input的names 27 | input_names_iter = iter(input_names) 28 | # 将node name与idx对应 29 | nodes_dict = {nodes[i].name: i for i in range(len(nodes))} 30 | # 将node output与idx对应 31 | nodes_outputs_dict = {nodes[i].output[0]: i for i in range(len(nodes))} 32 | # 初始化del_idxs,记录nodes所需修改 33 | # 0=不修改,1=删除,2=保留节点,设置输入shape和naame 34 | nodes_names = nodes_dict.keys() 35 | del_idxs = [0] * len(nodes_names) 36 | # 遍历需要去除的input_nodes,将他们的状态写到del_idxs 37 | for input_node_name in input_nodes_names: 38 | assert input_node_name in nodes_names, 'input node name %f is not in input onnx model' % input_node_name 39 | input_node_idx = nodes_dict[input_node_name] 40 | del_idxs[input_node_idx] = 2 41 | for node in nodes[input_node_idx::-1]: 42 | for node_input in node.input: 43 | if node_input in nodes_outputs_dict: 44 | if del_idxs[input_node_idx] == 2 or del_idxs[input_node_idx] == 1: 45 | del_idxs[nodes_outputs_dict[node_input]] = 1 46 | 47 | # 反向遍历所有nodes,将del_idxs标明需要修改的nodes按状态进行修改 48 | for i in range(len(del_idxs) - 1, -1, -1): 49 | if del_idxs[i] == 0: 50 | continue 51 | elif del_idxs[i] == 1: 52 | # 删掉input_nodes前的所有节点 53 | nodes.remove(nodes[i]) 54 | elif del_idxs[i] == 2: 55 | # 记录input_nodes的output shape,然后设置新input name和tensor 56 | node_out_dim_info = in_graph.value_info[i-graph_input_num].type.tensor_type.shape.dim 57 | input_shape = [n.dim_value for n in node_out_dim_info] 58 | input_name = next(input_names_iter) 59 | # 给新的input node设定输入graph.input的name和tensor 60 | in_graph = add_node_inputs(in_graph, node_idx=i, input_name=input_name, input_shape=input_shape) 61 | 62 | new_nodes_inputs = [nodes[i].input[0] for i in range(len(nodes))] 63 | for inp in in_graph.input: 64 | if inp.name not in new_nodes_inputs: 65 | in_graph.input.remove(inp) 66 | 67 | return in_graph 68 | 69 | 70 | def cut_output_nodes(in_graph, output_nodes_names=None): 71 | nodes = in_graph.node 72 | # 将node name与idx对应 73 | nodes_dict = {nodes[i].name: i for i in range(len(nodes))} 74 | # 将node output与idx对应 75 | nodes_outputs_dict = {nodes[i].output[0]: i for i in range(len(nodes))} 76 | # 初始化del_idxs,记录nodes所需修改 77 | # 0=不修改,1=删除,2=保留节点 78 | nodes_names = nodes_dict.keys() 79 | del_idxs = [0] * len(nodes_names) 80 | # 遍历需要去除的output_nodes,将他们的状态写到del_idxs 81 | for output_node_name in output_nodes_names: 82 | assert output_node_name in nodes_names, 'input node name %f is not in input onnx model' % output_node_name 83 | output_node_idx = nodes_dict[output_node_name] 84 | del_idxs[output_node_idx] = 2 85 | for node in nodes[output_node_idx:]: 86 | for node_input in node.input: 87 | if node_input in nodes_outputs_dict: 88 | if del_idxs[nodes_outputs_dict[node_input]] == 2 or del_idxs[nodes_outputs_dict[node_input]] == 1: 89 | del_idxs[nodes_dict[node.name]] = 1 90 | 91 | # 反向遍历所有nodes,将del_idxs标明需要修改的nodes按状态进行修改 92 | for i in range(len(del_idxs) - 1, -1, -1): 93 | if del_idxs[i] 
== 0: 94 | continue 95 | elif del_idxs[i] == 1: 96 | # 删掉output_nodes后的所有节点 97 | nodes.remove(nodes[i]) 98 | 99 | new_nodes_outputs = [nodes[i].output[0] for i in range(len(nodes))] 100 | for output in in_graph.output: 101 | if output.name not in new_nodes_outputs: 102 | in_graph.output.remove(output) 103 | 104 | return in_graph 105 | 106 | 107 | if __name__ == '__main__': 108 | yolox_opt = True 109 | print('========== Step 0. Load the onnx model ========') 110 | model = onnx.load(ONNX_MODEL_PATH) 111 | print('Done') 112 | print('========== Step 1. Simplify the onnx model ========') 113 | model_sim, ret = onnxsim.simplify(model) 114 | assert ret, 'Failed on simplifying the model' 115 | print('Done') 116 | graph = model_sim.graph 117 | print('========== Step 2. Remove the input nodes and add inputs ========') 118 | new_graph = cut_input_nodes(graph, 119 | input_nodes_names=['Conv_41'], 120 | input_names=['img_input']) 121 | print('Done') 122 | print('========== Step 3. Remove the output nodes and add outputs ========') 123 | if not yolox_opt: 124 | new_graph = cut_output_nodes(new_graph, 125 | output_nodes_names=['Transpose_570']) 126 | print('Done') 127 | print('========== Step 4. Save graph ========') 128 | onnx.checker.check_model(model_sim) 129 | onnx.save(model_sim, ONNX_OPT_SAVE_PATH) 130 | print('Done') 131 | -------------------------------------------------------------------------------- /atlas_utils/lib/src/camera.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * ============================================================================ 3 | * 4 | * Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions are met: 8 | * 9 | * 1 Redistributions of source code must retain the above copyright notice, 10 | * this list of conditions and the following disclaimer. 11 | * 12 | * 2 Redistributions in binary form must reproduce the above copyright notice, 13 | * this list of conditions and the following disclaimer in the documentation 14 | * and/or other materials provided with the distribution. 15 | * 16 | * 3 Neither the names of the copyright holders nor the names of the 17 | * contributors may be used to endorse or promote products derived from this 18 | * software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | * POSSIBILITY OF SUCH DAMAGE. 
31 | * ============================================================================ 32 | */ 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include "acl/acl.h" 42 | #include "acl/ops/acl_dvpp.h" 43 | #include "atlas_utils.h" 44 | #include "camera.h" 45 | 46 | using namespace std; 47 | 48 | extern "C" { 49 | #include "peripheral_api.h" 50 | #include "camera.h" 51 | CameraManager g_CameraMgr; 52 | 53 | void HwInit() { 54 | if (!g_CameraMgr.hwInited) { 55 | MediaLibInit(); 56 | g_CameraMgr.hwInited = 1; 57 | } 58 | } 59 | 60 | int CameraInit(int id, int fps, int width, int height) { 61 | Camera& cap = CAMERA(id); 62 | cap.frameSize = YUV420SP_SIZE(width, height); 63 | cap.id = id; 64 | cap.fps = fps; 65 | cap.width = width; 66 | cap.height = height; 67 | cap.inited = true; 68 | 69 | return ATLAS_OK; 70 | } 71 | 72 | int ConfigCamera(int id, int fps, int width, int height) { 73 | int ret = SetCameraProperty(id, CAMERA_PROP_FPS, &fps); 74 | if (ret == LIBMEDIA_STATUS_FAILED) { 75 | ATLAS_LOG_ERROR("Set camera fps failed"); 76 | return ATLAS_ERROR; 77 | } 78 | 79 | CameraResolution resolution; 80 | resolution.width = width; 81 | resolution.height = height; 82 | ret = SetCameraProperty(id, CAMERA_PROP_RESOLUTION, &resolution); 83 | if (ret == LIBMEDIA_STATUS_FAILED) { 84 | ATLAS_LOG_ERROR("Set camera resolution failed"); 85 | return ATLAS_ERROR; 86 | } 87 | 88 | CameraCapMode mode = CAMERA_CAP_ACTIVE; 89 | ret = SetCameraProperty(id, CAMERA_PROP_CAP_MODE, &mode); 90 | if (ret == LIBMEDIA_STATUS_FAILED) { 91 | ATLAS_LOG_ERROR("Set camera mode:%d failed", mode); 92 | return ATLAS_ERROR; 93 | } 94 | 95 | return ATLAS_OK; 96 | } 97 | 98 | int OpenCameraEx(int id, int fps, int width, int height) { 99 | if ((id < 0) || (id >= CAMERA_NUM)) { 100 | ATLAS_LOG_ERROR("Open camera failed for invalid id %d", id); 101 | return ATLAS_ERROR; 102 | } 103 | 104 | HwInit(); 105 | 106 | CameraStatus status = QueryCameraStatus(id); 107 | if (status == CAMERA_STATUS_CLOSED){ 108 | // Open Camera 109 | if (LIBMEDIA_STATUS_FAILED == OpenCamera(id)) { 110 | ATLAS_LOG_ERROR("Camera%d closed, and open failed.", id); 111 | return ATLAS_ERROR; 112 | } 113 | } else if (status != CAMERA_STATUS_OPEN) { 114 | //如果摄像头状态不是close状态也不是open状态,则认为摄像头异常 115 | ATLAS_LOG_ERROR("Invalid camera%d status %d", id, status); 116 | return ATLAS_ERROR; 117 | } 118 | 119 | //Set camera property 120 | if (ATLAS_OK != ConfigCamera(id, fps, width, height)) { 121 | CloseCamera(id); 122 | ATLAS_LOG_ERROR("Set camera%d property failed", id); 123 | return ATLAS_ERROR; 124 | } 125 | 126 | if (!CAMERA(id).inited) { 127 | CameraInit(id, fps, width, height); 128 | } 129 | 130 | ATLAS_LOG_INFO("Open camera %d success", id); 131 | 132 | return ATLAS_OK; 133 | } 134 | 135 | int ReadCameraFrame(int id, CameraOutput& frame) { 136 | int size = CAMERA(id).frameSize; 137 | void* data = nullptr; 138 | auto aclRet = acldvppMalloc(&data, size); 139 | if (aclRet != ACL_ERROR_NONE) { 140 | ATLAS_LOG_ERROR("acl malloc dvpp data failed, dataSize %d, error %d", 141 | size, aclRet); 142 | return ATLAS_ERROR; 143 | } 144 | 145 | int ret = ReadFrameFromCamera(id, (void*)data, (int *)&size); 146 | if ((ret == LIBMEDIA_STATUS_FAILED) || 147 | (size != CAMERA(id).frameSize)) { 148 | acldvppFree(data); 149 | ATLAS_LOG_ERROR("Get image from camera %d failed, size %d", id, size); 150 | return ATLAS_ERROR; 151 | } 152 | frame.size = size; 153 | frame.data = (uint8_t*)data; 154 | 155 | return ATLAS_OK; 156 | } 157 | 
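/* Note on ReadCameraFrame above: the output buffer is allocated with acldvppMalloc, so it lives in DVPP memory. On success, ownership of frame.data passes to the caller (the Python AclImage wrapper in this repository appears to release such buffers later with acl.media.dvpp_free); on failure the function frees the buffer itself before returning. */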
158 | int CloseCameraEx(int cameraId) { 159 | if (LIBMEDIA_STATUS_FAILED == CloseCamera(cameraId)) { 160 | ATLAS_LOG_ERROR("Close camera %d failed", cameraId); 161 | return ATLAS_ERROR; 162 | } 163 | 164 | return ATLAS_OK; 165 | } 166 | 167 | } 168 | -------------------------------------------------------------------------------- /atlas_utils/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (R) @huawei.com, all rights reserved 3 | -*- coding:utf-8 -*- 4 | CREATED: 2020-6-04 20:12:13 5 | MODIFIED: 2020-6-06 14:04:45 6 | """ 7 | SUCCESS = 0 8 | FAILED = 1 9 | 10 | ACL_DEVICE = 0 11 | ACL_HOST = 1 12 | 13 | MEMORY_NORMAL = 0 14 | MEMORY_HOST = 1 15 | MEMORY_DEVICE = 2 16 | MEMORY_DVPP = 3 17 | MEMORY_CTYPES = 4 18 | 19 | IMAGE_DATA_NUMPY = 0 20 | IMAGE_DATA_BUFFER = 1 21 | 22 | READ_VIDEO_OK = 0 23 | 24 | # error code 25 | ACL_ERROR_NONE = 0 26 | ACL_ERROR_INVALID_PARAM = 100000 27 | ACL_ERROR_UNINITIALIZE = 100001 28 | ACL_ERROR_REPEAT_INITIALIZE = 100002 29 | ACL_ERROR_INVALID_FILE = 100003 30 | ACL_ERROR_WRITE_FILE = 100004 31 | ACL_ERROR_INVALID_FILE_SIZE = 100005 32 | ACL_ERROR_PARSE_FILE = 100006 33 | ACL_ERROR_FILE_MISSING_ATTR = 100007 34 | ACL_ERROR_FILE_ATTR_INVALID = 100008 35 | ACL_ERROR_INVALID_DUMP_CONFIG = 100009 36 | ACL_ERROR_INVALID_PROFILING_CONFIG = 100010 37 | ACL_ERROR_INVALID_MODEL_ID = 100011 38 | ACL_ERROR_DESERIALIZE_MODEL = 100012 39 | ACL_ERROR_PARSE_MODEL = 100013 40 | ACL_ERROR_READ_MODEL_FAILURE = 100014 41 | ACL_ERROR_MODEL_SIZE_INVALID = 100015 42 | ACL_ERROR_MODEL_MISSING_ATTR = 100016 43 | ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017 44 | ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018 45 | ACL_ERROR_MODEL_NOT_DYNAMIC = 100019 46 | ACL_ERROR_OP_TYPE_NOT_MATCH = 100020 47 | ACL_ERROR_OP_INPUT_NOT_MATCH = 100021 48 | ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022 49 | ACL_ERROR_OP_ATTR_NOT_MATCH = 100023 50 | ACL_ERROR_OP_NOT_FOUND = 100024 51 | ACL_ERROR_OP_LOAD_FAILED = 100025 52 | ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026 53 | ACL_ERROR_FORMAT_NOT_MATCH = 100027 54 | ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028 55 | ACL_ERROR_KERNEL_NOT_FOUND = 100029 56 | ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030 57 | ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031 58 | ACL_ERROR_INVALID_QUEUE_ID = 100032 59 | ACL_ERROR_REPEAT_SUBSCRIBE = 100033 60 | ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034 61 | ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035 62 | ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036 63 | ACL_ERROR_REPEAT_FINALIZE = 100037 64 | ACL_ERROR_BAD_ALLOC = 200000 65 | ACL_ERROR_API_NOT_SUPPORT = 200001 66 | ACL_ERROR_INVALID_DEVICE = 200002 67 | ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003 68 | ACL_ERROR_RESOURCE_NOT_MATCH = 200004 69 | ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005 70 | ACL_ERROR_STORAGE_OVER_LIMIT = 300000 71 | ACL_ERROR_INTERNAL_ERROR = 500000 72 | ACL_ERROR_FAILURE = 500001 73 | ACL_ERROR_GE_FAILURE = 500002 74 | ACL_ERROR_RT_FAILURE = 500003 75 | ACL_ERROR_DRV_FAILURE = 500004 76 | # rule for mem 77 | ACL_MEM_MALLOC_HUGE_FIRST = 0 78 | ACL_MEM_MALLOC_HUGE_ONLY = 1 79 | ACL_MEM_MALLOC_NORMAL_ONLY = 2 80 | # rule for memory copy 81 | ACL_MEMCPY_HOST_TO_HOST = 0 82 | ACL_MEMCPY_HOST_TO_DEVICE = 1 83 | ACL_MEMCPY_DEVICE_TO_HOST = 2 84 | ACL_MEMCPY_DEVICE_TO_DEVICE = 3 85 | # input 86 | LAST_ONE = -1 87 | LAST_TWO = -2 88 | type_dict = { 89 | "bool": 0, 90 | "int8": 1, 91 | "int16": 2, 92 | "int32": 4, 93 | "int64": 8, 94 | "uint8": 1, 95 | "uint16": 2, 96 | "uint32": 4, 97 | "uint64": 8, 98 | "float16": 2, 99 | 
"float32": 4, 100 | "float64": 8, 101 | "float_": 8 102 | } 103 | NPY_BOOL = 0 104 | NPY_BYTE = 1 105 | NPY_UBYTE = 2 106 | NPY_SHORT = 3 107 | NPY_USHORT = 4 108 | NPY_INT = 5 109 | NPY_UINT = 6 110 | NPY_LONG = 7 111 | NPY_ULONG = 8 112 | NPY_LONGLONG = 9 113 | NPY_ULONGLONG = 10 114 | 115 | ACL_DT_UNDEFINED = -1 116 | ACL_FLOAT = 0 117 | ACL_FLOAT16 = 1 118 | ACL_INT8 = 2 119 | ACL_INT32 = 3 120 | ACL_UINT8 = 4 121 | ACL_INT16 = 6 122 | ACL_UINT16 = 7 123 | ACL_UINT32 = 8 124 | ACL_INT64 = 9 125 | ACL_UINT64 = 10 126 | ACL_DOUBLE = 11 127 | ACL_BOOL = 12 128 | 129 | 130 | # data format 131 | ACL_FORMAT_UNDEFINED = -1 132 | ACL_FORMAT_NCHW = 0 133 | ACL_FORMAT_NHWC = 1 134 | ACL_FORMAT_ND = 2 135 | ACL_FORMAT_NC1HWC0 = 3 136 | ACL_FORMAT_FRACTAL_Z = 4 137 | ACL_DT_UNDEFINED = -1 138 | ACL_FLOAT = 0 139 | ACL_FLOAT16 = 1 140 | ACL_INT8 = 2 141 | ACL_INT32 = 3 142 | ACL_UINT8 = 4 143 | ACL_INT16 = 6 144 | ACL_UINT16 = 7 145 | ACL_UINT32 = 8 146 | ACL_INT64 = 9 147 | ACL_UINT64 = 10 148 | ACL_DOUBLE = 11 149 | ACL_BOOL = 12 150 | acl_dtype = { 151 | "dt_undefined": -1, 152 | "float": 0, 153 | "float16": 1, 154 | "int8": 2, 155 | "int32": 3, 156 | "uint8": 4, 157 | "int16": 6, 158 | "uint16": 7, 159 | "uint32": 8, 160 | "int64": 9, 161 | "double": 11, 162 | "bool": 12 163 | } 164 | ACL_CALLBACK_NO_BLOCK = 0 165 | ACL_CALLBACK_BLOCK = 1 166 | PIXEL_FORMAT_YUV_400 = 0 # 0, YUV400 8bit 167 | PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1 # 1, YUV420SP NV12 8bit 168 | PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2 # 2, YUV420SP NV21 8bit 169 | PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3 # 3, YUV422SP NV12 8bit 170 | PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4 # 4, YUV422SP NV21 8bit 171 | PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5 # 5, YUV444SP NV12 8bit 172 | PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6 # 6, YUV444SP NV21 8bit 173 | PIXEL_FORMAT_YUYV_PACKED_422 = 7 # 7, YUV422P YUYV 8bit 174 | PIXEL_FORMAT_UYVY_PACKED_422 = 8 # 8, YUV422P UYVY 8bit 175 | PIXEL_FORMAT_YVYU_PACKED_422 = 9 # 9, YUV422P YVYU 8bit 176 | PIXEL_FORMAT_VYUY_PACKED_422 = 10 # 10, YUV422P VYUY 8bit 177 | PIXEL_FORMAT_YUV_PACKED_444 = 11 # 11, YUV444P 8bit 178 | PIXEL_FORMAT_RGB_888 = 12 # 12, RGB888 179 | PIXEL_FORMAT_BGR_888 = 13 # 13, BGR888 180 | PIXEL_FORMAT_ARGB_8888 = 14 # 14, ARGB8888 181 | PIXEL_FORMAT_ABGR_8888 = 15 # 15, ABGR8888 182 | PIXEL_FORMAT_RGBA_8888 = 16 # 16, RGBA8888 183 | PIXEL_FORMAT_BGRA_8888 = 17 # 17, BGRA8888 184 | PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18 # 18, YUV420SP 10bit 185 | PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19 # 19, YVU420sp 10bit 186 | PIXEL_FORMAT_YVU_PLANAR_420 = 20 # 20, YUV420P 8bit 187 | # images format 188 | IMG_EXT = ['.jpg', '.JPG', '.png', '.PNG', '.bmp', '.BMP', '.jpeg', '.JPEG'] 189 | 190 | ENCODE_FORMAT_UNKNOW = 0 191 | ENCODE_FORMAT_JPEG = 1 192 | ENCODE_FORMAT_PNG = 2 193 | ENCODE_FORMAT_YUV420_SP = 3 194 | 195 | """ 196 | enType 0 197 | 0 H265 main level 198 | 1 H264 baseline level 199 | 2 H264 main level 200 | 3 H264 high level 201 | """ 202 | ENTYPE_H265_MAIN = 0 203 | ENTYPE_H264_BASE = 1 204 | ENTYPE_H264_MAIN = 2 205 | ENTYPE_H264_HIGH = 3 206 | 207 | # h264 stream codec id 208 | AV_CODEC_ID_H264 = 27 209 | # h265 stream codec id 210 | AV_CODEC_ID_HEVC = 173 211 | # h264 baseline level 212 | FF_PROFILE_H264_BASELINE = 66 213 | # h264 main level profile 214 | FF_PROFILE_H264_MAIN = 77 215 | # h264 high level profile 216 | FF_PROFILE_H264_HIGH = 100 217 | # h265 main level profile 218 | FF_PROFILE_HEVC_MAIN = 1 219 | -------------------------------------------------------------------------------- 
/atlas_utils/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import acl 3 | import atlas_utils.constants as const 4 | from atlas_utils.acl_logger import log_error, log_info 5 | import time 6 | 7 | from functools import wraps 8 | DEBUG = True 9 | 10 | 11 | def check_ret(message, ret_int): 12 | """Check int value is 0 or not 13 | Args: 14 | message: output log str 15 | ret_int: check value that type is int 16 | """ 17 | if ret_int != 0: 18 | raise Exception("{} failed ret_int={}" 19 | .format(message, ret_int)) 20 | 21 | 22 | def check_none(message, ret_none): 23 | """Check object is None or not 24 | Args: 25 | message: output log str 26 | ret_none: check object 27 | """ 28 | if ret_none is None: 29 | raise Exception("{} failed" 30 | .format(message)) 31 | 32 | 33 | def copy_data_device_to_host(device_data, data_size): 34 | """Copy device data to host 35 | Args: 36 | device_data: data that to be copyed 37 | data_size: data size 38 | Returns: 39 | None: copy failed 40 | others: host data which copy from device_data 41 | """ 42 | host_buffer, ret = acl.rt.malloc_host(data_size) 43 | if ret != const.ACL_ERROR_NONE: 44 | log_error("Malloc host memory failed, error: ", ret) 45 | return None 46 | 47 | ret = acl.rt.memcpy(host_buffer, data_size, 48 | device_data, data_size, 49 | const.ACL_MEMCPY_DEVICE_TO_HOST) 50 | if ret != const.ACL_ERROR_NONE: 51 | log_error("Copy device data to host memory failed, error: ", ret) 52 | acl.rt.free_host(host_buffer) 53 | return None 54 | 55 | return host_buffer 56 | 57 | 58 | def copy_data_device_to_device(device_data, data_size): 59 | """Copy device data to device 60 | Args: 61 | device_data: data that to be copyed 62 | data_size: data size 63 | Returns: 64 | None: copy failed 65 | others: device data which copy from device_data 66 | """ 67 | device_buffer, ret = acl.rt.malloc(data_size, 68 | const.ACL_MEM_MALLOC_NORMAL_ONLY) 69 | if ret != const.ACL_ERROR_NONE: 70 | log_error("Malloc device memory failed, error: ", ret) 71 | return None 72 | 73 | ret = acl.rt.memcpy(device_buffer, data_size, 74 | device_data, data_size, 75 | const.ACL_MEMCPY_DEVICE_TO_DEVICE) 76 | if ret != const.ACL_ERROR_NONE: 77 | log_error("Copy device data to device memory failed, error: ", ret) 78 | acl.rt.free(device_buffer) 79 | return None 80 | 81 | return device_buffer 82 | 83 | 84 | def copy_data_host_to_device(host_data, data_size): 85 | """Copy host data to device 86 | Args: 87 | host_data: data that to be copyed 88 | data_size: data size 89 | Returns: 90 | None: copy failed 91 | others: device data which copy from host_data 92 | """ 93 | device_buffer, ret = acl.rt.malloc(data_size, 94 | const.ACL_MEM_MALLOC_NORMAL_ONLY) 95 | if ret != const.ACL_ERROR_NONE: 96 | log_error("Malloc device memory failed, error: ", ret) 97 | return None 98 | 99 | ret = acl.rt.memcpy(device_buffer, data_size, 100 | host_data, data_size, 101 | const.ACL_MEMCPY_HOST_TO_DEVICE) 102 | if ret != const.ACL_ERROR_NONE: 103 | log_error("Copy device data to device memory failed, error: ", ret) 104 | acl.rt.free(device_buffer) 105 | return None 106 | 107 | return device_buffer 108 | 109 | 110 | def copy_data_host_to_host(host_data, data_size): 111 | """Copy host data to host 112 | Args: 113 | host_data: data that to be copyed 114 | data_size: data size 115 | Returns: 116 | None: copy failed 117 | others: host data which copy from host_data 118 | """ 119 | host_buffer, ret = acl.rt.malloc_host(data_size) 120 | if ret != const.ACL_ERROR_NONE: 
121 | log_error("Malloc host memory failed, error: ", ret) 122 | return None 123 | 124 | ret = acl.rt.memcpy(host_buffer, data_size, 125 | host_data, data_size, 126 | const.ACL_MEMCPY_HOST_TO_HOST) 127 | if ret != const.ACL_ERROR_NONE: 128 | log_error("Copy host data to host memory failed, error: ", ret) 129 | acl.rt.free_host(host_buffer) 130 | return None 131 | 132 | return host_buffer 133 | 134 | 135 | def copy_data_to_dvpp(data, size, run_mode): 136 | """Copy data to dvpp 137 | Args: 138 | data: data that to be copyed 139 | data_size: data size 140 | run_mode: device run mode 141 | Returns: 142 | None: copy failed 143 | others: data which copy from host_data 144 | """ 145 | policy = const.ACL_MEMCPY_HOST_TO_DEVICE 146 | if run_mode == const.ACL_DEVICE: 147 | policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE 148 | 149 | dvpp_buf, ret = acl.media.dvpp_malloc(size) 150 | check_ret("acl.rt.malloc_host", ret) 151 | 152 | ret = acl.rt.memcpy(dvpp_buf, size, data, size, policy) 153 | check_ret("acl.rt.memcpy", ret) 154 | 155 | return dvpp_buf 156 | 157 | 158 | def copy_data_as_numpy(data, size, data_mem_type, run_mode): 159 | """Copy data as numpy array 160 | Args: 161 | data: data that to be copyed 162 | size: data size 163 | data_mem_type: src data memory type 164 | run_mode: device run mode 165 | Returns: 166 | None: copy failed 167 | others: numpy array whoes data copy from host_data 168 | """ 169 | np_data = np.zeros(size, dtype=np.byte) 170 | np_data_ptr = acl.util.numpy_to_ptr(np_data) 171 | 172 | policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE 173 | if run_mode == const.ACL_HOST: 174 | if ((data_mem_type == const.MEMORY_DEVICE) or 175 | (data_mem_type == const.MEMORY_DVPP)): 176 | policy = const.ACL_MEMCPY_DEVICE_TO_HOST 177 | elif data_mem_type == const.MEMORY_HOST: 178 | policy = const.ACL_MEMCPY_HOST_TO_HOST 179 | 180 | ret = acl.rt.memcpy(np_data_ptr, size, data, size, policy) 181 | check_ret("acl.rt.memcpy", ret) 182 | 183 | return np_data 184 | 185 | 186 | def align_up(value, align): 187 | """Align up int value 188 | Args: 189 | value:input data 190 | align: align data 191 | Return: 192 | aligned data 193 | """ 194 | return int(int((value + align - 1) / align) * align) 195 | 196 | 197 | def align_up16(value): 198 | """Align up data with 16 199 | Args: 200 | value:input data 201 | Returns: 202 | 16 aligned data 203 | """ 204 | return align_up(value, 16) 205 | 206 | 207 | def align_up128(value): 208 | """Align up data with 128 209 | Args: 210 | value:input data 211 | Returns: 212 | 128 aligned data 213 | """ 214 | return align_up(value, 128) 215 | 216 | 217 | def align_up2(value): 218 | """Align up data with 2 219 | Args: 220 | value:input data 221 | Returns: 222 | 2 aligned data 223 | """ 224 | return align_up(value, 2) 225 | 226 | 227 | def yuv420sp_size(width, height): 228 | """Calculate yuv420sp image size 229 | Args: 230 | width: image width 231 | height: image height 232 | Returns: 233 | image data size 234 | """ 235 | return int(width * height * 3 / 2) 236 | 237 | 238 | def rgbu8_size(width, height): 239 | """Calculate rgb 24bit image size 240 | Args: 241 | width: image width 242 | height: image height 243 | Returns: 244 | rgb 24bit image data size 245 | """ 246 | return int(width * height * 3) 247 | 248 | 249 | def display_time(func): 250 | """print func execute time""" 251 | @wraps(func) 252 | def wrapper(*args, **kwargs): 253 | """wrapper caller""" 254 | if DEBUG: 255 | btime = time.time() 256 | res = func(*args, **kwargs) 257 | use_time = time.time() - btime 258 | print("in %s, use 
time:%s" % (func.__name__, use_time)) 259 | return res 260 | else: 261 | return func(*args, **kwargs) 262 | 263 | return wrapper 264 | -------------------------------------------------------------------------------- /atlas_utils/acl_image.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PIL import Image 4 | 5 | import acl 6 | import atlas_utils.utils as utils 7 | import atlas_utils.acl_logger as acl_log 8 | import atlas_utils.constants as const 9 | 10 | 11 | class AclImage(object): 12 | """Image data and operation class 13 | Wrap image data and operation method, support jpeg, png, yuv file and 14 | memory data 15 | 16 | Attributes: 17 | _run_mode: device run mode 18 | _data: image binary data or numpy array 19 | _memory_type: the data in which memory, include dvpp, 20 | device and np array 21 | width: image width 22 | height: image height 23 | _encode_format: image format 24 | _load_ok: load image success or not 25 | 26 | """ 27 | _run_mode, _ = acl.rt.get_run_mode() 28 | 29 | def __init__(self, image, width=0, height=0, 30 | size=0, memory_type=const.MEMORY_NORMAL): 31 | """Create AclImage instance 32 | Args: 33 | image: image data, binary, numpy array or file path 34 | width: image width. if image is jpeg or png file, 35 | this arg is not nesscessary 36 | height: image height. if image is jpeg or png file, this arg is 37 | not nesscessary 38 | size: image data size. if image is file path, this arg is not 39 | nesscessary 40 | memory_type: memory type of image data. if image is file path, this 41 | arg is not nesscessary 42 | """ 43 | self._data = None 44 | self._memory_type = memory_type 45 | self.width = 0 46 | self.height = 0 47 | self.size = 0 48 | self._encode_format = const.ENCODE_FORMAT_UNKNOW 49 | self._load_ok = True 50 | 51 | if isinstance(image, str): 52 | self._instance_by_image_file(image, width, height) 53 | elif isinstance(image, int): 54 | self._instance_by_buffer(image, width, height, size) 55 | elif isinstance(image, np.ndarray): 56 | self._instance_by_nparray(image, width, height) 57 | else: 58 | acl_log.log_error("Create instance failed for " 59 | "unknow image data type") 60 | 61 | def _instance_by_image_file(self, image_path, width, height): 62 | # Get image format by filename suffix 63 | self._encode_format = self._get_image_format_by_suffix(image_path) 64 | if self._encode_format == const.ENCODE_FORMAT_UNKNOW: 65 | acl_log.log_error("Load image %s failed" % (image_path)) 66 | self._load_ok = False 67 | return 68 | 69 | # Read image data from file to memory 70 | self._data = np.fromfile(image_path, dtype=np.byte) 71 | self._type = const.IMAGE_DATA_NUMPY 72 | self.size = self._data.itemsize * self._data.size 73 | self._memory_type = const.MEMORY_NORMAL 74 | 75 | # Get image parameters of jpeg or png file by pillow 76 | if ((self._encode_format == const.ENCODE_FORMAT_JPEG) or 77 | (self._encode_format == const.ENCODE_FORMAT_PNG)): 78 | image = Image.open(image_path) 79 | self.width, self.height = image.size 80 | else: 81 | # pillow can not decode yuv, so need input widht and height args 82 | self.width = width 83 | self.height = height 84 | 85 | def _get_image_format_by_suffix(self, filename): 86 | suffix = os.path.splitext(filename)[-1].strip().lower() 87 | if (suffix == ".jpg") or (suffix == ".jpeg"): 88 | image_format = const.ENCODE_FORMAT_JPEG 89 | elif suffix == ".png": 90 | image_format = const.ENCODE_FORMAT_PNG 91 | elif suffix == ".yuv": 92 | image_format = 
const.ENCODE_FORMAT_YUV420_SP 93 | else: 94 | acl_log.log_error("Unsupport image format: ", suffix) 95 | image_format = const.ENCODE_FORMAT_UNKNOW 96 | 97 | return image_format 98 | 99 | def is_loaded(self): 100 | """Image file load result 101 | When create image instance by file, call this method to check 102 | file load success or not 103 | 104 | Returns: 105 | True: load success 106 | False: load failed 107 | """ 108 | return self._load_ok 109 | 110 | def _instance_by_buffer(self, image_buffer, width, height, size): 111 | self.width = width 112 | self.height = height 113 | self.size = size 114 | self._data = image_buffer 115 | self._type = const.IMAGE_DATA_BUFFER 116 | 117 | def _instance_by_nparray(self, data, width, height): 118 | self.width = width 119 | self.height = height 120 | self.size = data.itemsize * data.size 121 | self._data = data 122 | self._type = const.IMAGE_DATA_NUMPY 123 | self._memory_type = const.MEMORY_NORMAL 124 | 125 | def nparray(self): 126 | """Trans image data to np array""" 127 | if self._type == const.IMAGE_DATA_NUMPY: 128 | return self._data.copy() 129 | 130 | return utils.copy_data_as_numpy(self._data, self.size, 131 | self._memory_type, AclImage._run_mode) 132 | 133 | def data(self): 134 | """Get image binary data""" 135 | if self._type == const.IMAGE_DATA_NUMPY: 136 | return acl.util.numpy_to_ptr(self._data) 137 | else: 138 | return self._data 139 | 140 | def copy_to_dvpp(self): 141 | """Copy image data to dvpp""" 142 | device_ptr = utils.copy_data_to_dvpp(self.data(), self.size, 143 | self._run_mode) 144 | if device_ptr is None: 145 | acl_log.log_error("Copy image to dvpp failed") 146 | return None 147 | return AclImage(device_ptr, self.width, self.height, 148 | self.size, const.MEMORY_DVPP) 149 | 150 | def copy_to_host(self): 151 | """"Copy data to host""" 152 | if self._type == const.IMAGE_DATA_NUMPY: 153 | data_np = self._data.copy() 154 | return AclImage(data_np, self.width, self.height) 155 | 156 | data = None 157 | mem_type = const.MEMORY_HOST 158 | if AclImage._run_mode == const.ACL_HOST: 159 | if self.is_local(): 160 | data = utils.copy_data_host_to_host(self._data, self.size) 161 | else: 162 | data = utils.copy_data_device_to_host(self._data, self.size) 163 | else: 164 | data = utils.copy_data_device_to_device(self._data, self.size) 165 | mem_type = const.MEMORY_DEVICE 166 | if data is None: 167 | acl_log.log_error("Copy image to host failed") 168 | return None 169 | 170 | return AclImage(data, self.width, self.height, self.size, mem_type) 171 | 172 | def is_local(self): 173 | """Image data is in host server memory and access directly or not""" 174 | # in atlas200dk, all kind memory can access directly 175 | if AclImage._run_mode == const.ACL_DEVICE: 176 | return True 177 | # in atlas300, only acl host memory or numpy array can access directly 178 | elif ((AclImage._run_mode == const.ACL_HOST) and 179 | ((self._memory_type == const.MEMORY_HOST) or 180 | (self._memory_type == const.MEMORY_NORMAL))): 181 | return True 182 | else: 183 | return False 184 | 185 | def save(self, filename): 186 | """Save image as file""" 187 | image_np = self.nparray() 188 | image_np.tofile(filename) 189 | 190 | def destroy(self): 191 | """Release image memory""" 192 | if (self._data is None) or (self.size == 0): 193 | acl_log.log_error("Release image abnormaly, data is None") 194 | return 195 | 196 | if self._memory_type == const.MEMORY_DEVICE: 197 | acl.rt.free(self._data) 198 | elif self._memory_type == const.MEMORY_HOST: 199 | acl.rt.free_host(self._data) 200 | 
elif self._memory_type == const.MEMORY_DVPP: 201 | acl.media.dvpp_free(self._data) 202 | # numpy no need release 203 | self._data = None 204 | self.size = 0 205 | 206 | def __del__(self): 207 | self.destroy() 208 | -------------------------------------------------------------------------------- /src/main_yolox.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | import sys 5 | import os 6 | import functools 7 | import time 8 | 9 | path = os.path.dirname(os.path.abspath(__file__)) 10 | sys.path.append(os.path.join("../../")) 11 | 12 | currentPath = os.path.join(path, "..") 13 | OUTPUT_DIR = os.path.join(currentPath, 'outputs') 14 | MODEL_PATH = os.path.join(currentPath, r'model/yolox_nano.onnx') 15 | IMAGE_SIZE = (416, 416) 16 | CONF_TH = 0.3 17 | NMS_TH = 0.45 18 | CLASSES = 80 19 | STRIDES = (8, 16, 32) 20 | IMG_STD_MEAN = ((123.485, 116.28, 103.53), (58.395, 57.12, 57.375)) 21 | CLSNAMES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 22 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 23 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 24 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 25 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 26 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 27 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 28 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 29 | 'hair drier', 'toothbrush'] 30 | 31 | 32 | def display_process_time(func): 33 | @functools.wraps(func) 34 | def decorated(*args, **kwargs): 35 | s1 = time.time() 36 | res = func(*args, **kwargs) 37 | s2 = time.time() 38 | print('%s process time %f ms' % (func.__name__, 1000*(s2-s1))) 39 | return res 40 | 41 | return decorated 42 | 43 | 44 | def plot_one_box(x, img, color=None, label=None): 45 | c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) 46 | cv2.rectangle(img, c1, c2, color, 2) 47 | # cv2.rectangle(img, (int(x[0]), int(x[1]) - 15), (int(x[0]) + 100, int(x[1]) + 2), (255, 128, 128), -1) 48 | cv2.putText(img, label, (int(x[0]), int(x[1]) - 8), cv2.FONT_ITALIC, 0.8, (0, 255, 0), thickness=2, 49 | lineType=cv2.LINE_AA) 50 | 51 | 52 | def draw_dets(img, dets, dst, ratios=None): 53 | if dets is None: 54 | print('Nothing detect') 55 | return 56 | for x1, y1, x2, y2, conf, cls in dets: 57 | label = CLSNAMES[int(cls)] 58 | plot_one_box(x=[x1, y1, x2, y2], img=img, label=label, color=[0, 0, 255]) 59 | if ratios is not None: 60 | if ratios[0] > 1: 61 | w = int(img.shape[0] / ratios[0]) 62 | img = img[:, :w, :] 63 | if ratios[0] < 1: 64 | h = int(img.shape[1] * ratios[0]) 65 | img = img[:h, :, :] 66 | cv2.imencode('.jpg', img)[1].tofile(dst) 67 | 68 | 69 | class YOLOX(object): 70 | def __init__(self, model_path=MODEL_PATH, image_size=IMAGE_SIZE, 71 | conf_thres=CONF_TH, nms_thres=NMS_TH, strides=STRIDES, std_mean=IMG_STD_MEAN): 72 | self.model_path = model_path 73 | self.input_size = image_size 74 | self.conf_thres = conf_thres 75 | self.nms_thres = nms_thres 76 | self.strides = strides 77 | 78 | self.std = np.array(std_mean[0]).reshape(1, 1, -1) 79 | self.mean = 
np.array(std_mean[1]).reshape(1, 1, -1) 80 | 81 | self.model = None 82 | self.grids = [] 83 | self.expanded_strides = [] 84 | self._init() 85 | 86 | def _load_model(self): 87 | self.model = cv2.dnn.readNet(self.model_path) 88 | 89 | def _init(self): 90 | self._load_model() 91 | self._make_grids() 92 | self.grids = np.concatenate(self.grids, axis=-2) 93 | self.expanded_strides = np.concatenate(self.expanded_strides, axis=-2) 94 | 95 | def _make_grids(self): 96 | for stride in self.strides: 97 | (x_step, y_step) = (self.input_size[1]//stride, self.input_size[0]//stride) 98 | x, y = np.arange(x_step), np.arange(y_step) 99 | xv, yv = np.meshgrid(*[x, y]) 100 | grid = np.stack((xv, yv), axis=2).reshape(1, -1, 2) 101 | self.grids.append(grid) 102 | self.expanded_strides.append(np.full((*grid.shape[:2], 1), stride)) 103 | 104 | def _xywh2xyxy(self, x): 105 | y = np.zeros_like(x) 106 | y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x 107 | y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y 108 | y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x 109 | y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y 110 | return y 111 | 112 | def _nms(self, boxes, scores, nms_thr): 113 | """Single class NMS implemented in Numpy.""" 114 | x1 = boxes[:, 0] 115 | y1 = boxes[:, 1] 116 | x2 = boxes[:, 2] 117 | y2 = boxes[:, 3] 118 | 119 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 120 | order = scores.argsort()[::-1] 121 | 122 | keep = [] 123 | while order.size > 0: 124 | i = order[0] 125 | keep.append(i) 126 | xx1 = np.maximum(x1[i], x1[order[1:]]) 127 | yy1 = np.maximum(y1[i], y1[order[1:]]) 128 | xx2 = np.minimum(x2[i], x2[order[1:]]) 129 | yy2 = np.minimum(y2[i], y2[order[1:]]) 130 | 131 | w = np.maximum(0.0, xx2 - xx1 + 1) 132 | h = np.maximum(0.0, yy2 - yy1 + 1) 133 | inter = w * h 134 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 135 | 136 | inds = np.where(ovr <= nms_thr)[0] 137 | order = order[inds + 1] 138 | 139 | return keep 140 | 141 | def _multiclass_nms(self, boxes, scores, nms_thr, score_thr): 142 | """Multiclass NMS implemented in Numpy""" 143 | final_dets = [] 144 | num_classes = scores.shape[-1] 145 | for cls_ind in range(num_classes): 146 | cls_scores = scores[:, cls_ind] 147 | valid_score_mask = cls_scores > score_thr 148 | if valid_score_mask.sum() == 0: 149 | continue 150 | else: 151 | valid_scores = cls_scores[valid_score_mask] 152 | valid_boxes = boxes[valid_score_mask] 153 | keep = self._nms(valid_boxes, valid_scores, nms_thr) 154 | if len(keep) > 0: 155 | cls_inds = np.ones((len(keep), 1)) * cls_ind 156 | dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1) 157 | final_dets.append(dets) 158 | if len(final_dets) == 0: 159 | return None 160 | return np.concatenate(final_dets, 0) 161 | 162 | @display_process_time 163 | def pre_process(self, image): 164 | image_padded = np.ones([self.input_size[0], self.input_size[1], 3], dtype=np.float32) * 114.0 165 | r = min(self.input_size[0]/image.shape[0], self.input_size[1]/image.shape[1]) 166 | image_resized = cv2.resize(image, (int(image.shape[1] * r), int(image.shape[0] * r)), cv2.INTER_LINEAR) 167 | image_padded[:int(image.shape[0] * r), :int(image.shape[1] * r), :] = image_resized 168 | 169 | img = image_padded[:, :, ::-1] 170 | img = (img - self.mean)/self.std 171 | img = np.expand_dims(img.transpose(2, 0, 1), axis=0) 172 | inp = np.ascontiguousarray(img, dtype=np.float32) 173 | return inp, r, image_padded.astype(np.uint8) 174 | 175 | @display_process_time 176 | def inferece(self, inp): 177 | self.model.setInput(inp) 178 | outs = 
self.model.forward(self.model.getUnconnectedOutLayersNames()) 179 | if len(outs) == 1: 180 | prediction = outs[0].squeeze() 181 | elif len(outs) > 1: 182 | prediction = np.concatenate(outs, axis=-2).squeeze() 183 | else: 184 | prediction = None 185 | return prediction 186 | 187 | @display_process_time 188 | def post_process(self, prediction): 189 | prediction[..., :2] = (prediction[..., :2] + self.grids) * self.expanded_strides 190 | prediction[..., 2:4] = np.exp(prediction[..., 2:4]) * self.expanded_strides 191 | 192 | bboxes = prediction[..., :4] 193 | scores = prediction[..., 4:5] * prediction[..., 5:] 194 | bboxes = self._xywh2xyxy(bboxes) 195 | dets = self._multiclass_nms(bboxes, scores, self.nms_thres, self.conf_thres) 196 | return dets 197 | 198 | 199 | def main(): 200 | image_dir = os.path.join(currentPath, "data") 201 | if not os.path.exists(OUTPUT_DIR): 202 | os.mkdir(OUTPUT_DIR) 203 | images_list = [os.path.join(image_dir, img) 204 | for img in os.listdir(image_dir) 205 | if os.path.splitext(img)[1] in ['.jpg', '.png', '.bmp']] 206 | 207 | my_yolox = YOLOX(model_path=MODEL_PATH, image_size=IMAGE_SIZE, 208 | conf_thres=CONF_TH, nms_thres=NMS_TH, 209 | strides=STRIDES, std_mean=IMG_STD_MEAN) 210 | 211 | for image_file in images_list: 212 | # Read image 213 | print('=== ' + os.path.basename(image_file) + '===') 214 | output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_file)) 215 | image = cv2.imread(image_file) 216 | inp, r, img_pad = my_yolox.pre_process(image) 217 | prediction = my_yolox.inferece(inp) 218 | dets = my_yolox.post_process(prediction) 219 | draw_dets(img_pad, dets, output_path, ratios=(image.shape[0]/image.shape[1], None)) 220 | 221 | 222 | if __name__ == '__main__': 223 | main() -------------------------------------------------------------------------------- /src/acl_yolox.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import functools 4 | import time 5 | 6 | import numpy as np 7 | import cv2 8 | 9 | path = os.path.dirname(os.path.abspath(__file__)) 10 | sys.path.append(os.path.join("../../")) 11 | 12 | currentPath = os.path.join(path, "..") 13 | OUTPUT_DIR = os.path.join(currentPath, 'outputs') 14 | MODEL_PATH = os.path.join(currentPath, "model/yolox_nano.om") 15 | 16 | import acl 17 | from atlas_utils.acl_resource import AclResource 18 | from atlas_utils.acl_model import Model 19 | from atlas_utils.acl_image import AclImage 20 | from atlas_utils.acl_dvpp import Dvpp 21 | import atlas_utils.constants as const 22 | import atlas_utils.utils as utils 23 | 24 | IMAGE_SIZE = (416, 416) 25 | CONF_TH = 0.3 26 | NMS_TH = 0.45 27 | CLASSES = 80 28 | STRIDES = (8, 16, 32) 29 | IMG_STD_MEAN = ((123.485, 116.28, 103.53), (58.395, 57.12, 57.375)) 30 | CLSNAMES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 31 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 32 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 33 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 34 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 35 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 36 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell 
phone', 37 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 38 | 'hair drier', 'toothbrush'] 39 | 40 | 41 | def display_process_time(func): 42 | @functools.wraps(func) 43 | def decorated(*args, **kwargs): 44 | s1 = time.time() 45 | res = func(*args, **kwargs) 46 | s2 = time.time() 47 | print('%s process time %f ms' % (func.__name__, 1000*(s2-s1))) 48 | return res 49 | 50 | return decorated 51 | 52 | 53 | def plot_one_box(x, img, color=None, label=None): 54 | c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) 55 | cv2.rectangle(img, c1, c2, color, 2) 56 | # cv2.rectangle(img, (int(x[0]), int(x[1]) - 15), (int(x[0]) + 100, int(x[1]) + 2), (255, 128, 128), -1) 57 | cv2.putText(img, label, (int(x[0]), int(x[1]) - 8), cv2.FONT_ITALIC, 0.8, (0, 255, 0), thickness=2, 58 | lineType=cv2.LINE_AA) 59 | 60 | 61 | def draw_dets(img, dets, dst, ratios=None): 62 | if dets is None: 63 | print('Nothing detect') 64 | return 65 | for x1, y1, x2, y2, conf, cls in dets: 66 | label = CLSNAMES[int(cls)] 67 | plot_one_box(x=[x1, y1, x2, y2], img=img, label=label, color=[0, 0, 255]) 68 | if ratios is not None: 69 | if ratios[0] > 1: 70 | w = int(img.shape[0] / ratios[0]) 71 | img = img[:, :w, :] 72 | if ratios[0] < 1: 73 | h = int(img.shape[1] * ratios[0]) 74 | img = img[:h, :, :] 75 | cv2.imencode('.jpg', img)[1].tofile(dst) 76 | 77 | 78 | class YOLOX(object): 79 | def __init__(self, model_path=MODEL_PATH, image_size=IMAGE_SIZE, 80 | conf_thres=CONF_TH, nms_thres=NMS_TH, strides=STRIDES, std_mean=IMG_STD_MEAN): 81 | self.model_path = model_path 82 | self.input_size = image_size 83 | self.conf_thres = conf_thres 84 | self.nms_thres = nms_thres 85 | self.strides = strides 86 | 87 | self.std = np.array(std_mean[0]).reshape(1, 1, -1) 88 | self.mean = np.array(std_mean[1]).reshape(1, 1, -1) 89 | 90 | self.model = None 91 | self.grids = [] 92 | self.expanded_strides = [] 93 | self._init() 94 | 95 | def _load_model(self): 96 | self.model = Model(self.model_path) 97 | return const.SUCCESS 98 | 99 | def _load_dvpp(self): 100 | self.dvpp = Dvpp() 101 | return const.SUCCESS 102 | 103 | def _init(self): 104 | self._load_model() 105 | self._load_dvpp() 106 | self._make_grids() 107 | self.grids = np.concatenate(self.grids, axis=-2) 108 | self.expanded_strides = np.concatenate(self.expanded_strides, axis=-2) 109 | return const.SUCCESS 110 | 111 | def _make_grids(self): 112 | for stride in self.strides: 113 | (x_step, y_step) = (self.input_size[1]//stride, self.input_size[0]//stride) 114 | x, y = np.arange(x_step), np.arange(y_step) 115 | xv, yv = np.meshgrid(*[x, y]) 116 | grid = np.stack((xv, yv), axis=2).reshape(1, -1, 2) 117 | self.grids.append(grid) 118 | self.expanded_strides.append(np.full((*grid.shape[:2], 1), stride)) 119 | 120 | def _xywh2xyxy(self, x): 121 | y = np.zeros_like(x) 122 | y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x 123 | y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y 124 | y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x 125 | y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y 126 | return y 127 | 128 | def _nms(self, boxes, scores, nms_thr): 129 | """Single class NMS implemented in Numpy.""" 130 | x1 = boxes[:, 0] 131 | y1 = boxes[:, 1] 132 | x2 = boxes[:, 2] 133 | y2 = boxes[:, 3] 134 | 135 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 136 | order = scores.argsort()[::-1] 137 | 138 | keep = [] 139 | while order.size > 0: 140 | i = order[0] 141 | keep.append(i) 142 | xx1 = np.maximum(x1[i], x1[order[1:]]) 143 | yy1 = np.maximum(y1[i], y1[order[1:]]) 
144 | xx2 = np.minimum(x2[i], x2[order[1:]]) 145 | yy2 = np.minimum(y2[i], y2[order[1:]]) 146 | 147 | w = np.maximum(0.0, xx2 - xx1 + 1) 148 | h = np.maximum(0.0, yy2 - yy1 + 1) 149 | inter = w * h 150 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 151 | 152 | inds = np.where(ovr <= nms_thr)[0] 153 | order = order[inds + 1] 154 | 155 | return keep 156 | 157 | def _multiclass_nms(self, boxes, scores, nms_thr, score_thr): 158 | """Multiclass NMS implemented in Numpy""" 159 | final_dets = [] 160 | num_classes = scores.shape[-1] 161 | for cls_ind in range(num_classes): 162 | cls_scores = scores[:, cls_ind] 163 | valid_score_mask = cls_scores > score_thr 164 | if valid_score_mask.sum() == 0: 165 | continue 166 | else: 167 | valid_scores = cls_scores[valid_score_mask] 168 | valid_boxes = boxes[valid_score_mask] 169 | keep = self._nms(valid_boxes, valid_scores, nms_thr) 170 | if len(keep) > 0: 171 | cls_inds = np.ones((len(keep), 1)) * cls_ind 172 | dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1) 173 | final_dets.append(dets) 174 | if len(final_dets) == 0: 175 | return None 176 | return np.concatenate(final_dets, 0) 177 | 178 | @display_process_time 179 | def pre_process(self, image): 180 | image_padded = np.ones([self.input_size[0], self.input_size[1], 3], dtype=np.float32) * 114.0 181 | r = min(self.input_size[0]/image.shape[0], self.input_size[1]/image.shape[1]) 182 | image_resized = cv2.resize(image, (int(image.shape[1] * r), int(image.shape[0] * r)), cv2.INTER_LINEAR) 183 | image_padded[:int(image.shape[0] * r), :int(image.shape[1] * r), :] = image_resized 184 | 185 | img = image_padded[:, :, ::-1] 186 | img = (img - self.mean)/self.std 187 | img = np.concatenate([img[::2, ::2, :], img[1::2, ::2, :], 188 | img[::2, 1::2, :], img[1::2, 1::2, :]], axis=-1) 189 | img = np.expand_dims(img.transpose((2, 0, 1)), axis=0) 190 | inp = np.ascontiguousarray(img, dtype=np.float32) 191 | return inp, r, image_padded.astype(np.uint8) 192 | 193 | @display_process_time 194 | def inferece(self, inp): 195 | outs = self.model.execute([inp, ]) 196 | if len(outs) == 1: 197 | prediction = outs[0].squeeze() 198 | elif len(outs) > 1: 199 | prediction = np.concatenate(outs, axis=-2).squeeze() 200 | else: 201 | prediction = None 202 | return prediction 203 | 204 | @display_process_time 205 | def post_process(self, prediction): 206 | prediction[..., :2] = (prediction[..., :2] + self.grids) * self.expanded_strides 207 | prediction[..., 2:4] = np.exp(prediction[..., 2:4]) * self.expanded_strides 208 | 209 | bboxes = prediction[..., :4] 210 | scores = prediction[..., 4:5] * prediction[..., 5:] 211 | bboxes = self._xywh2xyxy(bboxes) 212 | dets = self._multiclass_nms(bboxes, scores, self.nms_thres, self.conf_thres) 213 | return dets 214 | 215 | def detect(self, image): 216 | inp, r, img = self.pre_process(image) 217 | prediction = self.inferece(inp) 218 | dets = self.post_process(prediction) 219 | 220 | return dets, r, img 221 | 222 | 223 | def main(): 224 | acl_resource = AclResource() 225 | acl_resource.init() 226 | """ 227 | main 228 | """ 229 | image_dir = os.path.join(currentPath, "data") 230 | if not os.path.exists(OUTPUT_DIR): 231 | os.mkdir(OUTPUT_DIR) 232 | images_list = [os.path.join(image_dir, img) 233 | for img in os.listdir(image_dir) 234 | if os.path.splitext(img)[1] in const.IMG_EXT] 235 | 236 | detector = YOLOX(model_path=MODEL_PATH, image_size=IMAGE_SIZE, 237 | conf_thres=CONF_TH, nms_thres=NMS_TH, 238 | strides=STRIDES, std_mean=IMG_STD_MEAN) 239 | 240 | for 
image_file in images_list: 241 | # Read image 242 | print('=== ' + os.path.basename(image_file) + '===') 243 | output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_file)) 244 | # image = Image.open(image_file) 245 | image = cv2.imread(image_file) 246 | dets, r, img = detector.detect(image) 247 | draw_dets(img, dets, output_path) 248 | 249 | 250 | if __name__ == '__main__': 251 | main() 252 | -------------------------------------------------------------------------------- /atlas_utils/dvpp_vdec.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import acl 3 | import queue 4 | 5 | import atlas_utils.constants as const 6 | import atlas_utils.utils as utils 7 | import atlas_utils.resource_list as resource_list 8 | import atlas_utils.acl_logger as acl_log 9 | from atlas_utils.acl_image import AclImage 10 | 11 | READ_TIMEOUT = 5 12 | WAIT_INTERVAL = 0.1 13 | 14 | 15 | class DvppVdec(object): 16 | """Decode h264/h265 stream by dvpp vdec 17 | Decode one frame of h264/h265 stream.The stream must be h264 main, baseline 18 | or high level, annex-b format, or h265 main level.Output image is yuv420sp 19 | Attributes: 20 | _channel_id: dvpp vdec channel parameter, must global unique 21 | _width: input frame width 22 | _height:input frame height 23 | _run_flag:deocde is running or not currently, callback thread daemon condition 24 | _callbak_tid: decode callback thread id 25 | _channel_desc: vdec channel desc handle 26 | _ctx: current thread acl context 27 | _entype: video stream encode type, dvpp vdec support: 28 | const.ENTYPE_H265_MAIN = 0 H265 main level 29 | const.ENTYPE_H264_BASE = 1 H264 baseline level 30 | const.ENTYPE_H264_MAIN = 2 H264 main level 31 | const.ENTYPE_H264_HIGH = 3 H264 high level 32 | _format: output frame image format, use yuv420sp 33 | _decod_complete_cnt: output decoded complete frames counter 34 | _decode_cnt: input frames counter 35 | _output_pic_size: output image data size 36 | _frame_queue: output decoded frame image queue 37 | """ 38 | 39 | def __init__(self, channel_id, width, height, entype, ctx, 40 | output_format=const.PIXEL_FORMAT_YUV_SEMIPLANAR_420): 41 | """Create dvpp vdec instance 42 | Args: 43 | channel_id: decode channel id, must be global unique 44 | width: frame width 45 | height: frame height 46 | entype: video stream encode type 47 | ctx: current thread acl context 48 | output_format: output image format, support yuv420 nv12 and nv21 49 | """ 50 | self._channel_id = channel_id 51 | self._width = width 52 | self._height = height 53 | self._run_flag = True 54 | self._callbak_tid = None 55 | self._channel_desc = None 56 | self._ctx = ctx 57 | self._entype = entype 58 | self._format = output_format 59 | self._decode_complete_cnt = 0 60 | self._decode_cnt = 0 61 | self._output_pic_size = (self._width * self._height * 3) // 2 62 | self._frame_queue = queue.Queue() 63 | self._frame_config = None 64 | 65 | def _callback_thread_entry(self, args_list): 66 | ret = acl.rt.set_context(self._ctx) 67 | while self._run_flag is True: 68 | ret = acl.rt.process_report(300) 69 | 70 | def _callback(self, input_stream_desc, output_pic_desc, user_data): 71 | self._decode_complete_cnt += 1 72 | #print("callback ", self._decode_complete_cnt) 73 | input_stream_data = acl.media.dvpp_get_stream_desc_data( 74 | input_stream_desc) 75 | input_stream_data_size = acl.media.dvpp_get_stream_desc_size( 76 | input_stream_desc) 77 | ret = acl.media.dvpp_destroy_stream_desc(input_stream_desc) 78 | 79 | 
self._get_pic_desc_data(output_pic_desc, user_data) 80 | 81 | def _get_pic_desc_data(self, pic_desc, user_data): 82 | pic_data = acl.media.dvpp_get_pic_desc_data(pic_desc) 83 | pic_data_size = acl.media.dvpp_get_pic_desc_size(pic_desc) 84 | ret_code = acl.media.dvpp_get_pic_desc_ret_code(pic_desc) 85 | if ret_code: 86 | channel_id, frame_id = user_data 87 | acl_log.log_error("Decode channel %d frame %d failed, error %d" 88 | % (channel_id, frame_id, ret_code)) 89 | acl.media.dvpp_free(pic_data) 90 | else: 91 | image = AclImage(pic_data, self._width, self._height, 92 | pic_data_size, const.MEMORY_DVPP) 93 | self._frame_queue.put(image) 94 | acl.media.dvpp_destroy_pic_desc(pic_desc) 95 | 96 | def init(self): 97 | """Init dvpp vdec 98 | Returns: 99 | const.SUCCESS: init success 100 | const.FAILED: init failed 101 | """ 102 | # print("0") 103 | self._channel_desc = acl.media.vdec_create_channel_desc() 104 | self._callbak_tid, ret = acl.util.start_thread( 105 | self._callback_thread_entry, []) 106 | acl.media.vdec_set_channel_desc_channel_id(self._channel_desc, 107 | self._channel_id) 108 | acl.media.vdec_set_channel_desc_thread_id(self._channel_desc, 109 | self._callbak_tid) 110 | 111 | # print("1") 112 | acl.media.vdec_set_channel_desc_callback(self._channel_desc, 113 | self._callback) 114 | 115 | acl.media.vdec_set_channel_desc_entype(self._channel_desc, 116 | self._entype) 117 | acl.media.vdec_set_channel_desc_out_pic_format(self._channel_desc, 118 | self._format) 119 | # print("3") 120 | out_mode = acl.media.vdec_get_channel_desc_out_mode(self._channel_desc) 121 | if out_mode != 0: 122 | acl_log.log_error("Dvpp vdec out mode(%d) is invalid" % (out_mode)) 123 | return const.FAILED 124 | 125 | acl.media.vdec_set_channel_desc_out_mode(self._channel_desc, 126 | out_mode) 127 | acl.media.vdec_create_channel(self._channel_desc) 128 | 129 | self._frame_config = acl.media.vdec_create_frame_config() 130 | if self._frame_config is None: 131 | acl_log.log_error("Create dvpp frame config failed") 132 | return const.FAILED 133 | 134 | return const.SUCCESS 135 | 136 | def _thread_join(self): 137 | if self._callbak_tid is not None: 138 | self._run_flag = False 139 | ret = acl.util.stop_thread(self._callbak_tid) 140 | self._callbak_tid = None 141 | 142 | def process(self, input_data, input_size, user_data): 143 | """Decode frame 144 | Args: 145 | input_data: input frame data 146 | input_size: input frame data size 147 | 148 | Returns: 149 | const.SUCCESS: process success 150 | const.FAILED: process failed 151 | """ 152 | input_stream_desc = self._create_input_pic_stream_desc(input_data, 153 | input_size) 154 | if input_stream_desc is None: 155 | acl_log.log_error("Dvpp vdec decode frame failed for " 156 | "create input stream desc error") 157 | return const.FAILED 158 | 159 | output_pic_desc = self._create_output_pic_desc() 160 | if output_pic_desc is None: 161 | acl_log.log_error("Dvpp vdec decode frame failed for create " 162 | "output pic desc failed") 163 | return const.FAILED 164 | 165 | ret = acl.media.vdec_send_frame(self._channel_desc, input_stream_desc, 166 | output_pic_desc, self._frame_config, 167 | user_data) 168 | if ret: 169 | acl_log.log_error("Dvpp vdec send frame failed, error ", ret) 170 | return const.FAILED 171 | 172 | self._decode_cnt += 1 173 | #print("send frame ", self._decode_cnt) 174 | 175 | return const.SUCCESS 176 | 177 | def _create_input_pic_stream_desc(self, input_data, input_size): 178 | stream_desc = acl.media.dvpp_create_stream_desc() 179 | if stream_desc is None: 180 | 
acl_log.log_error("Create dvpp vdec input pic stream desc failed") 181 | return None 182 | 183 | acl.media.dvpp_set_stream_desc_size(stream_desc, input_size) 184 | acl.media.dvpp_set_stream_desc_data(stream_desc, input_data) 185 | 186 | return stream_desc 187 | 188 | def _create_output_pic_desc(self): 189 | output_buffer, ret = acl.media.dvpp_malloc(self._output_pic_size) 190 | if (output_buffer is None) or ret: 191 | acl_log.log_error( 192 | "Dvpp vdec malloc output memory failed, " 193 | "size %d, error %d" % 194 | (self._output_pic_size, ret)) 195 | return None 196 | 197 | pic_desc = acl.media.dvpp_create_pic_desc() 198 | if pic_desc is None: 199 | acl_log.log_error("Create dvpp vdec output pic desc failed") 200 | return None 201 | 202 | acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer) 203 | acl.media.dvpp_set_pic_desc_size(pic_desc, self._output_pic_size) 204 | acl.media.dvpp_set_pic_desc_format(pic_desc, self._format) 205 | 206 | return pic_desc 207 | 208 | def destroy(self): 209 | """Release dvpp vdec resource""" 210 | #print("vdec destroy****************") 211 | if self._channel_desc is not None: 212 | ret = acl.media.vdec_destroy_channel(self._channel_desc) 213 | self._channel_desc = None 214 | 215 | self._thread_join() 216 | 217 | if self._frame_config is not None: 218 | acl.media.vdec_destroy_frame_config(self._frame_config) 219 | self._frame_config = None 220 | 221 | def is_finished(self): 222 | """Video decode finished""" 223 | return ((self._decode_cnt > 0) and 224 | (self._decode_complete_cnt >= self._decode_cnt)) 225 | 226 | def read(self, no_wait=False): 227 | """Read decoded frame 228 | no_wait: Get image without wait. If set this arg True, and 229 | return image is None, should call is_finished() method 230 | to confirm decode finish or failed 231 | 232 | Returns: 233 | 1. const.SUCCESS, not None: get image success 234 | 2. const.SUCCESS, None: all frames decoded and be token off 235 | 3. 
const.FAILED, None: Has frame not decoded, but no image decoded, 236 | it means decode video failed 237 | """ 238 | image = None 239 | ret = const.SUCCESS 240 | # received eos frame and all received frame decode complete 241 | if no_wait or self.is_finished(): 242 | try: 243 | image = self._frame_queue.get_nowait() 244 | except: 245 | acl_log.log_info("No decode frame in queue anymore") 246 | else: 247 | try: 248 | image = self._frame_queue.get(timeout=READ_TIMEOUT) 249 | except: 250 | ret = const.FAILED 251 | acl_log.log_error("Read channel id %d frame timeout, " 252 | "receive frame %d, decoded %d" 253 | % (self._channel_id, self._decode_cnt, 254 | self._decode_complete_cnt)) 255 | return ret, image 256 | -------------------------------------------------------------------------------- /atlas_utils/acl_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (R) @huawei.com, all rights reserved 3 | -*- coding:utf-8 -*- 4 | CREATED: 2020-6-04 20:12:13 5 | MODIFIED: 2020-6-28 14:04:45 6 | """ 7 | 8 | import acl 9 | import struct 10 | import numpy as np 11 | import datetime 12 | import sys 13 | import os 14 | 15 | import atlas_utils.constants as const 16 | import atlas_utils.utils as utils 17 | from atlas_utils.acl_logger import log_error, log_info 18 | from atlas_utils.acl_image import AclImage 19 | from atlas_utils.resource_list import resource_list 20 | 21 | 22 | class Model(object): 23 | """ 24 | wrap acl model inference interface, include input dataset construction, 25 | execute, and output transform to numpy array 26 | Attributes: 27 | model_path: om offline mode file path 28 | """ 29 | 30 | def __init__(self, model_path): 31 | self._run_mode, ret = acl.rt.get_run_mode() 32 | utils.check_ret("acl.rt.get_run_mode", ret) 33 | self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE 34 | if self._run_mode == const.ACL_HOST: 35 | self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST 36 | 37 | self._model_path = model_path # string 38 | self._model_id = None # pointer 39 | self._input_num = 0 40 | self._input_buffer = [] 41 | self._input_dataset = None 42 | self._output_dataset = None 43 | self._model_desc = None # pointer when using 44 | self._output_size = 0 45 | self._init_resource() 46 | self._is_destroyed = False 47 | resource_list.register(self) 48 | 49 | def _init_resource(self): 50 | log_info("Init model resource start...") 51 | if not os.path.isfile(self._model_path): 52 | log_error( 53 | "model_path failed, please check. 
model_path=%s" % 54 | self._model_path) 55 | return const.FAILED 56 | 57 | self._model_id, ret = acl.mdl.load_from_file(self._model_path) 58 | utils.check_ret("acl.mdl.load_from_file", ret) 59 | self._model_desc = acl.mdl.create_desc() 60 | ret = acl.mdl.get_desc(self._model_desc, self._model_id) 61 | utils.check_ret("acl.mdl.get_desc", ret) 62 | # get outputs num of model 63 | self._output_size = acl.mdl.get_num_outputs(self._model_desc) 64 | # create output dataset 65 | self._gen_output_dataset(self._output_size) 66 | # recode input data address,if need malloc memory,the memory will be 67 | # reuseable 68 | self._init_input_buffer() 69 | 70 | log_info("Init model resource success") 71 | 72 | return const.SUCCESS 73 | 74 | def _gen_output_dataset(self, ouput_num): 75 | log_info("[Model] create model output dataset:") 76 | dataset = acl.mdl.create_dataset() 77 | for i in range(ouput_num): 78 | # malloc device memory for output 79 | size = acl.mdl.get_output_size_by_index(self._model_desc, i) 80 | buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY) 81 | utils.check_ret("acl.rt.malloc", ret) 82 | # crate oputput data buffer 83 | dataset_buffer = acl.create_data_buffer(buf, size) 84 | _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer) 85 | log_info("malloc output %d, size %d" % (i, size)) 86 | if ret: 87 | acl.rt.free(buf) 88 | acl.destroy_data_buffer(dataset_buffer) 89 | utils.check_ret("acl.destroy_data_buffer", ret) 90 | self._output_dataset = dataset 91 | log_info("Create model output dataset success") 92 | 93 | def _init_input_buffer(self): 94 | self._input_num = acl.mdl.get_num_inputs(self._model_desc) 95 | for i in range(self._input_num): 96 | item = {"addr": None, "size": 0} 97 | self._input_buffer.append(item) 98 | 99 | def _gen_input_dataset(self, input_list): 100 | ret = const.SUCCESS 101 | if len(input_list) != self._input_num: 102 | log_error("Current input data num %d unequal to model " 103 | "input num %d" % (len(input_list), self._input_num)) 104 | return const.FAILED 105 | 106 | self._input_dataset = acl.mdl.create_dataset() 107 | for i in range(self._input_num): 108 | item = input_list[i] 109 | data, size = self._parse_input_data(item, i) 110 | if (data is None) or (size == 0): 111 | ret = const.FAILED 112 | log_error("The %d input is invalid" % (i)) 113 | break 114 | dataset_buffer = acl.create_data_buffer(data, size) 115 | _, ret = acl.mdl.add_dataset_buffer(self._input_dataset, 116 | dataset_buffer) 117 | if ret: 118 | log_error("Add input dataset buffer failed") 119 | acl.destroy_data_buffer(self._input_dataset) 120 | ret = const.FAILED 121 | break 122 | if ret == const.FAILED: 123 | self._release_dataset(self._input_dataset) 124 | self._input_dataset = None 125 | 126 | return ret 127 | 128 | def _parse_input_data(self, input_data, index): 129 | data = None 130 | size = 0 131 | if isinstance(input_data, AclImage): 132 | size = input_data.size 133 | data = input_data.data() 134 | elif isinstance(input_data, np.ndarray): 135 | ptr = acl.util.numpy_to_ptr(input_data) 136 | size = input_data.size * input_data.itemsize 137 | data = self._copy_input_to_device(ptr, size, index) 138 | if data is None: 139 | size = 0 140 | log_error("Copy input to device failed") 141 | elif (isinstance(input_data, dict) and 142 | ('data' in input_data.keys()) and ('size' in input_data.keys())): 143 | size = input_data['size'] 144 | data = input_data['data'] 145 | else: 146 | log_error("Unsupport input") 147 | 148 | return data, size 149 | 150 | def 
_copy_input_to_device(self, input_ptr, size, index): 151 | buffer_item = self._input_buffer[index] 152 | data = None 153 | if buffer_item['addr'] is None: 154 | data = utils.copy_data_device_to_device(input_ptr, size) 155 | if data is None: 156 | log_error("Malloc memory and copy model %dth " 157 | "input to device failed" % (index)) 158 | return None 159 | buffer_item['addr'] = data 160 | buffer_item['size'] = size 161 | elif size == buffer_item['size']: 162 | ret = acl.rt.memcpy(buffer_item['addr'], size, 163 | input_ptr, size, 164 | const.ACL_MEMCPY_DEVICE_TO_DEVICE) 165 | if ret != const.ACL_ERROR_NONE: 166 | log_error("Copy model %dth input to device failed" % (index)) 167 | return None 168 | data = buffer_item['addr'] 169 | else: 170 | log_error("The model %dth input size %d is change," 171 | " before is %d" % (index, size, buffer_item['size'])) 172 | return None 173 | 174 | return data 175 | 176 | def execute(self, input_list): 177 | """ 178 | inference input data 179 | Args: 180 | input_list: input data list, support AclImage, 181 | numpy array and {'data': ,'size':} dict 182 | returns: 183 | inference result data, which is a numpy array list, 184 | each corresponse to a model output 185 | """ 186 | ret = self._gen_input_dataset(input_list) 187 | if ret == const.FAILED: 188 | log_error("Gen model input dataset failed") 189 | return None 190 | 191 | ret = acl.mdl.execute(self._model_id, 192 | self._input_dataset, 193 | self._output_dataset) 194 | if ret != const.ACL_ERROR_NONE: 195 | log_error("Execute model failed for acl.mdl.execute error ", ret) 196 | return None 197 | 198 | self._release_dataset(self._input_dataset) 199 | self._input_dataset = None 200 | 201 | return self._output_dataset_to_numpy() 202 | 203 | def _output_dataset_to_numpy(self): 204 | dataset = [] 205 | output_tensor_list = self._gen_output_tensor() 206 | num = acl.mdl.get_dataset_num_buffers(self._output_dataset) 207 | 208 | for i in range(num): 209 | buf = acl.mdl.get_dataset_buffer(self._output_dataset, i) 210 | data = acl.get_data_buffer_addr(buf) 211 | size = int(acl.get_data_buffer_size(buf)) 212 | output_ptr = output_tensor_list[i]["ptr"] 213 | output_tensor = output_tensor_list[i]["tensor"] 214 | ret = acl.rt.memcpy(output_ptr, 215 | output_tensor.size * output_tensor.itemsize, 216 | data, size, self._copy_policy) 217 | if ret != const.ACL_ERROR_NONE: 218 | log_error("Memcpy inference output to local failed") 219 | return None 220 | 221 | dataset.append(output_tensor) 222 | 223 | return dataset 224 | 225 | def _gen_output_tensor(self): 226 | output_tensor_list = [] 227 | for i in range(self._output_size): 228 | dims = acl.mdl.get_output_dims(self._model_desc, i) 229 | shape = tuple(dims[0]["dims"]) 230 | datatype = acl.mdl.get_output_data_type(self._model_desc, i) 231 | size = acl.mdl.get_output_size_by_index(self._model_desc, i) 232 | 233 | if datatype == const.ACL_FLOAT: 234 | np_type = np.float32 235 | output_tensor = np.zeros( 236 | size // 4, dtype=np_type).reshape(shape) 237 | elif datatype == const.ACL_INT32: 238 | np_type = np.int32 239 | output_tensor = np.zeros( 240 | size // 4, dtype=np_type).reshape(shape) 241 | elif datatype == const.ACL_UINT32: 242 | np_type = np.uint32 243 | output_tensor = np.zeros( 244 | size // 4, dtype=np_type).reshape(shape) 245 | elif datatype == const.ACL_FLOAT16: 246 | np_type = np.float16 247 | output_tensor = np.zeros( 248 | size // 2, dtype=np_type).reshape(shape) 249 | else: 250 | print("Unspport model output datatype ", datatype) 251 | return None 252 | 253 | 
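            # The inference result is memcpy'd from device memory straight into
            # this numpy buffer through its raw pointer (see
            # _output_dataset_to_numpy), so the array must be one contiguous
            # C-ordered block before acl.util.numpy_to_ptr() is called on it.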
if not output_tensor.flags['C_CONTIGUOUS']: 254 | output_tensor = np.ascontiguousarray(output_tensor) 255 | 256 | tensor_ptr = acl.util.numpy_to_ptr(output_tensor) 257 | output_tensor_list.append({"ptr": tensor_ptr, 258 | "tensor": output_tensor}) 259 | 260 | return output_tensor_list 261 | 262 | def _release_dataset(self, dataset, free_memory=False): 263 | if not dataset: 264 | return 265 | 266 | num = acl.mdl.get_dataset_num_buffers(dataset) 267 | for i in range(num): 268 | data_buf = acl.mdl.get_dataset_buffer(dataset, i) 269 | if data_buf: 270 | self._release_databuffer(data_buf, free_memory) 271 | 272 | ret = acl.mdl.destroy_dataset(dataset) 273 | if ret != const.ACL_ERROR_NONE: 274 | log_error("Destroy data buffer error ", ret) 275 | 276 | def _release_databuffer(self, data_buffer, free_memory=False): 277 | if free_memory: 278 | data_addr = acl.get_data_buffer_addr(data_buffer) 279 | if data_addr: 280 | acl.rt.free(data_addr) 281 | 282 | ret = acl.destroy_data_buffer(data_buffer) 283 | if ret != const.ACL_ERROR_NONE: 284 | log_error("Destroy data buffer error ", ret) 285 | 286 | def destroy(self): 287 | """ 288 | release resource of model inference 289 | Args: 290 | null 291 | Returns: 292 | null 293 | """ 294 | if self._is_destroyed: 295 | return 296 | 297 | self._release_dataset(self._output_dataset, free_memory=True) 298 | if self._model_id: 299 | ret = acl.mdl.unload(self._model_id) 300 | if ret != const.ACL_ERROR_NONE: 301 | log_info("acl.mdl.unload error:", ret) 302 | 303 | if self._model_desc: 304 | ret = acl.mdl.destroy_desc(self._model_desc) 305 | if ret != const.ACL_ERROR_NONE: 306 | log_info("acl.mdl.destroy_desc error:", ret) 307 | 308 | self._is_destroyed = True 309 | resource_list.unregister(self) 310 | log_info("Model release source success") 311 | 312 | def __del__(self): 313 | self.destroy() 314 | -------------------------------------------------------------------------------- /atlas_utils/video.py: -------------------------------------------------------------------------------- 1 | import av 2 | import numpy as np 3 | import acl 4 | import time 5 | 6 | import atlas_utils.constants as const 7 | import atlas_utils.utils as utils 8 | import atlas_utils.resource_list as resource_list 9 | import atlas_utils.acl_logger as acl_log 10 | import atlas_utils.dvpp_vdec as dvpp_vdec 11 | from atlas_utils.acl_image import AclImage 12 | import atlas_utils.chanel_id_generator as channel_id_gen 13 | 14 | WAIT_INTERVAL = 0.01 15 | WAIT_READY_MAX = 10 16 | WAIT_FIRST_DECODED_FRAME = 0.02 17 | 18 | DECODE_STATUS_INIT = 0 19 | DECODE_STATUS_READY = 1 20 | DECODE_STATUS_RUNNING = 2 21 | DECODE_STATUS_PYAV_FINISH = 3 22 | DECODE_STATUS_ERROR = 4 23 | DECODE_STATUS_STOP = 5 24 | DECODE_STATUS_EXIT = 6 25 | 26 | 27 | class AclVideo(object): 28 | """Decode video by pyav and pyacl dvpp vdec 29 | This class only support decode annex-b h264 file or rtsp ip camera. 30 | You can use command: 31 | ffmpeg -i aaa.mp4 -codec copy -bsf: h264_mp4toannexb -f h264 aaa.h264 32 | to transform mp4 file to h264 stream file. 
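    A minimal read loop looks like this (illustrative sketch; the file name
    is only a placeholder):
        video = AclVideo("test.h264")
        while True:
            ret, image = video.read()
            if (ret != const.SUCCESS) or (image is None):
                break
            # image holds one decoded frame as an AclImage
        video.destroy()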
33 | If decode rtsp of ip camera or stream pull stream software, make sure 34 | the stream format is annex-b 35 | 36 | Attributes: 37 | _stream_name: video stream name 38 | _input_buffer: dvpp vdec decode input data buffer 39 | _ctx: decode thread acl context, use the same contxt with app 40 | _entype: video stream encode type, dvpp vdec support: 41 | const.ENTYPE_H265_MAIN = 0 H265 main level 42 | const.ENTYPE_H264_BASE = 1 H264 baseline level 43 | const.ENTYPE_H264_MAIN = 2 H264 main level 44 | const.ENTYPE_H264_HIGH = 3 H264 high level 45 | this attributes will read from video stream extradata 46 | _channel_id: dvpp vdec decode channel id parameter, global unique 47 | _vdec: pyacl dvpp vdec instance 48 | _is_opened: the video stream wether open or not 49 | _status: video decoder current status 50 | _run_mode: the device mode 51 | """ 52 | 53 | def __init__(self, strame_name): 54 | self._stream_name = strame_name 55 | self._input_buffer = None 56 | self._vdec = None 57 | self._is_opened = False 58 | self._width = 0 59 | self._height = 0 60 | self._decode_thread_id = None 61 | self._ctx, ret = acl.rt.get_context() 62 | if ret: 63 | acl_log.log_error("Get acl context failed when " 64 | "instance AclVideo, error ", ret) 65 | else: 66 | self._entype = const.ENTYPE_H264_MAIN 67 | self._channel_id = channel_id_gen.gen_unique_channel_id() 68 | self._status = DECODE_STATUS_INIT 69 | self._run_mode, ret = acl.rt.get_run_mode() 70 | if ret: 71 | acl_log.log_error("Get acl run mode failed when " 72 | "instance AclVideo, error ", ret) 73 | else: 74 | self._open() 75 | 76 | def __del__(self): 77 | self.destroy() 78 | 79 | def _open(self): 80 | # Get frame width, height, encode type by pyav 81 | if self._get_param(): 82 | acl_log.log_error("Decode %s failed for get stream " 83 | "parameters error" % (self._stream_name)) 84 | return 85 | 86 | # Create decode thread and prepare to decode 87 | self._decode_thread_id, ret = acl.util.start_thread( 88 | self._decode_thread_entry, []) 89 | if ret: 90 | acl_log.log_error("Create %s decode thread failed, error %d" 91 | % (self._stream_name, ret)) 92 | return 93 | 94 | # Wait decode thread decode ready 95 | for i in range(0, WAIT_READY_MAX): 96 | if self._status == DECODE_STATUS_INIT: 97 | time.sleep(WAIT_INTERVAL) 98 | 99 | if self._status == DECODE_STATUS_READY: 100 | self._is_opened = True 101 | acl_log.log_info("Ready to decode %s..." 
% (self._stream_name)) 102 | else: 103 | acl_log.log_error("Open %s failed for wait ready timeout" 104 | % (self._stream_name)) 105 | return 106 | 107 | def _get_param(self): 108 | container = av.open(self._stream_name) 109 | stream = [s for s in container.streams if s.type == 'video'] 110 | if len(stream) == 0: 111 | # The stream is not video 112 | acl_log.log_error("%s has no video stream" % (self._stream_name)) 113 | return const.FAILED 114 | 115 | ret, profile = self._get_profile(stream) 116 | if ret: 117 | acl_log.log_error("%s is not annex-b format, decode failed" 118 | % (self._stream_name)) 119 | return const.FAILED 120 | 121 | video_context = container.streams.video[0].codec_context 122 | codec_id_name = video_context.name 123 | ret, self._entype = self._get_entype(codec_id_name, profile) 124 | if ret: 125 | return const.FAILED 126 | 127 | self._width = video_context.width 128 | self._height = video_context.height 129 | 130 | acl_log.log_info( 131 | "Get %s infomation: width %d, height %d, profile %d, " 132 | "codec %s, entype %d" % 133 | (self._stream_name, 134 | self._width, 135 | self._height, 136 | profile, 137 | codec_id_name, 138 | self._entype)) 139 | 140 | container.close() 141 | 142 | return const.SUCCESS 143 | 144 | def _get_profile(self, stream): 145 | # Annex-b format h264 extradata is start with 0x000001 or 0x00000001 146 | extradata = np.frombuffer(stream[0].codec_context.extradata, np.ubyte) 147 | if (extradata[0:3] == [0, 0, 1]).all(): 148 | profile_id = extradata[4] 149 | elif (extradata[0:4] == [0, 0, 0, 1]).all(): 150 | profile_id = extradata[5] 151 | else: 152 | acl_log.log_error("The stream %s is not annex-b h264, " 153 | "can not decode it" % (self._stream_name)) 154 | return const.FAILED, None 155 | 156 | return const.SUCCESS, profile_id 157 | 158 | def _get_entype(self, codec_id_name, profile): 159 | # Dvpp vdec support h264 baseline, main and high level 160 | profile_entype_tbl = { 161 | 'h264': {const.FF_PROFILE_H264_BASELINE: const.ENTYPE_H264_BASE, 162 | const.FF_PROFILE_H264_MAIN: const.ENTYPE_H265_MAIN, 163 | const.FF_PROFILE_H264_HIGH: const.ENTYPE_H264_HIGH}, 164 | 'h265': {const.FF_PROFILE_HEVC_MAIN: const.ENTYPE_H265_MAIN}} 165 | entype = None 166 | ret = const.SUCCESS 167 | 168 | if codec_id_name in profile_entype_tbl.keys(): 169 | entype_tbl = profile_entype_tbl[codec_id_name] 170 | if profile in entype_tbl.keys(): 171 | entype = entype_tbl[profile] 172 | elif codec_id_name == 'h264': 173 | # if not support profile, try to decode as main 174 | entype = const.ENTYPE_H264_MAIN 175 | acl_log.log_error("Unsurpport h264 profile ", profile, 176 | ", decode as main level") 177 | else: 178 | entype = const.ENTYPE_H265_MAIN 179 | acl_log.log_error("Unsurpport h265 profile ", profile, 180 | ", decode as main level") 181 | else: 182 | # Not h264 or h265 183 | ret = const.FAILED 184 | acl_log.log_error("Unsupport codec type ", codec_id_name) 185 | 186 | return ret, entype 187 | 188 | def _pyav_vdec(self): 189 | frame = 0 190 | video = av.open(self._stream_name) 191 | stream = [s for s in video.streams if s.type == 'video'] 192 | acl_log.log_info("Start decode %s frames" % (self._stream_name)) 193 | for packet in video.demux([stream[0]]): 194 | # Get frame data from packet and copy to dvpp 195 | frame_data, data_size = self._prepare_frame_data(packet) 196 | if data_size == 0: 197 | # Last packet size is 0, no frame to decode anymore 198 | break 199 | 200 | if self._vdec.process(frame_data, data_size, 201 | [self._channel_id, frame]): 202 | 
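                # DvppVdec.process() submits one annex-b frame to the hardware
                # decoder together with the user data [channel_id, frame index];
                # a non-zero return value means the frame was not accepted.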
acl_log.log_error("Dvpp vdec deocde frame %d failed, " 203 | "stop decode" % (frame)) 204 | self._status = DECODE_STATUS_ERROR 205 | break 206 | frame += 1 207 | 208 | # The status chang to stop when app stop decode 209 | if self._status != DECODE_STATUS_RUNNING: 210 | acl_log.log_info("Decode status change to %d, stop decode" 211 | % (self._status)) 212 | break 213 | 214 | def _prepare_frame_data(self, packet): 215 | in_frame_np = np.frombuffer(packet.to_bytes(), np.byte) 216 | size = in_frame_np.size 217 | if size == 0: 218 | # Last frame data is empty 219 | acl_log.log_info("Pyav decode finish") 220 | self._status = DECODE_STATUS_PYAV_FINISH 221 | return None, 0 222 | 223 | in_frame_ptr = acl.util.numpy_to_ptr(in_frame_np) 224 | policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE 225 | if self._run_mode == const.ACL_HOST: 226 | policy = const.ACL_MEMCPY_HOST_TO_DEVICE 227 | ret = acl.rt.memcpy(self._input_buffer, size, in_frame_ptr, size, 228 | policy) 229 | if ret: 230 | acl_log.log_error("Copy data to dvpp failed, policy %d, error %d" 231 | % (policy, ret)) 232 | self._status = DECODE_STATUS_ERROR 233 | return None, 0 234 | 235 | return self._input_buffer, size 236 | 237 | def _decode_thread_entry(self, arg_list): 238 | # Set acl context for decode thread 239 | if self._decode_thread_init(): 240 | acl_log.log_error("Decode thread init failed") 241 | return const.FAILED 242 | 243 | self._status = DECODE_STATUS_READY 244 | while (self._status == DECODE_STATUS_READY): 245 | time.sleep(WAIT_INTERVAL) 246 | 247 | self._pyav_vdec() 248 | self._decode_thread_join() 249 | 250 | return const.SUCCESS 251 | 252 | def _decode_thread_init(self): 253 | # Set acl context for decode thread 254 | ret = acl.rt.set_context(self._ctx) 255 | if ret: 256 | acl_log.log_error("%s decode thread init dvpp vdec failed") 257 | return const.FAILED 258 | 259 | # Instance dvpp vdec and init it 260 | self._vdec = dvpp_vdec.DvppVdec(self._channel_id, self._width, 261 | self._height, self._entype, self._ctx) 262 | if self._vdec.init(): 263 | acl_log.log_error("%s decode thread init dvpp vdec failed" 264 | % (self._stream_name)) 265 | return const.FAILED 266 | 267 | # Malloc dvpp vdec decode input dvpp memory 268 | self._input_buffer, ret = acl.media.dvpp_malloc( 269 | utils.rgbu8_size(self._width, self._height)) 270 | if ret: 271 | acl_log.log_error("%s decode thread malloc input memory failed, " 272 | "error %d. frame width %d, height %d, size %d" 273 | % (self._stream_name, ret, 274 | self._width, self._height, 275 | utils.rgbu8_size(self._width, self._height))) 276 | return const.FAILED 277 | 278 | return const.SUCCESS 279 | 280 | def _decode_thread_join(self): 281 | self.destroy() 282 | # Wait all decoded frame token off by read() 283 | while self._status < DECODE_STATUS_STOP: 284 | time.sleep(WAIT_INTERVAL) 285 | self._status = DECODE_STATUS_EXIT 286 | 287 | def is_finished(self): 288 | """Decode finished 289 | Pyav and dvpp vdec decoded all frame, and all deocde frames were 290 | token off. When read() return success but image is none, use this to 291 | confirm decode finished 292 | """ 293 | return self._status == DECODE_STATUS_EXIT 294 | 295 | def read(self, no_wait=False): 296 | """Read decoded frame 297 | Args: 298 | no_wait: Get image without wait. If set this arg True, and 299 | return image is None, should call is_finished() method 300 | to confirm decode finish or failed 301 | 302 | Returns: 303 | 1. const.SUCCESS, not None: get image success 304 | 2. 
const.SUCCESS, None: all frames decoded and be token off 305 | 3. const.FAILED, None: Has frame not decoded, but no image decoded, 306 | it means decode video failed 307 | """ 308 | # Pyav and dvpp vdec decoded all frame, 309 | # and all deocde frames were token off 310 | if self._status == DECODE_STATUS_EXIT: 311 | return const.SUCCESS, None 312 | 313 | # When call read first time, the decode thread only ready to decode, 314 | # but not decoding already. Set status to DECODE_STATUS_RUNNING will 315 | # cause pyav and dvpp vdec start decode actually 316 | if self._status == DECODE_STATUS_READY: 317 | self._status = DECODE_STATUS_RUNNING 318 | # The decode just begin, need wait the first frame to be decoded 319 | time.sleep(WAIT_FIRST_DECODED_FRAME) 320 | 321 | ret, image = self._vdec.read(no_wait) 322 | 323 | # Decode finish or stopped, and all decode frames were token off 324 | if (image is None) and (self._status > DECODE_STATUS_RUNNING): 325 | self._status = DECODE_STATUS_EXIT 326 | 327 | return ret, image 328 | 329 | def destroy(self): 330 | """Release all decode resource""" 331 | if self._vdec is not None: 332 | self._vdec.destroy() 333 | if self._input_buffer is not None: 334 | acl.media.dvpp_free(self._input_buffer) 335 | self._input_buffer = None 336 | 337 | -------------------------------------------------------------------------------- /atlas_utils/acl_dvpp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (R) @huawei.com, all rights reserved 3 | -*- coding:utf-8 -*- 4 | CREATED: 2021-01-20 20:12:13 5 | MODIFIED: 2021-01-29 14:04:45 6 | """ 7 | import numpy as np 8 | import acl 9 | import atlas_utils.utils as utils 10 | from atlas_utils.acl_image import AclImage 11 | from atlas_utils.acl_logger import log_error, log_info 12 | from atlas_utils.resource_list import resource_list 13 | import atlas_utils.constants as constants 14 | 15 | 16 | class Dvpp(object): 17 | """ 18 | dvpp class 19 | """ 20 | 21 | def __init__(self, acl_resource=None): 22 | if acl_resource is None: 23 | self._stream, ret = acl.rt.create_stream() 24 | utils.check_ret("acl.rt.create_stream", ret) 25 | self._run_mode, ret = acl.rt.get_run_mode() 26 | utils.check_ret("acl.rt.get_run_mode", ret) 27 | else: 28 | self._stream = acl_resource.stream 29 | self._run_mode = acl_resource.run_mode 30 | self._dvpp_channel_desc = None 31 | self._crop_config = None 32 | self._paste_config = None 33 | 34 | self._init_resource() 35 | 36 | # Dvpp involves acl resources, which need to be released \ 37 | # before the acl ends when the program exits, \ 38 | # register here to the resource table to ensure the release timing 39 | self._is_destroyed = False 40 | resource_list.register(self) 41 | 42 | def _init_resource(self): 43 | # Create dvpp channel 44 | self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc() 45 | ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc) 46 | utils.check_ret("acl.media.dvpp_create_channel", ret) 47 | 48 | # Create a resize configuration 49 | self._resize_config = acl.media.dvpp_create_resize_config() 50 | 51 | # Create yuv to jpeg configuration 52 | self._jpege_config = acl.media.dvpp_create_jpege_config() 53 | ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100) 54 | utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret) 55 | 56 | def _gen_input_pic_desc(self, image, 57 | width_align_factor=16, height_align_factor=2): 58 | # Create input image 59 | stride_width = utils.align_up(image.width, width_align_factor) 
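        # DVPP buffers are described by strides rather than the visible size:
        # this module aligns the width stride to 16 and the height stride to 2
        # by default (128 and 16 for JPEG decode output), so stride_width and
        # stride_height may be larger than image.width and image.height.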
60 | stride_height = utils.align_up(image.height, height_align_factor) 61 | 62 | pic_desc = acl.media.dvpp_create_pic_desc() 63 | acl.media.dvpp_set_pic_desc_data(pic_desc, image.data()) 64 | acl.media.dvpp_set_pic_desc_format( 65 | pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) 66 | acl.media.dvpp_set_pic_desc_width(pic_desc, image.width) 67 | acl.media.dvpp_set_pic_desc_height(pic_desc, image.height) 68 | acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width) 69 | acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height) 70 | acl.media.dvpp_set_pic_desc_size(pic_desc, image.size) 71 | 72 | return pic_desc 73 | 74 | def _gen_output_pic_desc(self, width, height, 75 | output_buffer, output_buffer_size, 76 | width_align_factor=16, height_align_factor=2): 77 | # Create output image 78 | stride_width = utils.align_up(width, width_align_factor) 79 | stride_height = utils.align_up(height, height_align_factor) 80 | 81 | pic_desc = acl.media.dvpp_create_pic_desc() 82 | acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer) 83 | acl.media.dvpp_set_pic_desc_format( 84 | pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) 85 | acl.media.dvpp_set_pic_desc_width(pic_desc, width) 86 | acl.media.dvpp_set_pic_desc_height(pic_desc, height) 87 | acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width) 88 | acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height) 89 | acl.media.dvpp_set_pic_desc_size(pic_desc, output_buffer_size) 90 | 91 | return pic_desc 92 | 93 | def _stride_yuv_size(self, width, height, 94 | width_align_factor=16, height_align_factor=2): 95 | stride_width = utils.align_up(width, width_align_factor) 96 | stride_height = utils.align_up(height, height_align_factor) 97 | stride_size = utils.yuv420sp_size(stride_width, stride_height) 98 | 99 | return stride_width, stride_height, stride_size 100 | 101 | def jpegd(self, image): 102 | """ 103 | jepg image to yuv image 104 | """ 105 | # Create conversion output image desc 106 | output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image) 107 | ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc, 108 | image.data(), 109 | image.size, 110 | output_desc, 111 | self._stream) 112 | if ret != constants.ACL_ERROR_NONE: 113 | log_error("dvpp_jpeg_decode_async failed ret={}".format(ret)) 114 | return None 115 | 116 | ret = acl.rt.synchronize_stream(self._stream) 117 | if ret != constants.ACL_ERROR_NONE: 118 | log_error("dvpp_jpeg_decode_async failed ret={}".format(ret)) 119 | return None 120 | 121 | # Return the decoded AclImage instance 122 | stride_width = utils.align_up128(image.width) 123 | stride_height = utils.align_up16(image.height) 124 | stride_size = utils.yuv420sp_size(stride_width, stride_height) 125 | return AclImage(out_buffer, stride_width, 126 | stride_height, stride_size, constants.MEMORY_DVPP) 127 | 128 | def _gen_jpegd_out_pic_desc(self, image): 129 | # Predict the memory size required to decode jpeg into yuv pictures 130 | ret, out_buffer_size = self._get_jpegd_memory_size(image) 131 | if not ret: 132 | return None 133 | # Apply for memory for storing decoded yuv pictures 134 | out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) 135 | if ret != constants.ACL_ERROR_NONE: 136 | log_error("Dvpp malloc failed, error: ", ret) 137 | return None 138 | # Create output image desc 139 | pic_desc = self._gen_output_pic_desc( 140 | image.width, 141 | image.height, 142 | out_buffer, 143 | out_buffer_size, 144 | width_align_factor=128, 145 | height_align_factor=16) 146 | return pic_desc, 
out_buffer 147 | 148 | def _get_jpegd_memory_size(self, image): 149 | if image.is_local(): 150 | size, ret = acl.media.dvpp_jpeg_predict_dec_size( 151 | image.data(), image.size, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420) 152 | if ret != constants.ACL_ERROR_NONE: 153 | log_error("Predict jpeg decode size failed, return ", ret) 154 | return False, 0 155 | return True, size 156 | else: 157 | return True, int( 158 | utils.yuv420sp_size( 159 | image.width, image.height) * 3) 160 | 161 | def resize(self, image, resize_width, resize_height): 162 | """ 163 | Scale yuvsp420 picture to specified size 164 | """ 165 | # Generate input picture desc 166 | input_desc = self._gen_input_pic_desc(image) 167 | # Calculate the image size after scaling 168 | stride_width = utils.align_up16(resize_width) 169 | stride_height = utils.align_up2(resize_height) 170 | output_size = utils.yuv420sp_size(stride_width, stride_height) 171 | # Request memory for the zoomed picture 172 | out_buffer, ret = acl.media.dvpp_malloc(output_size) 173 | if ret != constants.ACL_ERROR_NONE: 174 | log_error("Dvpp malloc failed, error: ", ret) 175 | return None 176 | # Create output image 177 | output_desc = self._gen_output_pic_desc(resize_width, resize_height, 178 | out_buffer, output_size) 179 | if output_desc is None: 180 | log_error("Gen resize output desc failed") 181 | return None 182 | # Call dvpp asynchronous zoom interface to zoom pictures 183 | ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc, 184 | input_desc, 185 | output_desc, 186 | self._resize_config, 187 | self._stream) 188 | if ret != constants.ACL_ERROR_NONE: 189 | log_error("Vpc resize async failed, error: ", ret) 190 | return None 191 | # Wait for the zoom operation to complete 192 | ret = acl.rt.synchronize_stream(self._stream) 193 | if ret != constants.ACL_ERROR_NONE: 194 | log_error("Resize synchronize stream failed, error: ", ret) 195 | return None 196 | # Release the resources requested for scaling 197 | acl.media.dvpp_destroy_pic_desc(input_desc) 198 | acl.media.dvpp_destroy_pic_desc(output_desc) 199 | return AclImage(out_buffer, stride_width, 200 | stride_height, output_size, constants.MEMORY_DVPP) 201 | 202 | def _gen_resize_out_pic_desc(self, resize_width, 203 | resize_height, output_size): 204 | out_buffer, ret = acl.media.dvpp_malloc(output_size) 205 | if ret != constants.ACL_ERROR_NONE: 206 | log_error("Dvpp malloc failed, error: ", ret) 207 | return None 208 | pic_desc = self._gen_output_pic_desc(resize_width, resize_height, 209 | out_buffer, output_size) 210 | return pic_desc, out_buffer 211 | 212 | def crop_and_paste( 213 | self, 214 | image, 215 | width, 216 | height, 217 | crop_and_paste_width, 218 | crop_and_paste_height): 219 | """ 220 | crop_and_paste 221 | """ 222 | print('[Dvpp] vpc crop and paste stage:') 223 | input_desc = self._gen_input_pic_desc(image) 224 | stride_width = utils.align_up16(crop_and_paste_width) 225 | stride_height = utils.align_up2(crop_and_paste_height) 226 | out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) 227 | out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) 228 | output_desc = self._gen_output_pic_desc( 229 | crop_and_paste_width, 230 | crop_and_paste_height, 231 | out_buffer, 232 | out_buffer_size) 233 | self._crop_config = acl.media.dvpp_create_roi_config( 234 | 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) 235 | # set crop area: 236 | rx = float(width) / float(crop_and_paste_width) 237 | ry = float(height) / float(crop_and_paste_height) 238 | if rx > ry: 239 | dx = 0 
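            # width is the limiting dimension: scale the whole image by rx and
            # centre it vertically in the output; dy is the top/bottom margin
            # of the resulting letterbox (dx stays 0).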
240 | r = rx 241 | dy = int((crop_and_paste_height - height / r) / 2) 242 | else: 243 | dy = 0 244 | r = ry 245 | dx = int((crop_and_paste_width - width / r) / 2) 246 | pasteRightOffset = int(crop_and_paste_width - 2 * dx) 247 | pasteBottomOffset = int(crop_and_paste_height - 2 * dy) 248 | if (pasteRightOffset % 2) == 0: 249 | pasteRightOffset = pasteRightOffset - 1 250 | if (pasteBottomOffset % 2) == 0: 251 | pasteBottomOffset = pasteBottomOffset - 1 252 | self._paste_config = acl.media.dvpp_create_roi_config( 253 | 0, pasteRightOffset, 0, pasteBottomOffset) 254 | ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, 255 | input_desc, 256 | output_desc, 257 | self._crop_config, 258 | self._paste_config, 259 | self._stream) 260 | utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) 261 | ret = acl.rt.synchronize_stream(self._stream) 262 | utils.check_ret("acl.rt.synchronize_stream", ret) 263 | print('[Dvpp] vpc crop and paste stage success') 264 | stride_width = crop_and_paste_width - 2 * dx 265 | stride_height = crop_and_paste_height - 2 * dy 266 | #stride_width = utils.align_up16(crop_and_paste_width) 267 | #stride_height = utils.align_up2(crop_and_paste_height) 268 | 269 | return AclImage(out_buffer, stride_width, 270 | stride_height, out_buffer_size, constants.MEMORY_DVPP) 271 | 272 | def crop_and_paste_get_roi( 273 | self, 274 | image, 275 | width, 276 | height, 277 | crop_and_paste_width, 278 | crop_and_paste_height): 279 | """ 280 | :image: input image 281 | :width: input image width 282 | :height: input image height 283 | :crop_and_paste_width: crop_and_paste_width 284 | :crop_and_paste_height: crop_and_paste_height 285 | :return: return AclImage 286 | """ 287 | print('[Dvpp] vpc crop and paste stage:') 288 | input_desc = self._gen_input_pic_desc(image) 289 | stride_width = utils.align_up16(crop_and_paste_width) 290 | stride_height = utils.align_up2(crop_and_paste_height) 291 | out_buffer_size = utils.yuv420sp_size(stride_width, stride_height) 292 | out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size) 293 | output_desc = self._gen_output_pic_desc( 294 | crop_and_paste_width, 295 | crop_and_paste_height, 296 | out_buffer, 297 | out_buffer_size) 298 | self._crop_config = acl.media.dvpp_create_roi_config( 299 | 0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1) 300 | self._paste_config = acl.media.dvpp_create_roi_config( 301 | 0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1) 302 | ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc, 303 | input_desc, 304 | output_desc, 305 | self._crop_config, 306 | self._paste_config, 307 | self._stream) 308 | utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret) 309 | ret = acl.rt.synchronize_stream(self._stream) 310 | utils.check_ret("acl.rt.synchronize_stream", ret) 311 | print('[Dvpp] vpc crop and paste stage success') 312 | stride_width = utils.align_up16(crop_and_paste_width) 313 | stride_height = utils.align_up2(crop_and_paste_height) 314 | return AclImage(out_buffer, stride_width, 315 | stride_height, out_buffer_size, constants.MEMORY_DVPP) 316 | 317 | def jpege(self, image): 318 | """ 319 | Convert yuv420sp pictures to jpeg pictures 320 | """ 321 | # create input image 322 | input_desc = self._gen_input_pic_desc(image) 323 | # Predict the memory size required for conversion 324 | output_size, ret = acl.media.dvpp_jpeg_predict_enc_size( 325 | input_desc, self._jpege_config) 326 | if (ret != constants.ACL_ERROR_NONE): 327 | log_error("Predict jpege output size failed") 328 
| return None 329 | # Request memory required for conversion 330 | output_buffer, ret = acl.media.dvpp_malloc(output_size) 331 | if (ret != constants.ACL_ERROR_NONE): 332 | log_error("Malloc jpege output memory failed") 333 | return None 334 | output_size_array = np.array([output_size], dtype=np.int32) 335 | output_size_ptr = acl.util.numpy_to_ptr(output_size_array) 336 | 337 | # Call jpege asynchronous interface to convert pictures 338 | ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc, 339 | input_desc, output_buffer, 340 | output_size_ptr, 341 | self._jpege_config, 342 | self._stream) 343 | if (ret != constants.ACL_ERROR_NONE): 344 | log_error("Jpege failed, ret ", ret) 345 | return None 346 | # Wait for the conversion to complete 347 | ret = acl.rt.synchronize_stream(self._stream) 348 | if (ret != constants.ACL_ERROR_NONE): 349 | print("Jpege synchronize stream, failed, ret ", ret) 350 | return None 351 | # Release resources 352 | acl.media.dvpp_destroy_pic_desc(input_desc) 353 | return AclImage( 354 | output_buffer, image.width, image.height, int( 355 | output_size_array[0]), constants.MEMORY_DVPP) 356 | 357 | def destroy(self): 358 | """ 359 | dvpp resource release 360 | """ 361 | if self._is_destroyed: 362 | return 363 | 364 | if self._resize_config: 365 | acl.media.dvpp_destroy_resize_config(self._resize_config) 366 | 367 | if self._dvpp_channel_desc: 368 | acl.media.dvpp_destroy_channel(self._dvpp_channel_desc) 369 | acl.media.dvpp_destroy_channel_desc(self._dvpp_channel_desc) 370 | 371 | if self._jpege_config: 372 | acl.media.dvpp_destroy_jpege_config(self._jpege_config) 373 | self._is_destroyed = True 374 | resource_list.unregister(self) 375 | log_info("dvpp resource release success") 376 | 377 | def __del__(self): 378 | self.destroy() 379 | -------------------------------------------------------------------------------- /atlas_utils/presenteragent/presenter_message_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
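# NOTE: generated with an older protoc (descriptor_pb2 style); it targets the
# protobuf 3.x runtime and may need to be regenerated from
# presenter_message.proto when a newer google.protobuf package is installed.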
2 | # source: presenter_message.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf.internal import enum_type_wrapper 7 | from google.protobuf import descriptor as _descriptor 8 | from google.protobuf import message as _message 9 | from google.protobuf import reflection as _reflection 10 | from google.protobuf import symbol_database as _symbol_database 11 | from google.protobuf import descriptor_pb2 12 | # @@protoc_insertion_point(imports) 13 | 14 | _sym_db = _symbol_database.Default() 15 | 16 | 17 | 18 | 19 | DESCRIPTOR = _descriptor.FileDescriptor( 20 | name='presenter_message.proto', 21 | package='ascend.presenter.proto', 22 | syntax='proto3', 23 | serialized_pb=_b('\n\x17presenter_message.proto\x12\x16\x61scend.presenter.proto\"l\n\x12OpenChannelRequest\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t\x12@\n\x0c\x63ontent_type\x18\x02 \x01(\x0e\x32*.ascend.presenter.proto.ChannelContentType\"n\n\x13OpenChannelResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.OpenChannelErrorCode\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\x12\n\x10HeartbeatMessage\"\"\n\nCoordinate\x12\t\n\x01x\x18\x01 \x01(\r\x12\t\n\x01y\x18\x02 \x01(\r\"\x94\x01\n\x0eRectangle_Attr\x12\x34\n\x08left_top\x18\x01 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x38\n\x0cright_bottom\x18\x02 \x01(\x0b\x32\".ascend.presenter.proto.Coordinate\x12\x12\n\nlabel_text\x18\x03 \x01(\t\"\xb7\x01\n\x13PresentImageRequest\x12\x33\n\x06\x66ormat\x18\x01 \x01(\x0e\x32#.ascend.presenter.proto.ImageFormat\x12\r\n\x05width\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12>\n\x0erectangle_list\x18\x05 \x03(\x0b\x32&.ascend.presenter.proto.Rectangle_Attr\"o\n\x14PresentImageResponse\x12@\n\nerror_code\x18\x01 \x01(\x0e\x32,.ascend.presenter.proto.PresentDataErrorCode\x12\x15\n\rerror_message\x18\x02 \x01(\t*\xa5\x01\n\x14OpenChannelErrorCode\x12\x19\n\x15kOpenChannelErrorNone\x10\x00\x12\"\n\x1ekOpenChannelErrorNoSuchChannel\x10\x01\x12)\n%kOpenChannelErrorChannelAlreadyOpened\x10\x02\x12#\n\x16kOpenChannelErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01*P\n\x12\x43hannelContentType\x12\x1c\n\x18kChannelContentTypeImage\x10\x00\x12\x1c\n\x18kChannelContentTypeVideo\x10\x01*#\n\x0bImageFormat\x12\x14\n\x10kImageFormatJpeg\x10\x00*\xa4\x01\n\x14PresentDataErrorCode\x12\x19\n\x15kPresentDataErrorNone\x10\x00\x12$\n kPresentDataErrorUnsupportedType\x10\x01\x12&\n\"kPresentDataErrorUnsupportedFormat\x10\x02\x12#\n\x16kPresentDataErrorOther\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x62\x06proto3') 24 | ) 25 | 26 | _OPENCHANNELERRORCODE = _descriptor.EnumDescriptor( 27 | name='OpenChannelErrorCode', 28 | full_name='ascend.presenter.proto.OpenChannelErrorCode', 29 | filename=None, 30 | file=DESCRIPTOR, 31 | values=[ 32 | _descriptor.EnumValueDescriptor( 33 | name='kOpenChannelErrorNone', index=0, number=0, 34 | options=None, 35 | type=None), 36 | _descriptor.EnumValueDescriptor( 37 | name='kOpenChannelErrorNoSuchChannel', index=1, number=1, 38 | options=None, 39 | type=None), 40 | _descriptor.EnumValueDescriptor( 41 | name='kOpenChannelErrorChannelAlreadyOpened', index=2, number=2, 42 | options=None, 43 | type=None), 44 | _descriptor.EnumValueDescriptor( 45 | name='kOpenChannelErrorOther', index=3, number=-1, 46 | options=None, 47 | type=None), 48 | ], 49 | containing_type=None, 50 | options=None, 51 | serialized_start=780, 52 | serialized_end=945, 53 | ) 54 | 
_sym_db.RegisterEnumDescriptor(_OPENCHANNELERRORCODE) 55 | 56 | OpenChannelErrorCode = enum_type_wrapper.EnumTypeWrapper(_OPENCHANNELERRORCODE) 57 | _CHANNELCONTENTTYPE = _descriptor.EnumDescriptor( 58 | name='ChannelContentType', 59 | full_name='ascend.presenter.proto.ChannelContentType', 60 | filename=None, 61 | file=DESCRIPTOR, 62 | values=[ 63 | _descriptor.EnumValueDescriptor( 64 | name='kChannelContentTypeImage', index=0, number=0, 65 | options=None, 66 | type=None), 67 | _descriptor.EnumValueDescriptor( 68 | name='kChannelContentTypeVideo', index=1, number=1, 69 | options=None, 70 | type=None), 71 | ], 72 | containing_type=None, 73 | options=None, 74 | serialized_start=947, 75 | serialized_end=1027, 76 | ) 77 | _sym_db.RegisterEnumDescriptor(_CHANNELCONTENTTYPE) 78 | 79 | ChannelContentType = enum_type_wrapper.EnumTypeWrapper(_CHANNELCONTENTTYPE) 80 | _IMAGEFORMAT = _descriptor.EnumDescriptor( 81 | name='ImageFormat', 82 | full_name='ascend.presenter.proto.ImageFormat', 83 | filename=None, 84 | file=DESCRIPTOR, 85 | values=[ 86 | _descriptor.EnumValueDescriptor( 87 | name='kImageFormatJpeg', index=0, number=0, 88 | options=None, 89 | type=None), 90 | ], 91 | containing_type=None, 92 | options=None, 93 | serialized_start=1029, 94 | serialized_end=1064, 95 | ) 96 | _sym_db.RegisterEnumDescriptor(_IMAGEFORMAT) 97 | 98 | ImageFormat = enum_type_wrapper.EnumTypeWrapper(_IMAGEFORMAT) 99 | _PRESENTDATAERRORCODE = _descriptor.EnumDescriptor( 100 | name='PresentDataErrorCode', 101 | full_name='ascend.presenter.proto.PresentDataErrorCode', 102 | filename=None, 103 | file=DESCRIPTOR, 104 | values=[ 105 | _descriptor.EnumValueDescriptor( 106 | name='kPresentDataErrorNone', index=0, number=0, 107 | options=None, 108 | type=None), 109 | _descriptor.EnumValueDescriptor( 110 | name='kPresentDataErrorUnsupportedType', index=1, number=1, 111 | options=None, 112 | type=None), 113 | _descriptor.EnumValueDescriptor( 114 | name='kPresentDataErrorUnsupportedFormat', index=2, number=2, 115 | options=None, 116 | type=None), 117 | _descriptor.EnumValueDescriptor( 118 | name='kPresentDataErrorOther', index=3, number=-1, 119 | options=None, 120 | type=None), 121 | ], 122 | containing_type=None, 123 | options=None, 124 | serialized_start=1067, 125 | serialized_end=1231, 126 | ) 127 | _sym_db.RegisterEnumDescriptor(_PRESENTDATAERRORCODE) 128 | 129 | PresentDataErrorCode = enum_type_wrapper.EnumTypeWrapper(_PRESENTDATAERRORCODE) 130 | kOpenChannelErrorNone = 0 131 | kOpenChannelErrorNoSuchChannel = 1 132 | kOpenChannelErrorChannelAlreadyOpened = 2 133 | kOpenChannelErrorOther = -1 134 | kChannelContentTypeImage = 0 135 | kChannelContentTypeVideo = 1 136 | kImageFormatJpeg = 0 137 | kPresentDataErrorNone = 0 138 | kPresentDataErrorUnsupportedType = 1 139 | kPresentDataErrorUnsupportedFormat = 2 140 | kPresentDataErrorOther = -1 141 | 142 | 143 | 144 | _OPENCHANNELREQUEST = _descriptor.Descriptor( 145 | name='OpenChannelRequest', 146 | full_name='ascend.presenter.proto.OpenChannelRequest', 147 | filename=None, 148 | file=DESCRIPTOR, 149 | containing_type=None, 150 | fields=[ 151 | _descriptor.FieldDescriptor( 152 | name='channel_name', full_name='ascend.presenter.proto.OpenChannelRequest.channel_name', index=0, 153 | number=1, type=9, cpp_type=9, label=1, 154 | has_default_value=False, default_value=_b("").decode('utf-8'), 155 | message_type=None, enum_type=None, containing_type=None, 156 | is_extension=False, extension_scope=None, 157 | options=None, file=DESCRIPTOR), 158 | _descriptor.FieldDescriptor( 159 | 
name='content_type', full_name='ascend.presenter.proto.OpenChannelRequest.content_type', index=1, 160 | number=2, type=14, cpp_type=8, label=1, 161 | has_default_value=False, default_value=0, 162 | message_type=None, enum_type=None, containing_type=None, 163 | is_extension=False, extension_scope=None, 164 | options=None, file=DESCRIPTOR), 165 | ], 166 | extensions=[ 167 | ], 168 | nested_types=[], 169 | enum_types=[ 170 | ], 171 | options=None, 172 | is_extendable=False, 173 | syntax='proto3', 174 | extension_ranges=[], 175 | oneofs=[ 176 | ], 177 | serialized_start=51, 178 | serialized_end=159, 179 | ) 180 | 181 | 182 | _OPENCHANNELRESPONSE = _descriptor.Descriptor( 183 | name='OpenChannelResponse', 184 | full_name='ascend.presenter.proto.OpenChannelResponse', 185 | filename=None, 186 | file=DESCRIPTOR, 187 | containing_type=None, 188 | fields=[ 189 | _descriptor.FieldDescriptor( 190 | name='error_code', full_name='ascend.presenter.proto.OpenChannelResponse.error_code', index=0, 191 | number=1, type=14, cpp_type=8, label=1, 192 | has_default_value=False, default_value=0, 193 | message_type=None, enum_type=None, containing_type=None, 194 | is_extension=False, extension_scope=None, 195 | options=None, file=DESCRIPTOR), 196 | _descriptor.FieldDescriptor( 197 | name='error_message', full_name='ascend.presenter.proto.OpenChannelResponse.error_message', index=1, 198 | number=2, type=9, cpp_type=9, label=1, 199 | has_default_value=False, default_value=_b("").decode('utf-8'), 200 | message_type=None, enum_type=None, containing_type=None, 201 | is_extension=False, extension_scope=None, 202 | options=None, file=DESCRIPTOR), 203 | ], 204 | extensions=[ 205 | ], 206 | nested_types=[], 207 | enum_types=[ 208 | ], 209 | options=None, 210 | is_extendable=False, 211 | syntax='proto3', 212 | extension_ranges=[], 213 | oneofs=[ 214 | ], 215 | serialized_start=161, 216 | serialized_end=271, 217 | ) 218 | 219 | 220 | _HEARTBEATMESSAGE = _descriptor.Descriptor( 221 | name='HeartbeatMessage', 222 | full_name='ascend.presenter.proto.HeartbeatMessage', 223 | filename=None, 224 | file=DESCRIPTOR, 225 | containing_type=None, 226 | fields=[ 227 | ], 228 | extensions=[ 229 | ], 230 | nested_types=[], 231 | enum_types=[ 232 | ], 233 | options=None, 234 | is_extendable=False, 235 | syntax='proto3', 236 | extension_ranges=[], 237 | oneofs=[ 238 | ], 239 | serialized_start=273, 240 | serialized_end=291, 241 | ) 242 | 243 | 244 | _COORDINATE = _descriptor.Descriptor( 245 | name='Coordinate', 246 | full_name='ascend.presenter.proto.Coordinate', 247 | filename=None, 248 | file=DESCRIPTOR, 249 | containing_type=None, 250 | fields=[ 251 | _descriptor.FieldDescriptor( 252 | name='x', full_name='ascend.presenter.proto.Coordinate.x', index=0, 253 | number=1, type=13, cpp_type=3, label=1, 254 | has_default_value=False, default_value=0, 255 | message_type=None, enum_type=None, containing_type=None, 256 | is_extension=False, extension_scope=None, 257 | options=None, file=DESCRIPTOR), 258 | _descriptor.FieldDescriptor( 259 | name='y', full_name='ascend.presenter.proto.Coordinate.y', index=1, 260 | number=2, type=13, cpp_type=3, label=1, 261 | has_default_value=False, default_value=0, 262 | message_type=None, enum_type=None, containing_type=None, 263 | is_extension=False, extension_scope=None, 264 | options=None, file=DESCRIPTOR), 265 | ], 266 | extensions=[ 267 | ], 268 | nested_types=[], 269 | enum_types=[ 270 | ], 271 | options=None, 272 | is_extendable=False, 273 | syntax='proto3', 274 | extension_ranges=[], 275 | oneofs=[ 276 | 
], 277 | serialized_start=293, 278 | serialized_end=327, 279 | ) 280 | 281 | 282 | _RECTANGLE_ATTR = _descriptor.Descriptor( 283 | name='Rectangle_Attr', 284 | full_name='ascend.presenter.proto.Rectangle_Attr', 285 | filename=None, 286 | file=DESCRIPTOR, 287 | containing_type=None, 288 | fields=[ 289 | _descriptor.FieldDescriptor( 290 | name='left_top', full_name='ascend.presenter.proto.Rectangle_Attr.left_top', index=0, 291 | number=1, type=11, cpp_type=10, label=1, 292 | has_default_value=False, default_value=None, 293 | message_type=None, enum_type=None, containing_type=None, 294 | is_extension=False, extension_scope=None, 295 | options=None, file=DESCRIPTOR), 296 | _descriptor.FieldDescriptor( 297 | name='right_bottom', full_name='ascend.presenter.proto.Rectangle_Attr.right_bottom', index=1, 298 | number=2, type=11, cpp_type=10, label=1, 299 | has_default_value=False, default_value=None, 300 | message_type=None, enum_type=None, containing_type=None, 301 | is_extension=False, extension_scope=None, 302 | options=None, file=DESCRIPTOR), 303 | _descriptor.FieldDescriptor( 304 | name='label_text', full_name='ascend.presenter.proto.Rectangle_Attr.label_text', index=2, 305 | number=3, type=9, cpp_type=9, label=1, 306 | has_default_value=False, default_value=_b("").decode('utf-8'), 307 | message_type=None, enum_type=None, containing_type=None, 308 | is_extension=False, extension_scope=None, 309 | options=None, file=DESCRIPTOR), 310 | ], 311 | extensions=[ 312 | ], 313 | nested_types=[], 314 | enum_types=[ 315 | ], 316 | options=None, 317 | is_extendable=False, 318 | syntax='proto3', 319 | extension_ranges=[], 320 | oneofs=[ 321 | ], 322 | serialized_start=330, 323 | serialized_end=478, 324 | ) 325 | 326 | 327 | _PRESENTIMAGEREQUEST = _descriptor.Descriptor( 328 | name='PresentImageRequest', 329 | full_name='ascend.presenter.proto.PresentImageRequest', 330 | filename=None, 331 | file=DESCRIPTOR, 332 | containing_type=None, 333 | fields=[ 334 | _descriptor.FieldDescriptor( 335 | name='format', full_name='ascend.presenter.proto.PresentImageRequest.format', index=0, 336 | number=1, type=14, cpp_type=8, label=1, 337 | has_default_value=False, default_value=0, 338 | message_type=None, enum_type=None, containing_type=None, 339 | is_extension=False, extension_scope=None, 340 | options=None, file=DESCRIPTOR), 341 | _descriptor.FieldDescriptor( 342 | name='width', full_name='ascend.presenter.proto.PresentImageRequest.width', index=1, 343 | number=2, type=13, cpp_type=3, label=1, 344 | has_default_value=False, default_value=0, 345 | message_type=None, enum_type=None, containing_type=None, 346 | is_extension=False, extension_scope=None, 347 | options=None, file=DESCRIPTOR), 348 | _descriptor.FieldDescriptor( 349 | name='height', full_name='ascend.presenter.proto.PresentImageRequest.height', index=2, 350 | number=3, type=13, cpp_type=3, label=1, 351 | has_default_value=False, default_value=0, 352 | message_type=None, enum_type=None, containing_type=None, 353 | is_extension=False, extension_scope=None, 354 | options=None, file=DESCRIPTOR), 355 | _descriptor.FieldDescriptor( 356 | name='data', full_name='ascend.presenter.proto.PresentImageRequest.data', index=3, 357 | number=4, type=12, cpp_type=9, label=1, 358 | has_default_value=False, default_value=_b(""), 359 | message_type=None, enum_type=None, containing_type=None, 360 | is_extension=False, extension_scope=None, 361 | options=None, file=DESCRIPTOR), 362 | _descriptor.FieldDescriptor( 363 | name='rectangle_list', 
full_name='ascend.presenter.proto.PresentImageRequest.rectangle_list', index=4, 364 | number=5, type=11, cpp_type=10, label=3, 365 | has_default_value=False, default_value=[], 366 | message_type=None, enum_type=None, containing_type=None, 367 | is_extension=False, extension_scope=None, 368 | options=None, file=DESCRIPTOR), 369 | ], 370 | extensions=[ 371 | ], 372 | nested_types=[], 373 | enum_types=[ 374 | ], 375 | options=None, 376 | is_extendable=False, 377 | syntax='proto3', 378 | extension_ranges=[], 379 | oneofs=[ 380 | ], 381 | serialized_start=481, 382 | serialized_end=664, 383 | ) 384 | 385 | 386 | _PRESENTIMAGERESPONSE = _descriptor.Descriptor( 387 | name='PresentImageResponse', 388 | full_name='ascend.presenter.proto.PresentImageResponse', 389 | filename=None, 390 | file=DESCRIPTOR, 391 | containing_type=None, 392 | fields=[ 393 | _descriptor.FieldDescriptor( 394 | name='error_code', full_name='ascend.presenter.proto.PresentImageResponse.error_code', index=0, 395 | number=1, type=14, cpp_type=8, label=1, 396 | has_default_value=False, default_value=0, 397 | message_type=None, enum_type=None, containing_type=None, 398 | is_extension=False, extension_scope=None, 399 | options=None, file=DESCRIPTOR), 400 | _descriptor.FieldDescriptor( 401 | name='error_message', full_name='ascend.presenter.proto.PresentImageResponse.error_message', index=1, 402 | number=2, type=9, cpp_type=9, label=1, 403 | has_default_value=False, default_value=_b("").decode('utf-8'), 404 | message_type=None, enum_type=None, containing_type=None, 405 | is_extension=False, extension_scope=None, 406 | options=None, file=DESCRIPTOR), 407 | ], 408 | extensions=[ 409 | ], 410 | nested_types=[], 411 | enum_types=[ 412 | ], 413 | options=None, 414 | is_extendable=False, 415 | syntax='proto3', 416 | extension_ranges=[], 417 | oneofs=[ 418 | ], 419 | serialized_start=666, 420 | serialized_end=777, 421 | ) 422 | 423 | _OPENCHANNELREQUEST.fields_by_name['content_type'].enum_type = _CHANNELCONTENTTYPE 424 | _OPENCHANNELRESPONSE.fields_by_name['error_code'].enum_type = _OPENCHANNELERRORCODE 425 | _RECTANGLE_ATTR.fields_by_name['left_top'].message_type = _COORDINATE 426 | _RECTANGLE_ATTR.fields_by_name['right_bottom'].message_type = _COORDINATE 427 | _PRESENTIMAGEREQUEST.fields_by_name['format'].enum_type = _IMAGEFORMAT 428 | _PRESENTIMAGEREQUEST.fields_by_name['rectangle_list'].message_type = _RECTANGLE_ATTR 429 | _PRESENTIMAGERESPONSE.fields_by_name['error_code'].enum_type = _PRESENTDATAERRORCODE 430 | DESCRIPTOR.message_types_by_name['OpenChannelRequest'] = _OPENCHANNELREQUEST 431 | DESCRIPTOR.message_types_by_name['OpenChannelResponse'] = _OPENCHANNELRESPONSE 432 | DESCRIPTOR.message_types_by_name['HeartbeatMessage'] = _HEARTBEATMESSAGE 433 | DESCRIPTOR.message_types_by_name['Coordinate'] = _COORDINATE 434 | DESCRIPTOR.message_types_by_name['Rectangle_Attr'] = _RECTANGLE_ATTR 435 | DESCRIPTOR.message_types_by_name['PresentImageRequest'] = _PRESENTIMAGEREQUEST 436 | DESCRIPTOR.message_types_by_name['PresentImageResponse'] = _PRESENTIMAGERESPONSE 437 | DESCRIPTOR.enum_types_by_name['OpenChannelErrorCode'] = _OPENCHANNELERRORCODE 438 | DESCRIPTOR.enum_types_by_name['ChannelContentType'] = _CHANNELCONTENTTYPE 439 | DESCRIPTOR.enum_types_by_name['ImageFormat'] = _IMAGEFORMAT 440 | DESCRIPTOR.enum_types_by_name['PresentDataErrorCode'] = _PRESENTDATAERRORCODE 441 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 442 | 443 | OpenChannelRequest = _reflection.GeneratedProtocolMessageType('OpenChannelRequest', (_message.Message,), dict( 444 
| DESCRIPTOR = _OPENCHANNELREQUEST, 445 | __module__ = 'presenter_message_pb2' 446 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelRequest) 447 | )) 448 | _sym_db.RegisterMessage(OpenChannelRequest) 449 | 450 | OpenChannelResponse = _reflection.GeneratedProtocolMessageType('OpenChannelResponse', (_message.Message,), dict( 451 | DESCRIPTOR = _OPENCHANNELRESPONSE, 452 | __module__ = 'presenter_message_pb2' 453 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.OpenChannelResponse) 454 | )) 455 | _sym_db.RegisterMessage(OpenChannelResponse) 456 | 457 | HeartbeatMessage = _reflection.GeneratedProtocolMessageType('HeartbeatMessage', (_message.Message,), dict( 458 | DESCRIPTOR = _HEARTBEATMESSAGE, 459 | __module__ = 'presenter_message_pb2' 460 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.HeartbeatMessage) 461 | )) 462 | _sym_db.RegisterMessage(HeartbeatMessage) 463 | 464 | Coordinate = _reflection.GeneratedProtocolMessageType('Coordinate', (_message.Message,), dict( 465 | DESCRIPTOR = _COORDINATE, 466 | __module__ = 'presenter_message_pb2' 467 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Coordinate) 468 | )) 469 | _sym_db.RegisterMessage(Coordinate) 470 | 471 | Rectangle_Attr = _reflection.GeneratedProtocolMessageType('Rectangle_Attr', (_message.Message,), dict( 472 | DESCRIPTOR = _RECTANGLE_ATTR, 473 | __module__ = 'presenter_message_pb2' 474 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.Rectangle_Attr) 475 | )) 476 | _sym_db.RegisterMessage(Rectangle_Attr) 477 | 478 | PresentImageRequest = _reflection.GeneratedProtocolMessageType('PresentImageRequest', (_message.Message,), dict( 479 | DESCRIPTOR = _PRESENTIMAGEREQUEST, 480 | __module__ = 'presenter_message_pb2' 481 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageRequest) 482 | )) 483 | _sym_db.RegisterMessage(PresentImageRequest) 484 | 485 | PresentImageResponse = _reflection.GeneratedProtocolMessageType('PresentImageResponse', (_message.Message,), dict( 486 | DESCRIPTOR = _PRESENTIMAGERESPONSE, 487 | __module__ = 'presenter_message_pb2' 488 | # @@protoc_insertion_point(class_scope:ascend.presenter.proto.PresentImageResponse) 489 | )) 490 | _sym_db.RegisterMessage(PresentImageResponse) 491 | 492 | 493 | # @@protoc_insertion_point(module_scope) 494 | --------------------------------------------------------------------------------
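The generated classes above are ordinary protobuf messages. Below is a minimal sketch of how a detection result could be packed into a PresentImageRequest; the field names come from the descriptors above, while the JPEG bytes, the box tuple layout and the helper name itself are placeholders rather than part of the original code.

from atlas_utils.presenteragent import presenter_message_pb2 as pb2

def build_present_image_request(jpeg_bytes, width, height, boxes):
    # boxes: iterable of (x1, y1, x2, y2, label) tuples -- assumed format
    request = pb2.PresentImageRequest()
    request.format = pb2.kImageFormatJpeg
    request.width = width
    request.height = height
    request.data = jpeg_bytes
    for x1, y1, x2, y2, label in boxes:
        rect = request.rectangle_list.add()
        rect.left_top.x = x1
        rect.left_top.y = y1
        rect.right_bottom.x = x2
        rect.right_bottom.y = y2
        rect.label_text = label
    return request.SerializeToString()

Framing and sending the serialized bytes over the presenter socket is outside the scope of this sketch.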