├── runtime_config.json
├── src
│   ├── config_parser.cc
│   ├── preprocess_op.cc
│   ├── main.cc
│   └── movenet_detector.cc
├── README.md
├── include
│   ├── config_parser.h
│   ├── movenet_detector.h
│   └── preprocess_op.h
├── Makefile
├── deploy_guide.md
└── LICENSE
/runtime_config.json:
--------------------------------------------------------------------------------
1 | {
2 |   "model_dir_det": "./model_det/",
3 |   "batch_size_det": 1,
4 |   "threshold_det": 0.5,
5 |   "image_file": "./demo.jpg",
6 |   "image_dir": "",
7 |   "run_benchmark": true,
8 |   "cpu_threads": 4
9 | }
10 |
11 |
--------------------------------------------------------------------------------
/src/config_parser.cc:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #include "include/config_parser.h"
16 |
17 | namespace PaddleDetection {
18 |
19 | void load_jsonf(std::string jsonfile, Json::Value &jsondata) {
20 |   std::ifstream ifs;
21 |   ifs.open(jsonfile);
22 |
23 |   Json::CharReaderBuilder builder;
24 |   builder["collectComments"] = true;
25 |   JSONCPP_STRING errs;
26 |   if (!parseFromStream(builder, ifs, &jsondata, &errs)) {
27 |     std::cout << errs << std::endl;
28 |     return;
29 |   }
30 | }
31 |
32 | }  // namespace PaddleDetection
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MoveNet-PaddleLite
2 |
3 | Adapted from [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection);
4 |
5 | MoveNet C++ deployment based on [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite);
6 |
7 | The MoveNet model was converted from TensorFlow.
8 |
9 |
10 |
11 | ## Introduction
12 |
13 | MoveNet is a well-known recent open-source model; the original model was released in TensorFlow format.
14 |
15 | Here it has been converted into a model in Paddle storage format.
16 |
17 | This folder uses the converted Paddle-format model to implement C++ deployment based on Paddle-Lite. It supports keypoint prediction on mobile devices for a single image or a directory of images.
18 |
19 | The file structure required at deployment time:
20 |
21 | ```
22 | movenet_deploy/
23 | |-- model_det/
24 | |   |--movenet.nb          MoveNet model file
25 | |   |--infer_cfg.json      detector model configuration file
26 | |-- movenet                mobile executable produced by the build
27 | |-- runtime_config.json    runtime parameter configuration file
28 | |-- libpaddle_light_api_shared.so  Paddle-Lite library file
29 | ```
30 |
31 | **Note: Paddle-Lite v2.10 is required; older versions are not supported. The library can also be obtained from the complete test project package below.**
32 |
33 |
34 |
35 |
36 | ## MoveNet deployment model downloads
37 |
38 | ### Complete test project package
39 |
40 | After downloading, it can be deployed and tested on a mobile device directly. Besides the files listed in the deployment structure above, it also contains two folders, [model_det_singlepose] and [model_det_multipose], which are the single-person and multi-person model versions of the [model_det] folder.
41 |
42 | [movenet_deploy.tar.gz](https://bj.bcebos.com/v1/paddledet/models/keypoint/movenet_deploy.zip)
43 |
44 |
45 |
46 | ### Individual model downloads
47 |
48 | | Model type | singlepose model | multipose model |
49 | | ------------------------ | ------ | ------ |
50 | | Paddle-Lite deployment model | [movenet_s_paddlelite](https://bj.bcebos.com/v1/paddledet/models/keypoint/model_det_singlepose.zip) | [movenet_m_paddlelite](https://bj.bcebos.com/v1/paddledet/models/keypoint/model_det_multipose.zip) |
51 | | Paddle model converted via ONNX | [movenet_s_paddle](https://bj.bcebos.com/v1/paddledet/models/keypoint/movenet_s_paddle.tar.gz) | [movenet_m_paddle](https://bj.bcebos.com/v1/paddledet/models/keypoint/movenet_m_paddle.tar.gz) |
52 | | Original MoveNet model | [movenet_singlepose](https://tfhub.dev/google/movenet/singlepose/lightning/4) | [movenet_multipose](https://tfhub.dev/google/movenet/multipose/lightning/1) |
53 |
54 |
55 |
56 | ## Full deployment steps and usage instructions
57 |
58 | [deploy_guide](deploy_guide.md)
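For orientation, here is a condensed sketch of the call flow that `src/main.cc` (below) implements — parse the runtime config, construct the detector, run prediction, and visualize. Error handling is omitted and the output file name is illustrative only:

```cpp
#include <opencv2/opencv.hpp>
#include "include/config_parser.h"
#include "include/movenet_detector.h"

int main() {
  Json::Value cfg;
  PaddleDetection::load_jsonf("runtime_config.json", cfg);

  PaddleDetection::ObjectDetector det(cfg["model_dir_det"].as<std::string>(),
                                      cfg["cpu_threads"].as<int>(),
                                      cfg["batch_size_det"].as<int>());

  std::vector<cv::Mat> imgs = {cv::imread(cfg["image_file"].as<std::string>())};
  std::vector<PaddleDetection::ObjectResult> results;
  std::vector<double> times;  // {preprocess, inference, postprocess} in ms
  det.Predict(imgs, cfg["threshold_det"].as<float>(), /*warmup=*/0,
              /*repeats=*/1, &results, &times);

  cv::Mat vis = PaddleDetection::VisualizeResult(
      imgs[0], results, PaddleDetection::GenerateColorMap(20));
  cv::imwrite("output.jpg", vis);  // illustrative output name
  return 0;
}
```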
--------------------------------------------------------------------------------
/include/config_parser.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #pragma once
16 | #include <fstream>
17 | #include <iostream>
18 | #include <map>
19 | #include <string>
20 | #include <vector>
21 |
22 | #include "json/json.h"
23 |
24 | #ifdef _WIN32
25 | #define OS_PATH_SEP "\\"
26 | #else
27 | #define OS_PATH_SEP "/"
28 | #endif
29 |
30 | namespace PaddleDetection {
31 |
32 | void load_jsonf(std::string jsonfile, Json::Value& jsondata);
33 |
34 | // Inference model configuration parser
35 | class ConfigPaser {
36 |  public:
37 |   ConfigPaser() {}
38 |
39 |   ~ConfigPaser() {}
40 |
41 |   bool load_config(const std::string& model_dir,
42 |                    const std::string& cfg = "infer_cfg") {
43 |     Json::Value config;
44 |     load_jsonf(model_dir + OS_PATH_SEP + cfg + ".json", config);
45 |
46 |     // Get model arch: YOLO, SSD, RetinaNet, RCNN, Face
47 |     if (config.isMember("arch")) {
48 |       arch_ = config["arch"].as<std::string>();
49 |     } else {
50 |       std::cerr << "Please set model arch, "
51 |                 << "supported values: YOLO, SSD, RetinaNet, RCNN, Face."
52 |                 << std::endl;
53 |       return false;
54 |     }
55 |
56 |     // Get draw_threshold for visualization
57 |     if (config.isMember("draw_threshold")) {
58 |       draw_threshold_ = config["draw_threshold"].as<float>();
59 |     } else {
60 |       std::cerr << "Please set draw_threshold." << std::endl;
61 |       return false;
62 |     }
63 |     // Get Preprocess for preprocessing
64 |     if (config.isMember("Preprocess")) {
65 |       preprocess_info_ = config["Preprocess"];
66 |     } else {
67 |       std::cerr << "Please set Preprocess." << std::endl;
68 |       return false;
69 |     }
70 |     // Get label_list for visualization
71 |     if (config.isMember("label_list")) {
72 |       label_list_.clear();
73 |       for (auto item : config["label_list"]) {
74 |         label_list_.emplace_back(item.as<std::string>());
75 |       }
76 |     } else {
77 |       std::cerr << "Please set label_list." << std::endl;
78 |       return false;
79 |     }
80 |
81 |     return true;
82 |   }
83 |   float draw_threshold_;
84 |   std::string arch_;
85 |   Json::Value preprocess_info_;
86 |   std::vector<std::string> label_list_;
87 | };
88 |
89 | }  // namespace PaddleDetection
90 |
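`ConfigPaser::load_config` above requires the keys `arch`, `draw_threshold`, `Preprocess`, and `label_list` in the model's `infer_cfg.json`. The sketch below runs a hypothetical minimal config (values are illustrative, not the shipped ones) through the same jsoncpp entry point to show the expected shape:

```cpp
#include <sstream>
#include "include/config_parser.h"

int main() {
  // Hypothetical minimal infer_cfg.json; the real file ships with the model.
  const char* kMinimalCfg = R"({
    "arch": "HRNet",
    "draw_threshold": 0.5,
    "Preprocess": [
      {"type": "Resize", "interp": 1, "keep_ratio": true, "target_size": [192, 192]},
      {"type": "Permute"}
    ],
    "label_list": ["person"]
  })";

  Json::CharReaderBuilder builder;
  Json::Value cfg;
  JSONCPP_STRING errs;
  std::istringstream iss(kMinimalCfg);
  if (!Json::parseFromStream(builder, iss, &cfg, &errs)) return 1;
  // These are exactly the keys load_config checks with isMember().
  return cfg.isMember("arch") && cfg.isMember("draw_threshold") &&
                 cfg.isMember("Preprocess") && cfg.isMember("label_list")
             ? 0
             : 1;
}
```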
--------------------------------------------------------------------------------
/include/movenet_detector.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #pragma once
16 |
17 | #include <ctime>
18 | #include <memory>
19 | #include <string>
20 | #include <utility>
21 | #include <vector>
22 |
23 | #include <opencv2/core/core.hpp>
24 | #include <opencv2/highgui/highgui.hpp>
25 | #include <opencv2/imgproc/imgproc.hpp>
26 |
27 | #include "paddle_api.h"  // NOLINT
28 |
29 | #include "include/config_parser.h"
30 | #include "include/preprocess_op.h"
31 |
32 | using namespace paddle::lite_api;  // NOLINT
33 |
34 | namespace PaddleDetection {
35 | // Object Detection Result
36 | struct ObjectResult {
37 |   // Rectangle coordinates of the detected object: left, top, right, bottom
38 |   std::vector<float> rect;
39 |   // Class id of the detected object
40 |   int class_id;
41 |   // Confidence of the detected object
42 |   float confidence;
43 |   std::vector<float> kpts;  // 17 keypoints as (score, x, y) triplets
44 | };
45 |
46 | // Generate visualization colormap for each class
47 | std::vector<int> GenerateColorMap(int num_class);
48 |
49 | // Visualization of detection results
50 | cv::Mat VisualizeResult(const cv::Mat& img,
51 |                         const std::vector<ObjectResult>& results,
52 |                         const std::vector<int>& colormap,
53 |                         float threshold = 0.2);
54 |
55 | class ObjectDetector {
56 |  public:
57 |   explicit ObjectDetector(const std::string& model_dir,
58 |                           int cpu_threads = 1,
59 |                           const int batch_size = 1) {
60 |     config_.load_config(model_dir);
61 |     threshold_ = config_.draw_threshold_;
62 |     preprocessor_.Init(config_.preprocess_info_);
63 |     LoadModel(model_dir, cpu_threads);
64 |   }
65 |
66 |   // Load Paddle inference model
67 |   void LoadModel(std::string model_file, int num_threads);
68 |
69 |   // Run predictor
70 |   void Predict(const std::vector<cv::Mat>& imgs,
71 |                const double threshold = 0.5,
72 |                const int warmup = 0,
73 |                const int repeats = 1,
74 |                std::vector<ObjectResult>* result = nullptr,
75 |                std::vector<double>* times = nullptr);
76 |
77 |   // Get Model Label list
78 |   const std::vector<std::string>& GetLabelList() const {
79 |     return config_.label_list_;
80 |   }
81 |
82 |  private:
83 |   // Preprocess image and copy data to input buffer
84 |   void Preprocess(const cv::Mat& image_mat);
85 |   // Postprocess result
86 |   void Postprocess(const std::vector<cv::Mat> mats,
87 |                    std::vector<ObjectResult>* result,
88 |                    int personnum);
89 |
90 |   std::shared_ptr<PaddlePredictor> predictor_;
91 |   Preprocessor preprocessor_;
92 |   ImageBlob inputs_;
93 |   std::vector<float> output_data_;
94 |   float threshold_;
95 |   ConfigPaser config_;
96 | };
97 |
98 | }  // namespace PaddleDetection
99 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ARM_ABI = arm8#[arm7/arm8]
2 | export ARM_ABI
3 |
4 | ifeq ($(ARM_ABI), arm8)
5 |   ARM_PLAT=arm64-v8a
6 | else
7 |   ARM_PLAT=armeabi-v7a
8 | endif
9 | ${info ARM_ABI: ${ARM_ABI}}
10 | ${info ARM_PLAT: ${ARM_PLAT}; option[arm7/arm8]}
11 |
12 | include ../Makefile.def
13 |
14 | LITE_ROOT=../../../
15 | ${info LITE_ROOT: $(abspath ${LITE_ROOT})}
16 |
17 | THIRD_PARTY_DIR=third_party
18 | ${info THIRD_PARTY_DIR: $(abspath ${THIRD_PARTY_DIR})}
19 |
20 |
21 | OPENCV_VERSION=opencv4.1.0
22 | OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_imgcodecs.a \
23 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_imgproc.a \
24 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/libs/libopencv_core.a \
25 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libtegra_hal.a \
26 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/liblibjpeg-turbo.a \
27 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/liblibwebp.a \
28 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/liblibpng.a \
29 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/liblibjasper.a \
30 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/liblibtiff.a \
31 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libIlmImf.a \
32 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libtbb.a \
33 |               ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/3rdparty/libs/libcpufeatures.a
34 |
35 |
36 | LITE_LIBS = -L${LITE_ROOT}/cxx/lib/ -lpaddle_light_api_shared
37 | ###############################################################
38 | # How to use one of the static libraries:                     #
39 | #   `libpaddle_api_full_bundled.a`                            #
40 | #   `libpaddle_api_light_bundled.a`                           #
41 | ###############################################################
42 | # Note: the lite shared library is used by default.           #
43 | ###############################################################
44 | # 1. Comment out the line above that uses `libpaddle_light_api_shared.so`
45 | # 2. Uncomment the line below that uses `libpaddle_api_light_bundled.a`
46 | # LITE_LIBS = ${LITE_ROOT}/cxx/lib/libpaddle_api_light_bundled.a
47 |
48 | CXX_LIBS = $(LITE_LIBS) ${OPENCV_LIBS} $(SYSTEM_LIBS)
49 |
50 | LOCAL_DIRSRCS=$(wildcard src/*.cc)
51 | LOCAL_SRCS=$(notdir $(LOCAL_DIRSRCS))
52 | LOCAL_OBJS=$(patsubst %.cpp, %.o, $(patsubst %.cc, %.o, $(LOCAL_SRCS)))
53 |
54 | JSON_OBJS = json_reader.o json_value.o json_writer.o
55 |
56 | movenet: $(LOCAL_OBJS) $(JSON_OBJS) fetch_opencv
57 | 	$(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) $(LOCAL_OBJS) $(JSON_OBJS) -o movenet $(CXX_LIBS) $(LDFLAGS)
58 |
59 | fetch_opencv:
60 | 	@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
61 | 	@ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \
62 | 	(echo "fetch opencv libs" && \
63 | 	wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)
64 | 	@ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \
65 | 	tar -zxf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}
66 |
67 | fetch_json_code:
68 | 	@ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}
69 | 	@ test -e ${THIRD_PARTY_DIR}/jsoncpp_code.tar.gz || \
70 | 	(echo "fetch jsoncpp_code.tar.gz" && \
71 | 	wget -P ${THIRD_PARTY_DIR} https://bj.bcebos.com/v1/paddledet/deploy/jsoncpp_code.tar.gz )
72 | 	@ test -d ${THIRD_PARTY_DIR}/jsoncpp_code || \
73 | 	tar -zxf ${THIRD_PARTY_DIR}/jsoncpp_code.tar.gz -C ${THIRD_PARTY_DIR}
74 |
75 | LOCAL_INCLUDES = -I./ -Iinclude
76 | OPENCV_INCLUDE = -I${THIRD_PARTY_DIR}/${OPENCV_VERSION}/${ARM_PLAT}/include
77 | JSON_INCLUDE = -I${THIRD_PARTY_DIR}/jsoncpp_code/include
78 | CXX_INCLUDES = ${LOCAL_INCLUDES} ${INCLUDES} ${OPENCV_INCLUDE} ${JSON_INCLUDE} -I$(LITE_ROOT)/cxx/include
79 |
80 |
81 | $(LOCAL_OBJS): %.o: src/%.cc fetch_opencv fetch_json_code
82 | 	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -c $< -o $@
83 |
84 | $(JSON_OBJS): %.o: ${THIRD_PARTY_DIR}/jsoncpp_code/%.cpp fetch_json_code
85 | 	$(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -c $< -o $@
86 |
87 | .PHONY: clean fetch_opencv fetch_json_code
88 | clean:
89 | 	rm -rf $(LOCAL_OBJS) $(JSON_OBJS)
90 | 	rm -f movenet
--------------------------------------------------------------------------------
/src/preprocess_op.cc:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #include <algorithm>
16 | #include <string>
17 | #include <vector>
18 |
19 | #include "include/preprocess_op.h"
20 |
21 | namespace PaddleDetection {
22 |
23 | void InitInfo::Run(cv::Mat* im, ImageBlob* data) {
24 |   data->im_shape_ = {static_cast<float>(im->rows),
25 |                      static_cast<float>(im->cols)};
26 |   data->scale_factor_ = {1., 1.};
27 |   data->in_net_shape_ = {static_cast<float>(im->rows),
28 |                          static_cast<float>(im->cols)};
29 | }
30 |
31 | void NormalizeImage::Run(cv::Mat* im, ImageBlob* data) {
32 |   double e = 1.0;
33 |   if (is_scale_) {
34 |     e *= 1. / 255.0;
35 |   }
36 |   (*im).convertTo(*im, CV_32FC3, e);  // per channel: out = (in * e - mean) / std
37 |   for (int h = 0; h < im->rows; h++) {
38 |     for (int w = 0; w < im->cols; w++) {
39 |       im->at<cv::Vec3f>(h, w)[0] =
40 |           (im->at<cv::Vec3f>(h, w)[0] - mean_[0]) / scale_[0];
41 |       im->at<cv::Vec3f>(h, w)[1] =
42 |           (im->at<cv::Vec3f>(h, w)[1] - mean_[1]) / scale_[1];
43 |       im->at<cv::Vec3f>(h, w)[2] =
44 |           (im->at<cv::Vec3f>(h, w)[2] - mean_[2]) / scale_[2];
45 |     }
46 |   }
47 | }
48 |
49 | void Permute::Run(cv::Mat* im, ImageBlob* data) {
50 |   (*im).convertTo(*im, CV_32FC3);
51 |   int rh = im->rows;
52 |   int rw = im->cols;
53 |   int rc = im->channels();
54 |   (data->im_data_).resize(rc * rh * rw);
55 |   auto base = (data->im_data_).data();
56 |   for (int i = 0; i < rc; ++i) {  // HWC -> CHW: copy each channel into the flat buffer
57 |     cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
58 |   }
59 | }
60 |
61 | void Resize::Run(cv::Mat* im, ImageBlob* data) {
62 |   auto resize_scale = GenerateScale(*im);
63 |   cv::resize(
64 |       *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
65 |   int target_size_max =
66 |       *std::max_element(target_size_.begin(), target_size_.end());
67 |   int xpad = target_size_max - im->cols;  // pad right/bottom to a square,
68 |   int ypad = target_size_max - im->rows;  // e.g. 192x144 -> 192x192
69 |   cv::copyMakeBorder(*im,
70 |                      *im,
71 |                      0,
72 |                      ypad,
73 |                      0,
74 |                      xpad,
75 |                      cv::BorderTypes::BORDER_CONSTANT,
76 |                      cv::Scalar(0, 0, 0));
77 |
78 |   data->im_shape_ = {
79 |       static_cast<float>(im->rows), static_cast<float>(im->cols),
80 |   };
81 |   data->in_net_shape_ = {
82 |       static_cast<float>(im->rows), static_cast<float>(im->cols),
83 |   };
84 |   data->scale_factor_ = {
85 |       resize_scale.second, resize_scale.first,
86 |   };
87 | }
88 |
89 | std::pair<float, float> Resize::GenerateScale(const cv::Mat& im) {
90 |   std::pair<float, float> resize_scale;
91 |   int origin_w = im.cols;
92 |   int origin_h = im.rows;
93 |
94 |   int im_size_max = std::max(origin_w, origin_h);
95 |   int target_size_max =
96 |       *std::max_element(target_size_.begin(), target_size_.end());
97 |
98 |   float scale_max =
99 |       static_cast<float>(target_size_max) / static_cast<float>(im_size_max);
100 |   float scale_ratio = scale_max;
101 |   resize_scale = {scale_ratio, scale_ratio};
102 |
103 |   return resize_scale;
104 | }
105 |
106 | void PadStride::Run(cv::Mat* im, ImageBlob* data) {
107 |   if (stride_ <= 0) {
108 |     return;
109 |   }
110 |   int rc = im->channels();
111 |   int rh = im->rows;
112 |   int rw = im->cols;
113 |   int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;  // round up to a multiple of stride_
114 |   int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
115 |   cv::copyMakeBorder(
116 |       *im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT, cv::Scalar(0));
117 |   data->in_net_shape_ = {
118 |       static_cast<float>(im->rows), static_cast<float>(im->cols),
119 |   };
120 | }
121 |
122 | // Preprocessor op running order
123 | const std::vector<std::string> Preprocessor::RUN_ORDER = {
124 |     "InitInfo", "Resize", "NormalizeImage", "PadStride", "Permute"};
125 |
126 | void Preprocessor::Run(cv::Mat* im, ImageBlob* data) {
127 |   for (const auto& name : RUN_ORDER) {
128 |     if (ops_.find(name) != ops_.end()) {
129 |       ops_[name]->Run(im, data);
130 |     }
131 |   }
132 | }
133 |
134 | }  // namespace PaddleDetection
--------------------------------------------------------------------------------
/include/preprocess_op.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #pragma once
16 |
17 | #include <iostream>
18 | #include <memory>
19 | #include <string>
20 | #include <unordered_map>
21 | #include <utility>
22 | #include <vector>
23 |
24 | #include <opencv2/core/core.hpp>
25 | #include <opencv2/highgui/highgui.hpp>
26 | #include <opencv2/imgproc/imgproc.hpp>
27 | #include "json/json.h"
28 |
29 | namespace PaddleDetection {
30 |
31 | // Object for storing all preprocessed data
32 | class ImageBlob {
33 |  public:
34 |   // image width and height
35 |   std::vector<float> im_shape_;
36 |   // Buffer for image data after preprocessing
37 |   std::vector<float> im_data_;
38 |   // in net data shape (after padding)
39 |   std::vector<float> in_net_shape_;
40 |   // Evaluation image width and height
41 |   // std::vector<float> eval_im_size_f_;
42 |   // Scale factor from input image size to original image size
43 |   std::vector<float> scale_factor_;
44 | };
45 |
46 | // Abstraction of preprocessing operation class
47 | class PreprocessOp {
48 |  public:
49 |   virtual void Init(const Json::Value& item) = 0;
50 |   virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
51 | };
52 |
53 | class InitInfo : public PreprocessOp {
54 |  public:
55 |   virtual void Init(const Json::Value& item) {}
56 |   virtual void Run(cv::Mat* im, ImageBlob* data);
57 | };
58 |
59 | class NormalizeImage : public PreprocessOp {
60 |  public:
61 |   virtual void Init(const Json::Value& item) {
62 |     mean_.clear();
63 |     scale_.clear();
64 |     for (auto tmp : item["mean"]) {
65 |       mean_.emplace_back(tmp.as<float>());
66 |     }
67 |     for (auto tmp : item["std"]) {
68 |       scale_.emplace_back(tmp.as<float>());
69 |     }
70 |     is_scale_ = item["is_scale"].as<bool>();
71 |   }
72 |
73 |   virtual void Run(cv::Mat* im, ImageBlob* data);
74 |
75 |  private:
76 |   // CHW or HWC
77 |   std::vector<float> mean_;
78 |   std::vector<float> scale_;
79 |   bool is_scale_;
80 | };
81 |
82 | class Permute : public PreprocessOp {
83 |  public:
84 |   virtual void Init(const Json::Value& item) {}
85 |   virtual void Run(cv::Mat* im, ImageBlob* data);
86 | };
87 |
88 | class Resize : public PreprocessOp {
89 |  public:
90 |   virtual void Init(const Json::Value& item) {
91 |     interp_ = item["interp"].as<int>();
92 |     // max_size_ = item["target_size"].as<int>();
93 |     keep_ratio_ = item["keep_ratio"].as<bool>();
94 |     target_size_.clear();
95 |     for (auto tmp : item["target_size"]) {
96 |       target_size_.emplace_back(tmp.as<int>());
97 |     }
98 |   }
99 |
100 |   // Compute best resize scale for x-dimension, y-dimension
101 |   std::pair<float, float> GenerateScale(const cv::Mat& im);
102 |
103 |   virtual void Run(cv::Mat* im, ImageBlob* data);
104 |
105 |  private:
106 |   int interp_;
107 |   bool keep_ratio_;
108 |   std::vector<int> target_size_;
109 |   std::vector<float> in_net_shape_;
110 | };
111 |
112 | // Models with FPN need input shape % stride == 0
113 | class PadStride : public PreprocessOp {
114 |  public:
115 |   virtual void Init(const Json::Value& item) {
116 |     stride_ = item["stride"].as<int>();
117 |   }
118 |
119 |   virtual void Run(cv::Mat* im, ImageBlob* data);
120 |
121 |  private:
122 |   int stride_;
123 | };
124 |
125 | class Preprocessor {
126 |  public:
127 |   void Init(const Json::Value& config_node) {
128 |     // initialize image info at first
129 |     ops_["InitInfo"] = std::make_shared<InitInfo>();
130 |     for (const auto& item : config_node) {
131 |       auto op_name = item["type"].as<std::string>();
132 |
133 |       ops_[op_name] = CreateOp(op_name);
134 |       ops_[op_name]->Init(item);
135 |     }
136 |   }
137 |
138 |   std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
139 |     if (name == "Resize") {
140 |       return std::make_shared<Resize>();
141 |     } else if (name == "Permute") {
142 |       return std::make_shared<Permute>();
143 |     } else if (name == "NormalizeImage") {
144 |       return std::make_shared<NormalizeImage>();
145 |     } else if (name == "PadStride") {
146 |       // use PadStride instead of PadBatch
147 |       return std::make_shared<PadStride>();
148 |     }
149 |     std::cerr << "can not find function of OP: " << name
150 |               << " and return: nullptr" << std::endl;
151 |     return nullptr;
152 |   }
153 |
154 |   void Run(cv::Mat* im, ImageBlob* data);
155 |
156 |  public:
157 |   static const std::vector<std::string> RUN_ORDER;
158 |
159 |  private:
160 |   std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
161 | };
162 |
163 | }  // namespace PaddleDetection
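The preprocessing ops are instantiated from the `Preprocess` array of `infer_cfg.json` and always executed in `RUN_ORDER`. A minimal sketch of driving the pipeline by hand, with illustrative parameters: a 640×480 frame with `target_size` [192, 192] is scaled by 192/640 = 0.3 to 192×144, padded to 192×192, and emitted as a CHW float buffer:

```cpp
#include <opencv2/opencv.hpp>
#include "include/preprocess_op.h"

int main() {
  // Hypothetical Preprocess node mirroring the shape of infer_cfg.json.
  Json::Value ops(Json::arrayValue);
  Json::Value resize;
  resize["type"] = "Resize";
  resize["interp"] = 1;  // cv::INTER_LINEAR
  resize["keep_ratio"] = true;
  resize["target_size"].append(192);
  resize["target_size"].append(192);
  ops.append(resize);
  Json::Value permute;
  permute["type"] = "Permute";
  ops.append(permute);

  PaddleDetection::Preprocessor pre;
  pre.Init(ops);  // registers InitInfo implicitly, then Resize and Permute

  cv::Mat im(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
  PaddleDetection::ImageBlob blob;
  pre.Run(&im, &blob);
  // blob.im_data_ now holds 3 * 192 * 192 floats in CHW order.
  return blob.im_data_.size() == 3 * 192 * 192 ? 0 : 1;
}
```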
--------------------------------------------------------------------------------
/src/main.cc:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | #include <math.h>
16 | #include <sys/stat.h>
17 | #include <sys/types.h>
18 | #include <algorithm>
19 | #include <fstream>
20 | #include <iostream>
21 | #include <numeric>
22 | #include <string>
23 | #include <vector>
24 |
25 | #include "include/config_parser.h"
26 | #include "include/movenet_detector.h"
27 | #include "include/preprocess_op.h"
28 | #include "json/json.h"
29 |
30 | Json::Value RT_Config;
31 |
32 | void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {
33 |   std::cout << "----------------------- Config info -----------------------"
34 |             << std::endl;
35 |   std::cout << "num_threads: " << RT_Config["cpu_threads"].as<int>()
36 |             << std::endl;
37 |   std::cout << "----------------------- Data info -----------------------"
38 |             << std::endl;
39 |   std::cout << "batch_size_det: " << RT_Config["batch_size_det"].as<int>()
40 |             << std::endl;
41 |   std::cout << "----------------------- Model info -----------------------"
42 |             << std::endl;
43 |   // Trim trailing '/' so the last path component is the model name
44 |   std::string model_dir = RT_Config["model_dir_det"].as<std::string>();
45 |   model_dir.erase(model_dir.find_last_not_of("/") + 1);
46 |   std::cout
47 |       << "detection model_name: "
48 |       << model_dir.substr(model_dir.find_last_of('/') + 1)
49 |       << std::endl;
50 |   std::cout << "----------------------- Perf info ------------------------"
51 |             << std::endl;
52 |   std::cout << "Total number of predicted data: " << img_num
53 |             << " and total time spent(ms): "
54 |             << std::accumulate(det_time.begin(), det_time.end(), 0.0)
55 |             << std::endl;
56 |   std::cout << "preprocess_time(ms): " << det_time[0] / img_num
57 |             << ", inference_time(ms): " << det_time[1] / img_num
58 |             << ", postprocess_time(ms): " << det_time[2] / img_num << std::endl;
59 | }
60 |
61 | static std::string DirName(const std::string& filepath) {
62 |   auto pos = filepath.rfind(OS_PATH_SEP);
63 |   if (pos == std::string::npos) {
64 |     return "";
65 |   }
66 |   return filepath.substr(0, pos);
67 | }
68 |
69 | static bool PathExists(const std::string& path) {
70 |   struct stat buffer;
71 |   return (stat(path.c_str(), &buffer) == 0);
72 | }
73 |
74 | static void MkDir(const std::string& path) {
75 |   if (PathExists(path)) return;
76 |   int ret = 0;
77 |   ret = mkdir(path.c_str(), 0755);
78 |   if (ret != 0) {
79 |     std::string path_error(path);
80 |     path_error += " mkdir failed!";
81 |     throw std::runtime_error(path_error);
82 |   }
83 | }
84 |
85 | static void MkDirs(const std::string& path) {
86 |   if (path.empty()) return;
87 |   if (PathExists(path)) return;
88 |
89 |   MkDirs(DirName(path));
90 |   MkDir(path);
91 | }
92 |
93 | void PredictImage(const std::vector<std::string> all_img_paths,
94 |                   const int batch_size_det,
95 |                   const double threshold_det,
96 |                   const bool run_benchmark,
97 |                   PaddleDetection::ObjectDetector* det,
98 |                   const std::string& output_dir = "output") {
99 |   std::vector<double> det_t = {0, 0, 0};
100 |   int steps = ceil(float(all_img_paths.size()) / batch_size_det);
101 |   int kpts_imgs = 0;
102 |   for (int idx = 0; idx < steps; idx++) {
103 |     std::vector<cv::Mat> batch_imgs;
104 |     int left_image_cnt = all_img_paths.size() - idx * batch_size_det;
105 |     if (left_image_cnt > batch_size_det) {
106 |       left_image_cnt = batch_size_det;
107 |     }
108 |     for (int bs = 0; bs < left_image_cnt; bs++) {
109 |       std::string image_file_path = all_img_paths.at(idx * batch_size_det + bs);
110 |       cv::Mat im = cv::imread(image_file_path, 1);
111 |       batch_imgs.insert(batch_imgs.end(), im);
112 |     }
113 |     // Store all detected result
114 |     std::vector<PaddleDetection::ObjectResult> result;
115 |     std::vector<double> det_times;
116 |     if (run_benchmark) {
117 |       det->Predict(batch_imgs, threshold_det, 10, 10, &result, &det_times);
118 |     } else {
119 |       det->Predict(batch_imgs, threshold_det, 0, 1, &result, &det_times);
120 |     }
121 |     // get labels and colormap
122 |     auto colormap = PaddleDetection::GenerateColorMap(20);
123 |     for (int i = 0; i < left_image_cnt; i++) {
124 |       cv::Mat im = batch_imgs[i];
125 |       int detect_num = result.size();
126 |
127 |       std::vector<int> compression_params;
128 |       compression_params.push_back(cv::IMWRITE_JPEG_QUALITY);
129 |       compression_params.push_back(95);
130 |       std::string output_path(output_dir);
131 |       if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {
132 |         output_path += OS_PATH_SEP;
133 |       }
134 |       std::string image_file_path = all_img_paths.at(idx * batch_size_det + i);
135 |       // Visualization result
136 |       cv::Mat vis_img = PaddleDetection::VisualizeResult(im, result, colormap);
137 |       std::string det_savepath =
138 |           output_path +
139 |           image_file_path.substr(image_file_path.find_last_of('/') + 1);
140 |       cv::imwrite(det_savepath, vis_img, compression_params);
141 |     }
142 |
143 |     det_t[0] += det_times[0];
144 |     det_t[1] += det_times[1];
145 |     det_t[2] += det_times[2];
146 |   }
147 |   PrintBenchmarkLog(det_t, all_img_paths.size());
148 | }
149 |
150 | int main(int argc, char** argv) {
151 |   std::cout << "Usage: " << argv[0]
152 |             << " [config_path](optional) [image_dir](optional)\n";
153 |   std::string config_path = "runtime_config.json";
154 |   std::string img_path = "";
155 |   if (argc >= 2) {
156 |     config_path = argv[1];
157 |     if (argc >= 3) {
158 |       img_path = argv[2];
159 |     }
160 |   }
161 |   // Parse the runtime configuration
162 |   PaddleDetection::load_jsonf(config_path, RT_Config);
163 |   if (RT_Config["model_dir_det"].as<std::string>().empty()) {
164 |     std::cout << "Please set [model_dir_det] in " << config_path << std::endl;
165 |     return -1;
166 |   }
167 |   if (RT_Config["image_file"].as<std::string>().empty() &&
168 |       RT_Config["image_dir"].as<std::string>().empty() && img_path.empty()) {
169 |     std::cout << "Please set [image_file] or [image_dir] in " << config_path
170 |               << ", or use command: <" << argv[0]
171 |               << " [config_path] [image_dir]>" << std::endl;
172 |     return -1;
173 |   }
174 |   if (!img_path.empty()) {
175 |     std::cout << "Using image_dir from the command line to override the path "
176 |               << "in the config file" << std::endl;
177 |     RT_Config["image_dir"] = img_path;
178 |     RT_Config["image_file"] = "";
179 |   }
180 |   // Load model and create an object detector
181 |   PaddleDetection::ObjectDetector det(
182 |       RT_Config["model_dir_det"].as<std::string>(),
183 |       RT_Config["cpu_threads"].as<int>(),
184 |       RT_Config["batch_size_det"].as<int>());
185 |   // Do inference on input image
186 |
187 |   if (!RT_Config["image_file"].as<std::string>().empty() ||
188 |       !RT_Config["image_dir"].as<std::string>().empty()) {
189 |     if (RT_Config["output_dir"].as<std::string>().empty()) {
190 |       RT_Config["output_dir"] = "output";
191 |     }
192 |     if (!PathExists(RT_Config["output_dir"].as<std::string>())) {
193 |       MkDirs(RT_Config["output_dir"].as<std::string>());
194 |     }
195 |     std::vector<std::string> all_img_paths;
196 |     std::vector<cv::String> cv_all_img_paths;
197 |     if (!RT_Config["image_file"].as<std::string>().empty()) {
198 |       all_img_paths.push_back(RT_Config["image_file"].as<std::string>());
199 |       if (RT_Config["batch_size_det"].as<int>() > 1) {
200 |         std::cout << "batch_size_det should be 1, when set `image_file`."
201 |                   << std::endl;
202 |         return -1;
203 |       }
204 |     } else {
205 |       cv::glob(RT_Config["image_dir"].as<std::string>(), cv_all_img_paths);
206 |       for (const auto& img_path : cv_all_img_paths) {
207 |         all_img_paths.push_back(img_path);
208 |       }
209 |     }
210 |     PredictImage(all_img_paths,
211 |                  RT_Config["batch_size_det"].as<int>(),
212 |                  RT_Config["threshold_det"].as<float>(),
213 |                  RT_Config["run_benchmark"].as<bool>(),
214 |                  &det,
215 |                  RT_Config["output_dir"].as<std::string>());
216 |   }
217 |   return 0;
218 | }
--------------------------------------------------------------------------------
/deploy_guide.md:
--------------------------------------------------------------------------------
1 | # Paddle-Lite On-Device Deployment
2 |
3 | This tutorial walks through the detailed steps for deploying PaddleDetection models on mobile devices with [Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite).
4 |
5 | Paddle Lite is PaddlePaddle's lightweight inference engine. It provides efficient inference for mobile and IoT devices, integrates a wide range of cross-platform hardware, and offers a lightweight solution for on-device deployment and application delivery.
6 |
7 | ## 1. Prepare the environment
8 |
9 | ### Requirements
10 | - A computer (to compile Paddle Lite)
11 | - An Android phone (armv7 or armv8)
12 |
13 | ### 1.1 Prepare the cross-compilation environment
14 | The cross-compilation environment is used to build Paddle Lite and the PaddleDetection C++ demo.
15 | Several development environments are supported; refer to the matching document for the build steps of each, and make sure the Java JDK and the Android NDK (r17 or newer) are installed.
16 |
17 | 1. [Docker](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#docker)
18 | 2. [Linux](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#linux)
19 | 3. [MAC OS](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_env.html#mac-os)
20 |
21 | ### 1.2 Prepare the prediction library
22 |
23 | There are two ways to obtain the prediction library:
24 | 1. [**Recommended**] Download it directly; the links are below (for MoveNet, use the v2.9 build):
25 |     |Platform|Prediction library download link|
26 |     |-|-|
27 |     |Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv7.gcc.c++_static.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv.tar.gz)|
28 |
29 | **Note**: 1. If you download the prediction library from the Paddle-Lite [official docs](https://paddle-lite.readthedocs.io/zh/latest/quick_start/release_lib.html#android-toolchain-gcc), be sure to pick the `with_extra=ON,with_cv=ON` download link. 2. Only an Android demo is provided at the moment; for iOS, refer to the [Paddle-Lite iOS demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/master/PaddleLite-ios-demo).
30 |
31 |
32 | 2. Build Paddle-Lite from source to get the prediction library (MoveNet support is problematic this way; not recommended):
33 | ```shell
34 | git clone https://github.com/PaddlePaddle/Paddle-Lite.git
35 | cd Paddle-Lite
36 | # If building from source, the develop branch is recommended for the prediction library
37 | git checkout develop
38 | ./lite/tools/build_android.sh --arch=armv8 --with_cv=ON --with_extra=ON
39 | ```
40 |
41 | **Note**: When building Paddle-Lite, the two options `--with_cv=ON --with_extra=ON` must be enabled, and `--arch` selects the arm version (armv8 here). See [this link](https://paddle-lite.readthedocs.io/zh/latest/source_compile/compile_andriod.html#id2) for more build options.
42 |
43 | After downloading and unpacking the prebuilt library you get the folder `inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv/`; a library built from source is located under `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/`.
44 | The layout of the prediction library is as follows:
45 |
46 | ```
47 | inference_lite_lib.android.armv8/
48 | |-- cxx                                     C++ prediction library and headers
49 | |   |-- include                             C++ headers
50 | |   |   |-- paddle_api.h
51 | |   |   |-- paddle_image_preprocess.h
52 | |   |   |-- paddle_lite_factory_helper.h
53 | |   |   |-- paddle_place.h
54 | |   |   |-- paddle_use_kernels.h
55 | |   |   |-- paddle_use_ops.h
56 | |   |   `-- paddle_use_passes.h
57 | |   `-- lib                                 C++ prediction libraries
58 | |       |-- libpaddle_api_light_bundled.a  C++ static library
59 | |       `-- libpaddle_light_api_shared.so  C++ shared library
60 | |-- java                                    Java prediction library
61 | |   |-- jar
62 | |   |   `-- PaddlePredictor.jar
63 | |   |-- so
64 | |   |   `-- libpaddle_lite_jni.so
65 | |   `-- src
66 | |-- demo                                    C++ and Java demo code
67 | |   |-- cxx                                 C++ demo
68 | |   `-- java                                Java demo
69 | ```
70 |
71 | ## 2 Getting started
72 |
73 | ### 2.1 Model optimization
74 |
75 | Paddle-Lite provides a variety of strategies to automatically optimize the original model, including quantization, subgraph fusion, hybrid scheduling, and kernel selection. The `opt` tool applies these optimizations to an inference model automatically; two ways of using it are supported. The optimized model is lighter and runs faster.
76 |
77 | **Note**: If you already have a model file ending in `.nb`, you can skip this step.
78 |
79 | #### 2.1.1 Install the paddle_lite_opt tool (again, please use the v2.9 opt)
80 | There are two ways to install paddle_lite_opt:
81 | 1. [**Recommended**] pip-install paddlelite and convert with it
82 | ```shell
83 | pip install paddlelite
84 | ```
85 |
86 | 2. Build Paddle-Lite from source to produce the opt tool
87 |
88 | Model optimization requires Paddle-Lite's `opt` executable, which can be obtained by compiling the Paddle-Lite source as follows:
89 | ```shell
90 | # If Paddle-Lite was already cloned while preparing the environment, there is no need to clone it again
91 | git clone https://github.com/PaddlePaddle/Paddle-Lite.git
92 | cd Paddle-Lite
93 | git checkout develop
94 | # Start the build
95 | ./lite/tools/build.sh build_optimize_tool
96 | ```
97 |
98 | After the build finishes, the `opt` binary is located under `build.opt/lite/api/`. Its options and usage can be inspected like this:
99 | ```shell
100 | cd build.opt/lite/api/
101 | ./opt
102 | ```
103 |
104 | `opt` takes exactly the same options and parameters as `paddle_lite_opt` above.
105 |
106 | The `paddle_lite_opt` tool can then be used to convert the inference model. Some of its parameters:
107 |
108 | |Option|Description|
109 | |-|-|
110 | |--model_file|Path to the network structure file of the PaddlePaddle model (combined format) to be optimized|
111 | |--param_file|Path to the weights file of the PaddlePaddle model (combined format) to be optimized|
112 | |--optimize_out_type|Output model type; protobuf and naive_buffer are currently supported, where naive_buffer is a lighter-weight serialization/deserialization implementation; defaults to naive_buffer|
113 | |--optimize_out|Output path of the optimized model|
114 | |--valid_targets|Backends the model may run on; defaults to arm. Currently x86, arm, opencl, npu and xpu are supported, and several backends may be given at once (space-separated); the Model Optimize Tool then picks the best one automatically. For Huawei NPU support (the DaVinci NPU in Kirin 810/990 SoCs), set it to "npu, arm"|
115 |
116 | See the [opt model conversion documentation](https://paddle-lite.readthedocs.io/zh/latest/user_guides/opt/opt_bin.html) for more details on using `paddle_lite_opt`.
117 |
118 | `--model_file` is the path to the inference model's model file and `--param_file` the path to its param file; `optimize_out` sets the name of the output file (do not append the `.nb` suffix). Running `paddle_lite_opt` directly on the command line also lists all parameters and their descriptions.
119 |
120 |
121 | #### 2.1.2 Conversion example
122 |
123 | The following shows how, inside the PaddleDetection project, `paddle_lite_opt` completes the conversion from a pretrained model to an inference model and then to a Paddle-Lite optimized model.
124 |
125 | ```shell
126 | # Enter the PaddleDetection root directory
127 | cd PaddleDetection_root_path
128 |
129 | # Convert the inference model into a Paddle-Lite optimized model
130 | paddle_lite_opt --valid_targets=arm --optimize_out_type=naive_buffer --model_file=output_inference/movenet/model.pdmodel --param_file=output_inference/movenet/model.pdiparams --optimize_out=output_inference/movenet/movenet
131 |
132 | ```
133 |
134 | In the end this produces the files `movenet.nb` and `infer_cfg.json` under the output_inference/movenet/ folder.
135 |
136 | **Note**: `--optimize_out` is the save path of the optimized model, without the `.nb` suffix; `--model_file` is the path to the model structure file and `--param_file` the path to the model weights file. Mind the file names.
137 |
138 | ### 2.2 Debugging on the phone
139 |
140 | Some preparation is needed first.
141 | 1. Prepare an arm8 Android phone. If the prediction library and opt file were built for armv7, an arm7 phone is required instead, and `ARM_ABI = arm7` must be set in the Makefile.
142 | 2. Install the ADB tool on the computer for debugging. Installation:
143 |
144 |     2.1. Install ADB on macOS:
145 |
146 |     ```shell
147 |     brew cask install android-platform-tools
148 |     ```
149 |     2.2. Install ADB on Linux
150 |     ```shell
151 |     sudo apt update
152 |     sudo apt install -y wget adb
153 |     ```
154 |     2.3. Install ADB on Windows
155 |
156 |     On Windows, download the ADB package from Google's Android developer site and install it: [link](https://developer.android.com/studio)
157 |
158 | 3. After connecting the phone to the computer, enable the phone's `USB debugging` option, choose `File transfer` mode, and type in a terminal on the computer:
159 |
160 |     ```shell
161 |     adb devices
162 |     ```
163 |     If a device is listed, the installation succeeded, e.g.:
164 |     ```
165 |     List of devices attached
166 |     744be294 device
167 |     ```
168 |
169 | 4. Build the lite deployment code to produce the mobile executable
170 |
171 |     ```shell
172 |     cd {PaddleDetection_Root}
173 |     cd deploy/lite/
174 |
175 |     inference_lite_path=/{lite prediction library path}/inference_lite_lib.android.armv8.gcc.c++_static.with_extra.with_cv/
176 |     mkdir $inference_lite_path/demo/cxx/lite
177 |
178 |     cp -r Makefile src/ include/ runtime_config.json $inference_lite_path/demo/cxx/lite
179 |
180 |     cd $inference_lite_path/demo/cxx/lite
181 |
182 |     # Run the build; when it finishes, the executable movenet is produced
183 |     make ARM_ABI=arm8
184 |     # For arm7, run make ARM_ABI=arm7 instead (or change the value in the Makefile)
185 |
186 |     ```
187 |
188 | 5. Prepare the optimized model, the prediction library file, and a test image.
189 |
190 |     ```shell
191 |     mkdir deploy
192 |     cp movenet runtime_config.json deploy/
193 |     cd deploy
194 |     mkdir model_det
195 |
196 |     # Place the optimized model, prediction library file, and test image under demo/cxx/detection in the prediction library
197 |     cp {PaddleDetection_Root}/output_inference/movenet/movenet.nb ./model_det/
198 |     cp {PaddleDetection_Root}/output_inference/movenet/infer_cfg.json ./model_det/
199 |
200 |     # Copy the test image into the deploy folder
201 |     cp [your_test_img].jpg ./demo.jpg
202 |
203 |     # Copy the C++ shared prediction library (.so) into the deploy folder
204 |     cp ../../../cxx/lib/libpaddle_light_api_shared.so ./
205 |     ```
206 |
207 |     After this, the deploy folder has the following layout:
208 |
209 |     ```
210 |     deploy/
211 |     |-- model_det/
212 |     |   |--movenet.nb          MoveNet model file
213 |     |   |--infer_cfg.json      detector model configuration file
214 |     |-- movenet                mobile executable produced by the build
215 |     |-- runtime_config.json    runtime parameter configuration file
216 |     |-- libpaddle_light_api_shared.so  Paddle-Lite library file
217 |     ```
218 |
219 |     **Note:**
220 |     * `runtime_config.json` holds the detector's hyperparameters; adjust them as needed (the paths and files referenced in the config must exist):
221 |
222 |     ```shell
223 |     {
224 |       "model_dir_det": "./model_det/",  # detector model path
225 |       "batch_size_det": 1,              # batch size for detection
226 |       "threshold_det": 0.5,             # detector output threshold
227 |       "image_file": "demo.jpg",         # test image
228 |       "image_dir": "",                  # test image directory
229 |       "run_benchmark": true,            # benchmark switch; set to true for speed tests (times are averaged over repeated runs)
230 |       "cpu_threads": 1                  # number of threads
231 |     }
232 |     ```
233 |
234 | 6. Start debugging. Once the steps above are done, use ADB to push the `deploy/` folder to the phone and run it:
235 |
236 |     ```shell
237 |     # Push the deploy folder to the phone
238 |     adb push deploy /data/local/tmp/
239 |
240 |     adb shell
241 |     cd /data/local/tmp/deploy
242 |     export LD_LIBRARY_PATH=/data/local/tmp/deploy:$LD_LIBRARY_PATH
243 |
244 |     # Make it executable
245 |     chmod 777 movenet
246 |     # Run the program
247 |     ./movenet
248 |     ```
249 |
250 |     If the code is modified, it must be rebuilt and pushed to the phone again.
251 |
252 |     The running result looks as follows:
253 |
254 |     *(result screenshots omitted from this dump)*
257 |
258 |
259 | ## FAQ
260 | Q1: What if I want to switch models — do I need to go through the whole flow again?
261 | A1: If the steps above already work, switching models only requires replacing the `.nb` model file; also remember to update the `.nb` file path in the config file, and the label mapping file if necessary.
262 |
263 | Q2: How do I test with a different image?
264 | A2: Replace the test image under deploy with the image you want to test, and push it to the phone again with ADB.
--------------------------------------------------------------------------------
/src/movenet_detector.cc:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | #include <sstream>
15 | // for setprecision
16 | #include <iomanip>
17 | #include <chrono>
18 | #include "include/movenet_detector.h"
19 |
20 | namespace PaddleDetection {
21 |
22 | // Load model and create the predictor
23 | void ObjectDetector::LoadModel(std::string model_file, int num_threads) {
24 |   MobileConfig config;
25 |   config.set_threads(num_threads);
26 |   config.set_model_from_file(model_file + "/movenet.nb");
27 |   config.set_power_mode(LITE_POWER_HIGH);
28 |
29 |   predictor_ = CreatePaddlePredictor<MobileConfig>(config);
30 | }
31 |
32 | // Visualization of detection results
33 | cv::Mat VisualizeResult(const cv::Mat& img,
34 |                         const std::vector<ObjectResult>& results,
35 |                         const std::vector<int>& colormap,
36 |                         float threshold) {
37 |   cv::Mat vis_img = img.clone();
38 |   printf("\nINFO: Detect person number: %d\n", static_cast<int>(results.size()));
39 |   if (results.size() > 1) {
40 |     for (int i = 0; i < results.size(); ++i) {
41 |       printf("INFO: Number {%d} rect :[ %d %d %d %d ]\n",
42 |              i + 1,
43 |              static_cast<int>(results[i].rect[0]),
44 |              static_cast<int>(results[i].rect[1]),
45 |              static_cast<int>(results[i].rect[2]),
46 |              static_cast<int>(results[i].rect[3]));
47 |       // Configure color and text size
48 |       std::ostringstream oss;
49 |       oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
50 |       oss << results[i].confidence;
51 |       std::string text = oss.str();
52 |       int c1 = colormap[i * 3];
53 |       int c2 = colormap[i * 3 + 1];
54 |       int c3 = colormap[i * 3 + 2];
55 |       cv::Scalar roi_color = cv::Scalar(c1, c2, c3);
56 |       int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;
57 |       double font_scale = 0.5f;
58 |       int thickness = 1;  // putText/getTextSize expect an integer thickness
59 |       cv::Size text_size =
60 |           cv::getTextSize(text, font_face, font_scale, thickness, nullptr);
61 |       cv::Point origin;
62 |
63 |       int w = results[i].rect[2] - results[i].rect[0];
64 |       int h = results[i].rect[3] - results[i].rect[1];
65 |       cv::Rect roi = cv::Rect(results[i].rect[0], results[i].rect[1], w, h);
66 |       // Draw roi object, text, and background
67 |       cv::rectangle(vis_img, roi, roi_color, 2);
68 |
69 |       origin.x = results[i].rect[0];
70 |       origin.y = results[i].rect[1];
71 |
72 |       // Configure text background
73 |       cv::Rect text_back = cv::Rect(results[i].rect[0],
74 |                                     results[i].rect[1] - text_size.height,
75 |                                     text_size.width,
76 |                                     text_size.height);
77 |       // Draw text, and background
78 |       cv::rectangle(vis_img, text_back, roi_color, -1);
79 |       cv::putText(vis_img,
80 |                   text,
81 |                   origin,
82 |                   font_face,
83 |                   font_scale,
84 |                   cv::Scalar(255, 255, 255),
85 |                   thickness);
86 |     }
87 |   }
88 |
89 |   const int edge[][2] = {{0, 1},
90 |                          {0, 2},
91 |                          {1, 3},
92 |                          {2, 4},
93 |                          {3, 5},
94 |                          {4, 6},
95 |                          {5, 7},
96 |                          {6, 8},
97 |                          {7, 9},
98 |                          {8, 10},
99 |                          {5, 11},
100 |                          {6, 12},
101 |                          {11, 13},
102 |                          {12, 14},
103 |                          {13, 15},
104 |                          {14, 16},
105 |                          {11, 12}};
106 |   for (int batchid = 0; batchid < results.size(); batchid++) {
107 |     for (int i = 0; i < 17; i++) {
108 |       int c1 = colormap[i * 3];
109 |       int c2 = colormap[i * 3 + 1];
110 |       int c3 = colormap[i * 3 + 2];
111 |       cv::Scalar roi_color = cv::Scalar(c1, c2, c3);
112 |
113 |       if (results[batchid].kpts[i * 3] > threshold) {  // keypoint score
114 |         int x_coord = int(results[batchid].kpts[i * 3 + 1]);
115 |         int y_coord = int(results[batchid].kpts[i * 3 + 2]);
116 |         cv::circle(vis_img, cv::Point2d(x_coord, y_coord), 1, roi_color, 2);
117 |       }
118 |     }
119 |     for (int i = 0; i < 17; i++) {
120 |       int c1 = colormap[i * 3];
121 |       int c2 = colormap[i * 3 + 1];
122 |       int c3 = colormap[i * 3 + 2];
123 |       cv::Scalar roi_color = cv::Scalar(c1, c2, c3);
124 |
125 |       if (results[batchid].kpts[edge[i][0] * 3] > threshold &&
126 |           results[batchid].kpts[edge[i][1] * 3] > threshold) {
127 |         int x_start = int(results[batchid].kpts[edge[i][0] * 3 + 1]);
128 |         int y_start = int(results[batchid].kpts[edge[i][0] * 3 + 2]);
129 |         int x_end = int(results[batchid].kpts[edge[i][1] * 3 + 1]);
130 |         int y_end = int(results[batchid].kpts[edge[i][1] * 3 + 2]);
131 |         cv::line(vis_img,
132 |                  cv::Point2d(x_start, y_start),
133 |                  cv::Point2d(x_end, y_end),
134 |                  roi_color,
135 |                  3);
136 |       }
137 |     }
138 |   }
139 |   return vis_img;
140 | }
141 |
142 | void ObjectDetector::Preprocess(const cv::Mat& ori_im) {
143 |   // Clone the image : keep the original mat for postprocess
144 |   cv::Mat im = ori_im.clone();
145 |   cv::cvtColor(im, im, cv::COLOR_BGR2RGB);
146 |   preprocessor_.Run(&im, &inputs_);
147 | }
148 |
149 | void ObjectDetector::Postprocess(const std::vector<cv::Mat> mats,
150 |                                  std::vector<ObjectResult>* result,
151 |                                  int personnum) {
152 |   int h = mats[0].rows;
153 |   int w = mats[0].cols;  // the net input was letterboxed to a square, so
154 |   if (h > w) {           // denormalize coordinates by the longer side
155 |     w = h;
156 |   }
157 |   if (w > h) {
158 |     h = w;
159 |   }
160 |
161 |   for (int i = 0; i < personnum; i++) {  // each person occupies 56 floats:
162 |     float conf = 1.;                     // 17*(y, x, score) + [ymin, xmin, ymax, xmax, score]
163 |     if (personnum > 1) {
164 |       conf = output_data_[55 + i * 56];
165 |       if (conf < threshold_) {
166 |         continue;
167 |       }
168 |     }
169 |     ObjectResult itemres;
170 |     itemres.rect.resize(4);
171 |     itemres.kpts.resize(17 * 3);
172 |     itemres.confidence = conf;
173 |     if (personnum > 1) {
174 |       itemres.rect[0] = output_data_[52 + i * 56] * w;  // xmin
175 |       itemres.rect[1] = output_data_[51 + i * 56] * h;  // ymin
176 |       itemres.rect[2] = output_data_[54 + i * 56] * w;  // xmax
177 |       itemres.rect[3] = output_data_[53 + i * 56] * h;  // ymax
178 |     }
179 |     for (int j = 0; j < 17; j++) {
180 |       itemres.kpts[j * 3] = output_data_[j * 3 + 2 + i * 56];          // score
181 |       itemres.kpts[j * 3 + 1] = output_data_[j * 3 + 1 + i * 56] * w;  // x
182 |       itemres.kpts[j * 3 + 2] = output_data_[j * 3 + i * 56] * h;      // y
183 |     }
184 |     result->emplace_back(itemres);
185 |   }
186 | }
187 |
188 | void ObjectDetector::Predict(const std::vector<cv::Mat>& imgs,
189 |                              const double threshold,
190 |                              const int warmup,
191 |                              const int repeats,
192 |                              std::vector<ObjectResult>* result,
193 |                              std::vector<double>* times) {
194 |   auto preprocess_start = std::chrono::steady_clock::now();
195 |   int batch_size = imgs.size();
196 |
197 |   // in_data_batch
198 |   std::vector<float> in_data_all;
199 |   std::vector<float> im_shape_all(batch_size * 2);
200 |   std::vector<float> scale_factor_all(batch_size * 2);
201 |   // Preprocess image
202 |   for (int bs_idx = 0; bs_idx < batch_size; bs_idx++) {
203 |     cv::Mat im = imgs.at(bs_idx);
204 |     Preprocess(im);
205 |     im_shape_all[bs_idx * 2] = inputs_.im_shape_[0];
206 |     im_shape_all[bs_idx * 2 + 1] = inputs_.im_shape_[1];
207 |
208 |     scale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];
209 |     scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];
210 |
211 |     // TODO: reduce cost time
212 |     in_data_all.insert(
213 |         in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());
214 |   }
215 |   auto preprocess_end = std::chrono::steady_clock::now();
216 |   // Prepare input tensor
217 |   auto input_names = predictor_->GetInputNames();
218 |   for (const auto& tensor_name : input_names) {
219 |     auto in_tensor = predictor_->GetInputByName(tensor_name);
220 |     int rh = inputs_.in_net_shape_[0];
221 |     int rw = inputs_.in_net_shape_[1];
222 |     in_tensor->Resize({batch_size, 3, rh, rw});
223 |     auto* inptr = in_tensor->mutable_data<float>();
224 |     std::copy_n(in_data_all.data(), in_data_all.size(), inptr);
225 |   }
226 |
227 |   // Run predictor
228 |   // warmup
229 |   for (int i = 0; i < warmup; i++) {
230 |     predictor_->Run();
231 |     // Get output tensor
232 |     auto output_names = predictor_->GetOutputNames();
233 |     auto out_tensor = predictor_->GetTensor(output_names[0]);
234 |   }
235 |
236 |   auto inference_start = std::chrono::steady_clock::now();
237 |   int personnum = 1;
238 |   for (int i = 0; i < repeats; i++) {
239 |     predictor_->Run();
240 |     // Get output tensor
241 |     auto output_names = predictor_->GetOutputNames();
242 |     auto output_tensor = predictor_->GetTensor(output_names[0]);
243 |     auto output_shape = output_tensor->shape();
244 |     // Calculate output length
245 |     int output_size = 1;
246 |     for (int j = 0; j < output_shape.size(); ++j) {
247 |       output_size *= output_shape[j];
248 |     }
249 |     personnum = output_shape[1];
250 |
251 |     if (output_size < 6) {
252 |       std::cerr << "[WARNING] No object detected." << std::endl;
253 |     }
254 |     output_data_.resize(output_size);
255 |     std::copy_n(
256 |         output_tensor->mutable_data<float>(), output_size, output_data_.data());
257 |   }
258 |   auto inference_end = std::chrono::steady_clock::now();
259 |   auto postprocess_start = std::chrono::steady_clock::now();
260 |   // Postprocessing result
261 |   result->clear();
262 |   Postprocess(imgs, result, personnum);
263 |   auto postprocess_end = std::chrono::steady_clock::now();
264 |
265 |   std::chrono::duration<float> preprocess_diff =
266 |       preprocess_end - preprocess_start;
267 |   times->push_back(double(preprocess_diff.count() * 1000));
268 |   std::chrono::duration<float> inference_diff = inference_end - inference_start;
269 |   times->push_back(double(inference_diff.count() / repeats * 1000));
270 |   std::chrono::duration<float> postprocess_diff =
271 |       postprocess_end - postprocess_start;
272 |   times->push_back(double(postprocess_diff.count() * 1000));
273 | }
274 |
275 | std::vector<int> GenerateColorMap(int num_class) {
276 |   auto colormap = std::vector<int>(3 * num_class, 0);
277 |   for (int i = 0; i < num_class; ++i) {
278 |     int j = 0;
279 |     int lab = i;
280 |     while (lab) {
281 |       colormap[i * 3] |= (((lab >> 0) & 1) << (7 - j));
282 |       colormap[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j));
283 |       colormap[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j));
284 |       ++j;
285 |       lab >>= 3;
286 |     }
287 |   }
288 |   return colormap;
289 | }
290 |
291 | }  // namespace PaddleDetection
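For reference when reading `Postprocess` above: in the multi-person case, each person occupies 56 consecutive floats of the output tensor — 17 keypoint triplets in (y, x, score) order, followed by a normalized box [ymin, xmin, ymax, xmax] and its score — which is where the indices 51–55 come from. A decoding sketch (the `Keypoint` struct is a stand-in for illustration, not part of the repo):

```cpp
#include <vector>

struct Keypoint { float y, x, score; };  // stand-in type for illustration

// Decode one person's 56-float slice: 17 * (y, x, score) keypoints followed
// by [ymin, xmin, ymax, xmax, score]. Coordinates are normalized to the
// square (letterboxed) input, so they are scaled by the padded side length,
// exactly as ObjectDetector::Postprocess does.
void DecodePerson(const float* p, float side,
                  std::vector<Keypoint>* kpts, float box[4], float* score) {
  for (int j = 0; j < 17; ++j) {
    kpts->push_back({p[j * 3] * side,       // y
                     p[j * 3 + 1] * side,   // x
                     p[j * 3 + 2]});        // keypoint confidence
  }
  box[0] = p[51] * side;  // ymin
  box[1] = p[52] * side;  // xmin
  box[2] = p[53] * side;  // ymax
  box[3] = p[54] * side;  // xmax
  *score = p[55];         // person confidence
}
```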
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------