├── .bazelrc ├── .gitignore ├── .gitlab-ci.yml ├── BUILD ├── LICENSE ├── README.md ├── README_zh.md ├── WORKSPACE ├── aibench ├── BUILD ├── aibench.bzl ├── benchmark │ ├── BUILD │ ├── benchmark.cc │ ├── benchmark.h │ ├── benchmark_main.cc │ └── imagenet │ │ ├── imagenet_blacklist.txt │ │ ├── imagenet_groundtruth_labels.txt │ │ ├── imagenet_postprocessor.cc │ │ ├── imagenet_postprocessor.h │ │ ├── imagenet_preprocessor.cc │ │ ├── imagenet_preprocessor.h │ │ └── mobilenet_model_labels.txt ├── executors │ ├── BUILD │ ├── base_executor.cc │ ├── base_executor.h │ ├── hiai │ │ ├── hiai_executor.cc │ │ └── hiai_executor.h │ ├── mace │ │ ├── mace_executor.cc │ │ └── mace_executor.h │ ├── mnn │ │ ├── mnn_executor.cc │ │ └── mnn_executor.h │ ├── ncnn │ │ ├── ncnn_executor.cc │ │ └── ncnn_executor.h │ ├── snpe │ │ ├── snpe_executor.cc │ │ └── snpe_executor.h │ ├── tflite │ │ ├── tflite_executor.cc │ │ └── tflite_executor.h │ └── tnn │ │ ├── tnn_executor.cc │ │ └── tnn_executor.h ├── port │ ├── BUILD.bazel │ ├── android │ │ ├── BUILD.bazel │ │ ├── env.cc │ │ ├── env.h │ │ ├── logger.cc │ │ ├── logger.h │ │ ├── malloc_logger.cc │ │ └── malloc_logger.h │ ├── env.cc │ ├── linux │ │ ├── BUILD.bazel │ │ ├── env.cc │ │ └── env.h │ ├── linux_base │ │ ├── BUILD.bazel │ │ ├── env.cc │ │ └── env.h │ ├── logger.cc │ └── posix │ │ ├── BUILD.bazel │ │ ├── backtrace.h │ │ ├── file_system.cc │ │ ├── file_system.h │ │ └── time.h ├── proto │ ├── BUILD │ ├── aibench.proto │ ├── base.proto │ ├── benchmark.meta │ ├── model.meta │ └── test.py ├── python │ ├── BUILD │ ├── bench_engine.py │ ├── benchmark.py │ ├── device │ │ ├── adb_device.py │ │ ├── device.py │ │ ├── device_manager.py │ │ ├── host_device.py │ │ ├── huawei_adb_device.py │ │ ├── qualcomm_adb_device.py │ │ └── ssh_device.py │ ├── evaluators │ │ ├── base_evaluator.py │ │ ├── coco_evaluator.py │ │ └── evaluator_test.py │ └── utils │ │ ├── bench_utils.py │ │ ├── common.py │ │ └── sh_commands.py └── utils │ ├── BUILD.bazel │ └── status.cc ├── include ├── BUILD.bazel └── aibench │ ├── port │ ├── env.h │ ├── file_system.h │ └── logger.h │ ├── public │ └── aibench.h │ └── utils │ ├── logging.h │ ├── macros.h │ ├── memory.h │ └── string_util.h ├── logo.png ├── report ├── csv_to_html.py └── index.html ├── requirements.txt ├── third_party ├── compilers │ ├── aarch64_compiler.BUILD │ └── arm_compiler.BUILD ├── gflags │ └── COPYING.txt ├── googletest │ ├── LICENSE │ └── googletest.BUILD ├── hiai │ └── hiai.BUILD ├── mace │ ├── BUILD │ └── LICENSE ├── mnn │ ├── BUILD │ └── LICENSE ├── ncnn │ ├── LICENSE.txt │ └── ncnn.BUILD ├── opencv │ ├── LICENSE.txt │ └── opencv.BUILD ├── qspower │ └── LICENSE.txt ├── six │ ├── LICENSE │ └── six.BUILD ├── snpe │ ├── LICENSE.txt │ └── snpe.BUILD ├── tflite │ ├── BUILD │ └── LICENSE └── tnn │ ├── LICENSE │ └── tnn.BUILD └── tools ├── aarch64_compiler ├── BUILD ├── CROSSTOOL └── linaro_linux_gcc │ ├── BUILD │ ├── aarch64-linux-gnu-ar │ ├── aarch64-linux-gnu-as │ ├── aarch64-linux-gnu-gcc │ ├── aarch64-linux-gnu-gcov │ ├── aarch64-linux-gnu-ld │ ├── aarch64-linux-gnu-nm │ ├── aarch64-linux-gnu-objcopy │ ├── aarch64-linux-gnu-objdump │ └── aarch64-linux-gnu-strip ├── arm_compiler ├── BUILD ├── CROSSTOOL └── linaro_linux_gcc │ ├── BUILD │ ├── arm-linux-gnueabihf-ar │ ├── arm-linux-gnueabihf-as │ ├── arm-linux-gnueabihf-gcc │ ├── arm-linux-gnueabihf-gcov │ ├── arm-linux-gnueabihf-ld │ ├── arm-linux-gnueabihf-nm │ ├── arm-linux-gnueabihf-objcopy │ ├── arm-linux-gnueabihf-objdump │ └── arm-linux-gnueabihf-strip ├── bazel.rc ├── benchmark.sh ├── 
build_mace.sh ├── cmake_toolchain ├── BUILD ├── cmake.bzl └── toolchain.cmake.tpl ├── common.py ├── configs.yml ├── device.py ├── google-format.sh └── power.sh /.bazelrc: -------------------------------------------------------------------------------- 1 | # To support new bazelrc file list: 2 | # https://github.com/bazelbuild/bazel/issues/4502 3 | import %workspace%/tools/bazel.rc 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | bazel-* 3 | *.pyc 4 | .project/ 5 | output/ 6 | mace-models/ 7 | mace/ 8 | nohup.out 9 | third_party/mace/include/ 10 | third_party/mace/lib/ 11 | third_party/tflite/flatbuffers/ 12 | third_party/tflite/tensorflow/ 13 | cmake-build-debug/ 14 | dataset/ 15 | CMakeLists.txt 16 | .python-version 17 | *~ 18 | *swp 19 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - cpplint 3 | - pycodestyle 4 | - benchmark 5 | 6 | cpplint: 7 | stage: cpplint 8 | script: 9 | - cpplint --linelength=80 --counting=detailed $(find aibench -name "*.h" -or -name "*.cc") 10 | 11 | pycodestyle: 12 | stage: pycodestyle 13 | script: 14 | - pycodestyle $(find aibench/python -name "*.py") 15 | 16 | benchmark: 17 | stage: benchmark 18 | script: 19 | - set -x 20 | - pwd 21 | - ifconfig 22 | - DATE_STR=`date "+%Y_%m_%d"` 23 | - CI_PROJECT_OUTPUT_PATH=/mace-build-output/$CI_PROJECT_NAME/${DATE_STR}_${CI_PIPELINE_ID} 24 | - mkdir -p $CI_PROJECT_OUTPUT_PATH 25 | - ANDROID_NDK_HOME=/opt/android-ndk-r17b/ 26 | - DAY_OF_WEEK=`date +%u` 27 | - > 28 | git clone git@v9.git.n.xiaomi.com:deep-computing/generic-mobile-devices.git; 29 | if [ $DAY_OF_WEEK -lt 6 ]; then 30 | bash tools/benchmark.sh --output_dir=$CI_PROJECT_OUTPUT_PATH --benchmark_option=Precision --target_abis=armeabi-v7a,arm64-v8a,aarch64,armhf --max_time_per_lock=30 --executors=MACE,SNPE --input_dir=http --num_targets=1; 31 | bash tools/benchmark.sh --output_dir=$CI_PROJECT_OUTPUT_PATH --target_abis=armeabi-v7a,arm64-v8a,aarch64,armhf --max_time_per_lock=30 --num_targets=1; 32 | else 33 | bash tools/benchmark.sh --output_dir=$CI_PROJECT_OUTPUT_PATH --benchmark_option=Precision --target_abis=armeabi-v7a,arm64-v8a,aarch64,armhf --max_time_per_lock=30 --executors=MACE,SNPE --input_dir=http --all_devices_at_once; 34 | bash tools/benchmark.sh --output_dir=$CI_PROJECT_OUTPUT_PATH --target_abis=armeabi-v7a,arm64-v8a,aarch64,armhf --max_time_per_lock=30 --all_devices_at_once; 35 | fi 36 | - > 37 | cp $CI_PROJECT_OUTPUT_PATH/*_report.csv .; 38 | echo "------------------------------------------------------------------"; 39 | echo "Prepare Time (see FAQ section in README for more explanations)"; 40 | echo "------------------------------------------------------------------"; 41 | python -c 'import prettytable,sys;print(prettytable.from_csv(sys.stdin));' 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include "aibench/port/logger.h" 25 | #include "aibench/executors/base_executor.h" 26 | 27 | namespace aibench { 28 | namespace benchmark { 29 | 30 | class Benchmark { 31 | public: 32 | Benchmark(BaseExecutor *executor, 33 | const ModelName &model_name, 34 | const bool quantize, 35 | const std::vector &input_names, 36 | const std::vector> &input_shapes, 37 | const std::vector &output_names, 38 | const std::vector> &output_shapes, 39 | const int 
run_interval, 40 | const int num_threads); 41 | 42 | virtual Status Run() = 0; 43 | virtual ~Benchmark() = default; 44 | 45 | protected: 46 | virtual std::string GetBenchmarkInfo() const; 47 | Status LogResult(const std::string &result); 48 | 49 | BaseExecutor *executor_; 50 | ModelName model_name_; 51 | bool quantize_; 52 | std::vector input_names_; 53 | std::vector> input_shapes_; 54 | std::vector output_names_; 55 | std::vector> output_shapes_; 56 | int run_interval_; 57 | int num_threads_; 58 | }; 59 | 60 | class PerformanceBenchmark : public Benchmark { 61 | public: 62 | PerformanceBenchmark(BaseExecutor *executor, 63 | const ModelName &model_name, 64 | const bool quantize, 65 | const std::vector &input_names, 66 | const std::vector> &input_shapes, 67 | const std::vector &output_names, 68 | const std::vector> &output_shapes, 69 | const int run_interval, 70 | const int num_threads) 71 | : Benchmark(executor, 72 | model_name, 73 | quantize, 74 | input_names, 75 | input_shapes, 76 | output_names, 77 | output_shapes, 78 | run_interval, 79 | num_threads) {} 80 | 81 | public: 82 | Status Run() override; 83 | 84 | private: 85 | Status Run(double *init_seconds, double *run_seconds); 86 | }; 87 | 88 | class PreProcessor { 89 | public: 90 | virtual Status Run(const std::string &filename, 91 | std::map *inputs) = 0; 92 | }; 93 | class PostProcessor { 94 | public: 95 | virtual Status Run(const std::string &filename, 96 | const std::map &outputs) = 0; 97 | virtual std::string GetResult() = 0; 98 | }; 99 | 100 | class PrecisionBenchmark : public Benchmark { 101 | public: 102 | PrecisionBenchmark( 103 | BaseExecutor *executor, 104 | const ModelName &model_name, 105 | const bool quantize, 106 | const std::vector &input_names, 107 | const std::vector> &input_shapes, 108 | const std::vector &output_names, 109 | const std::vector> &output_shapes, 110 | const int run_interval, 111 | const int num_threads, 112 | std::unique_ptr pre_processor, 113 | std::unique_ptr post_processor, 114 | const MetricEvaluator_MetricEvaluatorType metric_evaluator_type) 115 | : Benchmark(executor, 116 | model_name, 117 | quantize, 118 | input_names, 119 | input_shapes, 120 | output_names, 121 | output_shapes, 122 | run_interval, 123 | num_threads), 124 | pre_processor_(std::move(pre_processor)), 125 | post_processor_(std::move(post_processor)), 126 | metric_evaluator_type_(metric_evaluator_type) {} 127 | 128 | Status Run() override; 129 | 130 | protected: 131 | std::string GetBenchmarkInfo() const override; 132 | 133 | private: 134 | Status Evaluate(); 135 | 136 | std::unique_ptr pre_processor_; 137 | std::unique_ptr post_processor_; 138 | MetricEvaluator_MetricEvaluatorType metric_evaluator_type_; 139 | }; 140 | 141 | int64_t NowMicros(); 142 | 143 | } // namespace benchmark 144 | } // namespace aibench 145 | 146 | #endif // AIBENCH_BENCHMARK_BENCHMARK_H_ 147 | -------------------------------------------------------------------------------- /aibench/benchmark/imagenet/imagenet_postprocessor.cc: -------------------------------------------------------------------------------- 1 | 2 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
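The Benchmark hierarchy declared above in aibench/benchmark/benchmark.h splits timing (PerformanceBenchmark) from accuracy measurement (PrecisionBenchmark, which chains a PreProcessor, the executor, and a PostProcessor). benchmark.cc is not reproduced in this dump, so the following is only a minimal sketch of how init time and run time could be measured around the BaseExecutor lifecycle with NowMicros(); the real PerformanceBenchmark::Run likely averages several rounds and honours run_interval between them.

// Illustrative sketch only -- not the project's PerformanceBenchmark::Run.
#include <map>
#include <string>
#include "aibench/benchmark/benchmark.h"

namespace aibench {
namespace benchmark {

Status TimeOneRound(BaseExecutor *executor,
                    const std::map<std::string, BaseTensor> &inputs,
                    std::map<std::string, BaseTensor> *outputs,
                    double *init_seconds, double *run_seconds) {
  int64_t t0 = NowMicros();
  executor->Init(/*num_threads=*/4);   // one-off setup, e.g. OpenCL kernel compilation
  executor->Prepare();                 // load the model
  *init_seconds = (NowMicros() - t0) * 1e-6;

  int64_t t1 = NowMicros();
  Status status = executor->Run(inputs, outputs);  // a single inference
  *run_seconds = (NowMicros() - t1) * 1e-6;

  executor->Finish();                  // unload the model
  return status;
}

}  // namespace benchmark
}  // namespace aibench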
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #include "aibench/benchmark/imagenet/imagenet_postprocessor.h" 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | #include "aibench/utils/logging.h" 30 | #include "aibench/benchmark/imagenet/imagenet_preprocessor.h" 31 | 32 | namespace aibench { 33 | namespace benchmark { 34 | namespace imagenet { 35 | 36 | void GetLabels(const std::string &filename, 37 | std::vector *label_list) { 38 | std::ifstream in_file(filename, std::ios::in); 39 | if (in_file.is_open()) { 40 | std::string label; 41 | while (std::getline(in_file, label)) { 42 | label_list->emplace_back(label); 43 | } 44 | in_file.close(); 45 | } else { 46 | LOG(FATAL) << filename << " not found."; 47 | } 48 | } 49 | 50 | int GetFileNum(const std::string &filename) { 51 | size_t head_len = std::string(imagenet::kImageNameHead).length(); 52 | return atoi(filename.substr(head_len, 53 | head_len + imagenet::kImageNameNumLen).c_str()); 54 | } 55 | 56 | } // namespace imagenet 57 | 58 | ImageNetPostProcessor::ImageNetPostProcessor() 59 | : total_count_(0), 60 | correct_count_(0) { 61 | imagenet::GetLabels("imagenet_groundtruth_labels.txt", &groundtruth_labels_); 62 | imagenet::GetLabels("mobilenet_model_labels.txt", &model_labels_); 63 | } 64 | 65 | Status ImageNetPostProcessor::Run( 66 | const std::string &filename, 67 | const std::map &outputs) { 68 | AIBENCH_CHECK(outputs.size() == 1); 69 | auto output = outputs.begin(); 70 | int64_t output_size = output->second.size(); 71 | AIBENCH_CHECK(output_size == 1001 || output_size == 1000, 72 | "Output size should be 1001 or 1000."); 73 | float *output_data = output->second.data().get(); 74 | auto output_iter = std::max_element(output_data, 75 | output_data + output_size); 76 | auto output_index = std::distance(output_data, output_iter); 77 | std::string output_label = 78 | model_labels_[output_size == 1001 ? output_index : output_index + 1]; 79 | std::string groundtruth_label = 80 | groundtruth_labels_[imagenet::GetFileNum(filename) - 1]; 81 | if (output_label == groundtruth_label) { 82 | ++correct_count_; 83 | } 84 | ++total_count_; 85 | 86 | return Status::SUCCESS; 87 | } 88 | 89 | std::string ImageNetPostProcessor::GetResult() { 90 | std::stringstream stream; 91 | stream << std::fixed << std::setprecision(4) 92 | << 1.0 * correct_count_ / total_count_ << "," 93 | << total_count_ << "," << correct_count_; 94 | return stream.str(); 95 | } 96 | 97 | } // namespace benchmark 98 | } // namespace aibench 99 | 100 | -------------------------------------------------------------------------------- /aibench/benchmark/imagenet/imagenet_postprocessor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_BENCHMARK_IMAGENET_IMAGENET_POSTPROCESSOR_H_ 16 | #define AIBENCH_BENCHMARK_IMAGENET_IMAGENET_POSTPROCESSOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/benchmark/benchmark.h" 23 | 24 | namespace aibench { 25 | namespace benchmark { 26 | 27 | class ImageNetPostProcessor : public PostProcessor { 28 | public: 29 | ImageNetPostProcessor(); 30 | Status Run(const std::string &filename, 31 | const std::map &outputs) override; 32 | std::string GetResult() override; 33 | 34 | private: 35 | std::vector groundtruth_labels_; 36 | std::vector model_labels_; 37 | int total_count_; 38 | int correct_count_; 39 | }; 40 | 41 | } // namespace benchmark 42 | } // namespace aibench 43 | 44 | #endif // AIBENCH_BENCHMARK_IMAGENET_IMAGENET_POSTPROCESSOR_H_ 45 | -------------------------------------------------------------------------------- /aibench/benchmark/imagenet/imagenet_preprocessor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
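The ImageNet post-processor above looks up the ground-truth label by the number embedded in the image file name (GetFileNum) and counts a hit when the arg-max model label matches it. kImageNameHead and kImageNameNumLen are defined in imagenet_preprocessor.cc, which is not reproduced here, so the snippet below only illustrates the parsing under the assumption of standard ILSVRC2012 validation names; note that std::string::substr takes a count, not an end position, as its second argument, and atoi stops at the first non-digit character, so the trailing extension is ignored either way.

// Assumed file-name convention ("ILSVRC2012_val_NNNNNNNN.JPEG"); the real
// kImageNameHead/kImageNameNumLen values live in imagenet_preprocessor.cc.
#include <cassert>
#include <cstdlib>
#include <string>

int main() {
  const std::string head = "ILSVRC2012_val_";        // assumed kImageNameHead
  const std::string name = "ILSVRC2012_val_00000042.JPEG";
  int file_num = std::atoi(name.substr(head.length()).c_str());
  assert(file_num == 42);
  // ImageNetPostProcessor::Run() then indexes groundtruth_labels_[file_num - 1],
  // i.e. the ground-truth file stores one label per image in file-number order.
  return 0;
}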
14 | 15 | #ifndef AIBENCH_BENCHMARK_IMAGENET_IMAGENET_PREPROCESSOR_H_ 16 | #define AIBENCH_BENCHMARK_IMAGENET_IMAGENET_PREPROCESSOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "aibench/benchmark/benchmark.h" 24 | 25 | namespace aibench { 26 | namespace benchmark { 27 | namespace imagenet { 28 | 29 | extern const char *kImageNameHead; 30 | extern const char *kImageNameTail; 31 | extern const int kImageNameNumLen; 32 | 33 | } // namespace imagenet 34 | 35 | class ImageNetPreProcessor : public PreProcessor { 36 | public: 37 | ImageNetPreProcessor(const std::vector &data_formats, 38 | const std::vector> &input_means, 39 | const std::vector &input_var, 40 | const ChannelOrder channel_order); 41 | Status Run(const std::string &filename, 42 | std::map *inputs) override; 43 | 44 | private: 45 | std::vector data_formats_; 46 | std::vector> input_means_; 47 | std::vector input_var_; 48 | ChannelOrder channel_order_; 49 | std::unordered_set blacklist_; 50 | }; 51 | 52 | } // namespace benchmark 53 | } // namespace aibench 54 | 55 | #endif // AIBENCH_BENCHMARK_IMAGENET_IMAGENET_PREPROCESSOR_H_ 56 | -------------------------------------------------------------------------------- /aibench/executors/BUILD: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | load( 8 | "//aibench:aibench.bzl", 9 | "if_aarch64_linux", 10 | "if_android", 11 | "if_android_arm64", 12 | "if_android_armv7", 13 | "if_armhf_linux", 14 | "if_not_android", 15 | ) 16 | 17 | cc_library( 18 | name = "base_executor", 19 | srcs = [ 20 | "base_executor.cc", 21 | ], 22 | hdrs = [ 23 | "base_executor.h", 24 | ], 25 | copts = [ 26 | "-Werror", 27 | "-Wextra", 28 | "-Wno-missing-field-initializers", 29 | ], 30 | deps = [ 31 | "//aibench/proto:aibench_proto_cc", 32 | "//include:public_headers", 33 | ], 34 | ) 35 | 36 | cc_library( 37 | name = "mace_executor", 38 | srcs = [ 39 | "mace/mace_executor.cc", 40 | ], 41 | hdrs = [ 42 | "mace/mace_executor.h", 43 | ], 44 | copts = [ 45 | "-Werror", 46 | "-Wextra", 47 | "-Wno-missing-field-initializers", 48 | ], 49 | linkopts = if_android([ 50 | "-llog", 51 | ]), 52 | deps = [ 53 | ":base_executor", 54 | ] + if_android_armv7([ 55 | "//third_party/mace:mace_armeabi-v7a", 56 | ]) + if_android_arm64([ 57 | "//third_party/mace:mace_arm64-v8a", 58 | ]) + if_aarch64_linux([ 59 | "//third_party/mace:mace_aarch64", 60 | ]) + if_armhf_linux([ 61 | "//third_party/mace:mace_armhf", 62 | ]), 63 | ) 64 | 65 | cc_library( 66 | name = "snpe_executor", 67 | srcs = [ 68 | "snpe/snpe_executor.cc", 69 | ], 70 | hdrs = [ 71 | "snpe/snpe_executor.h", 72 | ], 73 | copts = [ 74 | "-Werror", 75 | "-Wextra", 76 | "-Wno-missing-field-initializers", 77 | ], 78 | linkopts = [ 79 | "-pie", 80 | "-lm", 81 | ], 82 | deps = [ 83 | ":base_executor", 84 | ] + if_android_armv7([ 85 | "@snpe//:snpe_armeabi-v7a", 86 | ]) + if_android_arm64([ 87 | "@snpe//:snpe_arm64-v8a", 88 | ]) + if_aarch64_linux([ 89 | "@snpe//:snpe_aarch64", 90 | ]) + if_armhf_linux([ 91 | "@snpe//:snpe_armhf", 92 | ]), 93 | ) 94 | 95 | cc_library( 96 | name = "ncnn_executor", 97 | srcs = glob([ 98 | "ncnn/ncnn_executor.cc", 99 | ]), 100 | hdrs = [ 101 | "ncnn/ncnn_executor.h", 102 | ], 103 | copts = [ 104 | "-Werror", 105 | "-Wextra", 106 | "-Wno-missing-field-initializers", 107 | ], 108 | linkopts = [ 109 | "-fopenmp", 110 | ], 111 | deps = [ 112 | ":base_executor", 113 | "@ncnn", 
114 | ], 115 | ) 116 | 117 | cc_library( 118 | name = "tnn_executor", 119 | srcs = glob([ 120 | "tnn/tnn_executor.cc", 121 | ]), 122 | hdrs = [ 123 | "tnn/tnn_executor.h", 124 | ], 125 | copts = [ 126 | "-Werror", 127 | "-Wextra", 128 | "-Wno-missing-field-initializers", 129 | ], 130 | deps = [ 131 | ":base_executor", 132 | ] + if_android_armv7([ 133 | "@tnn//:tnn_armeabi-v7a", 134 | ]) + if_android_arm64([ 135 | "@tnn//:tnn_arm64-v8a", 136 | ]), 137 | ) 138 | 139 | cc_library( 140 | name = "tflite_executor", 141 | srcs = glob([ 142 | "tflite/tflite_executor.cc", 143 | ]), 144 | hdrs = [ 145 | "tflite/tflite_executor.h", 146 | ], 147 | deps = [ 148 | ":base_executor", 149 | ] + if_android_armv7([ 150 | "//third_party/tflite:tflite_armeabi-v7a", 151 | ]) + if_android_arm64([ 152 | "//third_party/tflite:tflite_arm64-v8a", 153 | ]), 154 | ) 155 | 156 | cc_library( 157 | name = "hiai_executor", 158 | srcs = glob([ 159 | "hiai/hiai_executor.cc", 160 | ]), 161 | hdrs = [ 162 | "hiai/hiai_executor.h", 163 | ], 164 | linkopts = [ 165 | "-lstdc++", 166 | ], 167 | deps = [ 168 | ":base_executor", 169 | "//aibench/utils:utils", 170 | ] + if_android_arm64([ 171 | "@hiai//:hiai_arm64-v8a", 172 | ]), 173 | ) 174 | 175 | cc_library( 176 | name = "mnn_executor", 177 | srcs = glob([ 178 | "mnn/mnn_executor.cc", 179 | ]), 180 | hdrs = [ 181 | "mnn/mnn_executor.h", 182 | ], 183 | deps = [ 184 | ":base_executor", 185 | ] + if_android_armv7([ 186 | "//third_party/mnn:mnn_armeabi-v7a", 187 | ]) + if_android_arm64([ 188 | "//third_party/mnn:mnn_arm64-v8a", 189 | ]), 190 | ) 191 | -------------------------------------------------------------------------------- /aibench/executors/base_executor.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/executors/base_executor.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | namespace aibench { 24 | 25 | class BaseTensor::Impl { 26 | public: 27 | std::vector shape; 28 | std::shared_ptr data; 29 | }; 30 | 31 | BaseTensor::BaseTensor(const std::vector &shape, 32 | std::shared_ptr data) { 33 | impl_ = std::unique_ptr(new BaseTensor::Impl()); 34 | impl_->shape = shape; 35 | impl_->data = data; 36 | } 37 | 38 | BaseTensor::BaseTensor() { 39 | impl_ = std::unique_ptr(new BaseTensor::Impl()); 40 | } 41 | 42 | BaseTensor::BaseTensor(const BaseTensor &other) { 43 | impl_ = std::unique_ptr(new BaseTensor::Impl()); 44 | impl_->shape = other.shape(); 45 | impl_->data = other.data(); 46 | } 47 | 48 | BaseTensor::BaseTensor(const BaseTensor &&other) { 49 | impl_ = std::unique_ptr(new BaseTensor::Impl()); 50 | impl_->shape = other.shape(); 51 | impl_->data = other.data(); 52 | } 53 | 54 | BaseTensor &BaseTensor::operator=(const BaseTensor &other) { 55 | impl_->shape = other.shape(); 56 | impl_->data = other.data(); 57 | return *this; 58 | } 59 | 60 | BaseTensor &BaseTensor::operator=(const BaseTensor &&other) { 61 | impl_->shape = other.shape(); 62 | impl_->data = other.data(); 63 | return *this; 64 | } 65 | 66 | BaseTensor::~BaseTensor() = default; 67 | 68 | const std::vector &BaseTensor::shape() const { return impl_->shape; } 69 | 70 | const std::shared_ptr BaseTensor::data() const { return impl_->data; } 71 | 72 | int64_t BaseTensor::size() const { 73 | return std::accumulate(shape().begin(), shape().end(), 1, 74 | std::multiplies()); 75 | } 76 | 77 | std::shared_ptr BaseTensor::data() { return impl_->data; } 78 | 79 | 80 | } // namespace aibench 81 | -------------------------------------------------------------------------------- /aibench/executors/base_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_EXECUTORS_BASE_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_BASE_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "aibench/proto/aibench.pb.h" 24 | #include "aibench/proto/base.pb.h" 25 | #include "aibench/utils/logging.h" 26 | 27 | namespace aibench { 28 | 29 | // input/output tensor 30 | class BaseTensor { 31 | public: 32 | // shape - the shape of the tensor, with size n 33 | // data - the buffer of the tensor, must not be null with size equals 34 | // shape[0] * shape[1] * ... 
* shape[n-1] 35 | explicit BaseTensor(const std::vector &shape, 36 | std::shared_ptr data); 37 | BaseTensor(); 38 | BaseTensor(const BaseTensor &other); 39 | BaseTensor(const BaseTensor &&other); 40 | BaseTensor &operator=(const BaseTensor &other); 41 | BaseTensor &operator=(const BaseTensor &&other); 42 | ~BaseTensor(); 43 | 44 | const std::vector &shape() const; 45 | const std::shared_ptr data() const; 46 | int64_t size() const; 47 | std::shared_ptr data(); 48 | 49 | private: 50 | class Impl; 51 | std::unique_ptr impl_; 52 | }; 53 | 54 | class BaseExecutor { 55 | public: 56 | explicit BaseExecutor(ExecutorType executor, 57 | DeviceType device_type, 58 | const std::string &model_file, 59 | const std::string &weight_file) 60 | : executor_(executor), 61 | device_type_(device_type), 62 | model_file_(model_file), 63 | weight_file_(weight_file) {} 64 | 65 | virtual ~BaseExecutor() = default; 66 | 67 | // If your executor needs to initialize something other than loading 68 | // model or creating an engine, you can put it here, e.g., Mace needs 69 | // to compile OpenCL kernel once per target. 70 | virtual Status Init(int num_threads) = 0; 71 | 72 | // Load model and prepare to run. 73 | virtual Status Prepare() = 0; 74 | 75 | // Run the model. 76 | virtual Status Run(const std::map &inputs, 77 | std::map *outputs) = 0; 78 | 79 | // Unload model and free the memory after running the model. 80 | virtual void Finish() = 0; 81 | ExecutorType GetExecutorType() {return executor_;} 82 | DeviceType GetDeviceType() {return device_type_;} 83 | std::string &GetModelFile() {return model_file_;} 84 | std::string &GetWeightFile() {return weight_file_;} 85 | 86 | private: 87 | ExecutorType executor_; 88 | DeviceType device_type_; 89 | std::string model_file_; 90 | std::string weight_file_; 91 | }; 92 | 93 | } // namespace aibench 94 | 95 | #endif // AIBENCH_EXECUTORS_BASE_EXECUTOR_H_ 96 | -------------------------------------------------------------------------------- /aibench/executors/hiai/hiai_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
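aibench/executors/base_executor.h above is the whole contract a new framework adapter has to satisfy: the four-stage lifecycle Init -> Prepare -> Run -> Finish, plus BaseTensor as the framework-neutral buffer whose data length must equal the product of its shape. A minimal sketch of a hypothetical adapter follows (DummyExecutor is not part of the repository; shape/data element types are assumed to be int64_t/float, as the other executors use them):

// Hypothetical skeleton of a new adapter -- illustration only.
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "aibench/executors/base_executor.h"

namespace aibench {

class DummyExecutor : public BaseExecutor {
 public:
  explicit DummyExecutor(const std::string &model_file)
      // Reuses the MACE/CPU enum values only because the constructor needs
      // some ExecutorType/DeviceType; a real adapter would add its own.
      : BaseExecutor(MACE, CPU, model_file, "") {}

  Status Init(int num_threads) override {        // one-off environment setup
    num_threads_ = num_threads;
    return Status::SUCCESS;
  }
  Status Prepare() override { return Status::SUCCESS; }  // load GetModelFile()
  Status Run(const std::map<std::string, BaseTensor> &inputs,
             std::map<std::string, BaseTensor> *outputs) override {
    (void)inputs; (void)outputs;                 // run one inference
    return Status::SUCCESS;
  }
  void Finish() override {}                      // unload and free memory

 private:
  int num_threads_ = 0;
};

// Building an input tensor for Run(): buffer size == product of shape.
BaseTensor MakeNchwInput() {
  std::vector<int64_t> shape{1, 3, 224, 224};
  std::shared_ptr<float> data(new float[1 * 3 * 224 * 224](),
                              std::default_delete<float[]>());
  return BaseTensor(shape, data);
}

}  // namespace aibench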
14 | 15 | #ifndef AIBENCH_EXECUTORS_HIAI_HIAI_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_HIAI_HIAI_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include "aibench/executors/base_executor.h" 25 | #include "HIAIMixModel.h" 26 | 27 | namespace aibench { 28 | 29 | class HiAiExecutor : public BaseExecutor { 30 | public: 31 | explicit HiAiExecutor(const std::string &model_file, 32 | const std::string &model_name) 33 | : BaseExecutor(HIAI, NPU, model_file, ""), 34 | model_name_(model_name), 35 | mix_model_manager_(nullptr, HIAI_MixModelManager_Destroy), 36 | mix_model_buffer_(nullptr, HIAI_MixModelBuffer_Destroy) {} 37 | 38 | virtual Status Init(int num_threads); 39 | 40 | virtual Status Prepare(); 41 | 42 | virtual Status Run(const std::map &inputs, 43 | std::map *outputs); 44 | virtual void Finish(); 45 | 46 | private: 47 | Status CreateHiAiTensorFromBaseTensor( 48 | const std::map *base_tensor_map, 49 | std::vector *tensor_buffer_vec); 50 | 51 | Status CopyDataBetweenHiAiAndBaseTensors( 52 | std::map *base_tensor_map, 53 | std::vector *tensor_buffer_vec, 54 | bool base_to_hiai); 55 | 56 | void DestroyHiAiTensor( 57 | std::vector *tensor_buffer_vec); 58 | 59 | typedef std::unique_ptr< 60 | HIAI_MixModelManager, decltype(HIAI_MixModelManager_Destroy) *> 61 | MixModelManagerUniquePtr; 62 | 63 | typedef std::unique_ptr< 64 | HIAI_MixModelBuffer, decltype(HIAI_MixModelBuffer_Destroy) *> 65 | MixModelBufferUniquePtr; 66 | 67 | MixModelManagerUniquePtr mix_model_manager_; 68 | MixModelBufferUniquePtr mix_model_buffer_; 69 | std::vector input_tensors_; 70 | std::vector output_tensors_; 71 | 72 | std::string model_name_; 73 | }; 74 | 75 | } // namespace aibench 76 | 77 | #endif // AIBENCH_EXECUTORS_HIAI_HIAI_EXECUTOR_H_ 78 | -------------------------------------------------------------------------------- /aibench/executors/mace/mace_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #ifndef AIBENCH_EXECUTORS_MACE_MACE_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_MACE_MACE_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "aibench/executors/base_executor.h" 24 | #include "build/include/mace/public/mace.h" 25 | #include "mace/port/file_system.h" 26 | 27 | namespace aibench { 28 | 29 | class MaceExecutor : public BaseExecutor { 30 | public: 31 | MaceExecutor(DeviceType device_type, 32 | const std::string &model_file, 33 | const std::string &weight_file, 34 | const std::vector &input_names, 35 | const std::vector &output_names) 36 | : BaseExecutor(MACE, device_type, model_file, weight_file), 37 | input_names_(input_names), 38 | output_names_(output_names) {} 39 | 40 | virtual Status Init(int num_threads); 41 | 42 | virtual Status Prepare(); 43 | 44 | virtual Status Run(const std::map &inputs, 45 | std::map *outputs); 46 | virtual void Finish(); 47 | 48 | Status CreateEngine(std::shared_ptr *engine); 49 | 50 | private: 51 | std::vector input_names_; 52 | std::vector output_names_; 53 | std::shared_ptr engine_; 54 | int num_threads_; 55 | std::shared_ptr gpu_context_; 56 | std::unique_ptr model_weights_data_; 57 | }; 58 | 59 | } // namespace aibench 60 | 61 | #endif // AIBENCH_EXECUTORS_MACE_MACE_EXECUTOR_H_ 62 | -------------------------------------------------------------------------------- /aibench/executors/mnn/mnn_executor.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
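Unlike the other adapters, MaceExecutor above also takes the input and output tensor names, since a MACE engine is created for a fixed input/output set, and it needs both a graph file and a separate weight file. A usage sketch with placeholder paths and tensor names (not taken from the repository):

// Placeholder model paths and tensor names -- illustration only.
#include "aibench/executors/mace/mace_executor.h"

void PrepareMaceSketch() {
  aibench::MaceExecutor executor(aibench::DeviceType::GPU,
                                 "mobilenet_v1.pb",    // graph file
                                 "mobilenet_v1.data",  // weight file
                                 {"input"}, {"output"});
  executor.Init(4);      // per-target setup, e.g. OpenCL kernel compilation
  executor.Prepare();    // creates the MACE engine via CreateEngine()
  executor.Finish();
}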
14 | 15 | #include "aibench/executors/mnn/mnn_executor.h" 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | namespace aibench { 22 | 23 | Status MnnExecutor::Init(int num_threads) { 24 | config_.numThread = num_threads; 25 | DeviceType device_type = GetDeviceType(); 26 | if (device_type == DeviceType::CPU) { 27 | config_.type = MNN_FORWARD_CPU; 28 | } else if (device_type == DeviceType::GPU) { 29 | config_.type = MNN_FORWARD_OPENCL; 30 | } else if (device_type == DeviceType::VULKAN) { 31 | config_.type = MNN_FORWARD_VULKAN; 32 | } else { 33 | return Status::UNSUPPORTED; 34 | } 35 | return Status::SUCCESS; 36 | } 37 | 38 | Status MnnExecutor::Prepare() { 39 | net_ = std::shared_ptr( 40 | MNN::Interpreter::createFromFile(GetModelFile().c_str())); 41 | session_ = net_->createSession(config_); 42 | net_->releaseModel(); 43 | return Status::SUCCESS; 44 | } 45 | 46 | Status MnnExecutor::Run(const std::map &inputs, 47 | std::map *outputs) { 48 | (void) inputs; 49 | (void) outputs; 50 | auto input = net_->getSessionInput(session_, NULL); 51 | const MNN::Backend* inBackend = net_->getBackend(session_, input); 52 | std::shared_ptr givenTensor( 53 | MNN::Tensor::createHostTensorFromDevice(input, false)); 54 | auto outputTensor = net_->getSessionOutput(session_, NULL); 55 | std::shared_ptr expectTensor( 56 | MNN::Tensor::createHostTensorFromDevice(outputTensor, false)); 57 | input->copyFromHostTensor(givenTensor.get()); 58 | net_->runSession(session_); 59 | outputTensor->copyToHostTensor(expectTensor.get()); 60 | return Status::SUCCESS; 61 | } 62 | 63 | void MnnExecutor::Finish() { 64 | } 65 | 66 | } // namespace aibench 67 | -------------------------------------------------------------------------------- /aibench/executors/mnn/mnn_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #ifndef AIBENCH_EXECUTORS_MNN_MNN_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_MNN_MNN_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/executors/base_executor.h" 23 | #include "include/MNN/Interpreter.hpp" 24 | #include "include/MNN/MNNDefine.h" 25 | #include "include/MNN/Tensor.hpp" 26 | #include "include/MNN/MNNForwardType.h" 27 | 28 | namespace aibench { 29 | 30 | class MnnExecutor : public BaseExecutor { 31 | public: 32 | explicit MnnExecutor(DeviceType device_type, 33 | const std::string &model_file) 34 | : BaseExecutor(MNN, device_type, model_file, "") {} 35 | 36 | virtual Status Init(int num_threads); 37 | 38 | virtual Status Prepare(); 39 | 40 | virtual Status Run(const std::map &inputs, 41 | std::map *outputs); 42 | 43 | virtual void Finish(); 44 | private: 45 | std::shared_ptr net_; 46 | MNN::ScheduleConfig config_; 47 | MNN::Session* session_; 48 | }; 49 | 50 | } // namespace aibench 51 | 52 | #endif // AIBENCH_EXECUTORS_MNN_MNN_EXECUTOR_H_ 53 | -------------------------------------------------------------------------------- /aibench/executors/ncnn/ncnn_executor.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/executors/ncnn/ncnn_executor.h" 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "ncnn/include/cpu.h" 22 | 23 | namespace ncnn { 24 | 25 | int BenchNet::load_model() { 26 | ModelBinFromEmpty mb; 27 | for (size_t i = 0; i < layers.size(); ++i) { 28 | Layer *layer = layers[i]; 29 | int ret = layer->load_model(mb); 30 | if (ret != 0) { 31 | fprintf(stderr, "layer load_model %d failed\n", static_cast(i)); 32 | return -1; 33 | } 34 | } 35 | 36 | return 0; 37 | } 38 | 39 | } // namespace ncnn 40 | 41 | namespace aibench { 42 | 43 | Status NcnnExecutor::Init(int num_threads) { 44 | static ncnn::UnlockedPoolAllocator g_blob_pool_allocator; 45 | static ncnn::PoolAllocator g_workspace_pool_allocator; 46 | 47 | ncnn::set_cpu_powersave(0); 48 | ncnn::set_omp_dynamic(0); 49 | ncnn::set_omp_num_threads(num_threads); 50 | 51 | g_blob_pool_allocator.clear(); 52 | g_workspace_pool_allocator.clear(); 53 | 54 | ncnn::Option opt; 55 | opt.lightmode = true; 56 | opt.num_threads = num_threads; 57 | opt.blob_allocator = &g_blob_pool_allocator; 58 | opt.workspace_allocator = &g_workspace_pool_allocator; 59 | ncnn::set_default_option(opt); 60 | return Status::SUCCESS; 61 | } 62 | 63 | Status NcnnExecutor::Prepare() { 64 | int ret; 65 | 66 | ret = net.load_param(GetModelFile().c_str()); 67 | if (ret != 0) { 68 | return Status::UNSUPPORTED; 69 | } 70 | 71 | ret = net.load_model(); 72 | if (ret != 0) { 73 | return Status::RUNTIME_ERROR; 74 | } 75 | 76 | return Status::SUCCESS; 77 | } 78 | 79 | Status NcnnExecutor::Run(const std::map &inputs, 80 | std::map *outputs) { 81 | (void) outputs; 82 | // check inputs and outputs 83 | auto input = inputs.find("data"); 84 | if (input == inputs.end()) { 85 | return Status::RUNTIME_ERROR; 86 | } 87 | // transform inputs 88 | const std::vector &shape = input->second.shape(); 89 | const std::shared_ptr data = input->second.data(); 90 | ncnn::Mat in(static_cast(shape[1]), 91 | static_cast(shape[2]), 92 | static_cast(shape[3]), 93 | data.get()); 94 | 95 | // Execute the network inference and retrieve the result 96 | ncnn::Extractor ex = net.create_extractor(); 97 | ex.input("data", in); 98 | ncnn::Mat out; 99 | ex.extract("prob", out); 100 | 101 | return Status::SUCCESS; 102 | } 103 | 104 | void NcnnExecutor::Finish() { 105 | net.clear(); 106 | } 107 | 108 | } // namespace aibench 109 | -------------------------------------------------------------------------------- /aibench/executors/ncnn/ncnn_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #ifndef AIBENCH_EXECUTORS_NCNN_NCNN_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_NCNN_NCNN_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "aibench/executors/base_executor.h" 22 | #include "ncnn/include/layer.h" 23 | #include "ncnn/include/mat.h" 24 | #include "ncnn/include/modelbin.h" 25 | #include "ncnn/include/net.h" 26 | 27 | namespace ncnn { 28 | 29 | // always return empty weights 30 | class ModelBinFromEmpty : public ModelBin { 31 | public: 32 | virtual Mat load(int w, int /*type*/) const { return Mat(w); } 33 | }; 34 | 35 | class BenchNet : public Net { 36 | public: 37 | int load_model(); 38 | }; 39 | 40 | } // namespace ncnn 41 | 42 | namespace aibench { 43 | 44 | class NcnnExecutor : public BaseExecutor { 45 | public: 46 | explicit NcnnExecutor(const std::string &model_file) 47 | : BaseExecutor(NCNN, CPU, model_file, "") {} 48 | 49 | virtual Status Init(int num_threads); 50 | 51 | virtual Status Prepare(); 52 | 53 | virtual Status Run(const std::map &inputs, 54 | std::map *outputs); 55 | 56 | virtual void Finish(); 57 | private: 58 | ncnn::BenchNet net; 59 | }; 60 | 61 | } // namespace aibench 62 | 63 | #endif // AIBENCH_EXECUTORS_NCNN_NCNN_EXECUTOR_H_ 64 | -------------------------------------------------------------------------------- /aibench/executors/snpe/snpe_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_EXECUTORS_SNPE_SNPE_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_SNPE_SNPE_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/executors/base_executor.h" 23 | #include "SNPE/SNPE.hpp" 24 | 25 | namespace aibench { 26 | 27 | class SnpeExecutor : public BaseExecutor { 28 | public: 29 | explicit SnpeExecutor(DeviceType device_type, 30 | const std::string &model_file) 31 | : BaseExecutor(SNPE, device_type, model_file, "") {} 32 | 33 | virtual Status Init(int num_threads); 34 | 35 | virtual Status Prepare(); 36 | 37 | virtual Status Run(const std::map &inputs, 38 | std::map *outputs); 39 | 40 | virtual void Finish(); 41 | private: 42 | std::unique_ptr snpe_; 43 | }; 44 | 45 | } // namespace aibench 46 | 47 | #endif // AIBENCH_EXECUTORS_SNPE_SNPE_EXECUTOR_H_ 48 | -------------------------------------------------------------------------------- /aibench/executors/tflite/tflite_executor.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "aibench/executors/tflite/tflite_executor.h" 16 | #include 17 | 18 | #include 19 | 20 | namespace aibench { 21 | TfLiteExecutor::TfLiteDelegatePtr CreateGPUDelegate( 22 | tflite::FlatBufferModel* model) { 23 | TfLiteGpuDelegateOptions options; 24 | options.metadata = TfLiteGpuDelegateGetModelMetadata(model->GetModel()); 25 | options.compile_options.precision_loss_allowed = 1; 26 | options.compile_options.preferred_gl_object_type = 27 | TFLITE_GL_OBJECT_TYPE_FASTEST; 28 | options.compile_options.dynamic_batch_enabled = 0; 29 | return TfLiteExecutor::TfLiteDelegatePtr( 30 | TfLiteGpuDelegateCreate(&options), &TfLiteGpuDelegateDelete); 31 | } 32 | 33 | Status TfLiteExecutor::Init(int num_threads) { 34 | num_threads_ = num_threads; 35 | return Status::SUCCESS; 36 | } 37 | 38 | Status TfLiteExecutor::Prepare() { 39 | DeviceType device_type = GetDeviceType(); 40 | model_ = tflite::FlatBufferModel::BuildFromFile(GetModelFile().c_str()); 41 | if (!model_) { 42 | std::cout << "Failed to mmap model_" << GetModelFile() << std::endl; 43 | return Status::RUNTIME_ERROR; 44 | } 45 | tflite::ops::builtin::BuiltinOpResolver resolver; 46 | tflite::InterpreterBuilder builder(*model_.get(), resolver); 47 | builder(&interpreter_); 48 | if (!interpreter_) { 49 | std::cout << "Failed to construct interpreter" << std::endl; 50 | return Status::RUNTIME_ERROR; 51 | } 52 | interpreter_->SetNumThreads(num_threads_); 53 | interpreter_->UseNNAPI(false); 54 | if (device_type == DeviceType::GPU) { 55 | TfLiteExecutor::TfLiteDelegatePtr delegate = 56 | CreateGPUDelegate(model_.get()); 57 | if (!delegate) { 58 | std::cout << "GPU acceleration is unsupported on this platform." 59 | << std::endl; 60 | } else { 61 | delegates_.emplace("GPU", std::move(delegate)); 62 | } 63 | for (const auto& delegate : delegates_) { 64 | if (interpreter_->ModifyGraphWithDelegate(delegate.second.get()) != 65 | kTfLiteOk) { 66 | std::cout << "Failed to apply" << delegate.first << "delegate."; 67 | } else { 68 | std::cout << "Applied" << delegate.first << "delegate."; 69 | } 70 | } 71 | } 72 | if (delegates_.empty() && interpreter_->AllocateTensors() != kTfLiteOk) { 73 | std::cout << "Failed to allocate tensors!" << std::endl; 74 | return Status::RUNTIME_ERROR; 75 | } 76 | 77 | return Status::SUCCESS; 78 | } 79 | 80 | Status TfLiteExecutor::Run(const std::map &inputs, 81 | std::map *outputs) { 82 | (void)inputs; 83 | (void)outputs; 84 | TfLiteStatus run_status = interpreter_->Invoke(); 85 | return run_status == kTfLiteOk ? Status::SUCCESS 86 | : Status::RUNTIME_ERROR; 87 | } 88 | 89 | void TfLiteExecutor::Finish() { 90 | interpreter_.reset(); 91 | } 92 | 93 | } // namespace aibench 94 | -------------------------------------------------------------------------------- /aibench/executors/tflite/tflite_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_EXECUTORS_TFLITE_TFLITE_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_TFLITE_TFLITE_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "aibench/executors/base_executor.h" 24 | #include "tensorflow/lite/interpreter.h" 25 | #include "tensorflow/lite/kernels/register.h" 26 | #include "tensorflow/lite/model.h" 27 | #include "tensorflow/lite/optional_debug_tools.h" 28 | #include "tensorflow/lite/string_type.h" 29 | #include "tensorflow/lite/string_util.h" 30 | #include "tensorflow/lite/delegates/gpu/gl_delegate.h" 31 | #include "tensorflow/lite/stderr_reporter.h" 32 | 33 | namespace aibench { 34 | 35 | class TfLiteExecutor : public BaseExecutor { 36 | public: 37 | using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr; 38 | using TfLiteDelegatePtrMap = std::map; 39 | explicit TfLiteExecutor(DeviceType device_type, 40 | const std::string &model_file) 41 | : BaseExecutor(TFLITE, device_type, model_file, "") {} 42 | 43 | virtual Status Init(int num_threads); 44 | 45 | virtual Status Prepare(); 46 | 47 | virtual Status Run(const std::map &inputs, 48 | std::map *outputs); 49 | virtual void Finish(); 50 | private: 51 | int num_threads_; 52 | std::unique_ptr interpreter_; 53 | std::unique_ptr model_; 54 | TfLiteDelegatePtrMap delegates_; 55 | }; 56 | 57 | } // namespace aibench 58 | 59 | #endif // AIBENCH_EXECUTORS_TFLITE_TFLITE_EXECUTOR_H_ 60 | -------------------------------------------------------------------------------- /aibench/executors/tnn/tnn_executor.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The MobileAIBench Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | 16 | #include "aibench/executors/tnn/tnn_executor.h" 17 | 18 | int tnn_num_threads; 19 | 20 | namespace aibench { 21 | 22 | Status TnnExecutor::Prepare() { 23 | const DeviceType device_type = GetDeviceType(); 24 | TNN_NS::ModelConfig model_config; 25 | std::ifstream model_graph_file(GetModelFile().c_str()); 26 | std::string model_graph_data; 27 | model_graph_data = std::string((std::istreambuf_iterator 28 | (model_graph_file)), 29 | std::istreambuf_iterator()); 30 | 31 | model_config.params.push_back(model_graph_data); 32 | 33 | std::ifstream model_weights_file(GetWeightFile().c_str()); 34 | std::string model_weights_data; 35 | model_weights_data = std::string((std::istreambuf_iterator 36 | (model_weights_file)), 37 | std::istreambuf_iterator()); 38 | 39 | model_config.params.push_back(model_weights_data); 40 | 41 | tnn_.Init(model_config); 42 | 43 | TNN_NS::Status error; 44 | 45 | TNN_NS::NetworkConfig cpu_network_config; 46 | if (device_type == CPU) { 47 | cpu_network_config.device_type = TNN_NS::DEVICE_ARM; 48 | } else if (device_type == GPU) { 49 | cpu_network_config.device_type = TNN_NS::DEVICE_OPENCL; 50 | } 51 | instance_ = tnn_.CreateInst(cpu_network_config, error); 52 | instance_->SetCpuNumThreads(tnn_num_threads); 53 | return error == TNN_NS::TNN_OK ? Status::SUCCESS : Status::RUNTIME_ERROR; 54 | } 55 | 56 | Status TnnExecutor::Init(int num_threads) { 57 | tnn_num_threads = num_threads; 58 | LOG(INFO) << "TnnExecutor num_threads: " << num_threads; 59 | return Status::SUCCESS; 60 | } 61 | 62 | Status TnnExecutor::Run(const std::map &inputs, 63 | std::map *outputs) { 64 | if (!instance_) { 65 | return {}; 66 | } 67 | 68 | void* command_queue; 69 | instance_->GetCommandQueue(reinterpret_cast(&command_queue)); 70 | 71 | TNN_NS::BlobMap input_blobs; 72 | instance_->GetAllInputBlobs(input_blobs); 73 | // check inputs 74 | auto input_name = inputs.find(input_blobs.begin()->first); 75 | if (input_name == inputs.end()) { 76 | LOG(ERROR) << "input name not matched"; 77 | return Status::RUNTIME_ERROR; 78 | } 79 | void* data = inputs.at(input_blobs.begin()->first).data().get(); 80 | std::vector shape = inputs.at(input_blobs.begin()->first).shape(); 81 | std::vector tnn_shape(shape.begin(), shape.end()); 82 | TNN_NS::Blob* input = input_blobs.begin()->second; 83 | TNN_NS::Mat input_mat(TNN_NS::DEVICE_ARM, TNN_NS::NCHW_FLOAT, 84 | tnn_shape, data); 85 | 86 | TNN_NS::BlobConverter input_blob_convert(input); 87 | TNN_NS::MatConvertParam input_convert_param; 88 | input_blob_convert.ConvertFromMat(input_mat, input_convert_param, 89 | command_queue); 90 | 91 | instance_->Forward(); 92 | 93 | TNN_NS::BlobMap output_blobs; 94 | instance_->GetAllOutputBlobs(output_blobs); 95 | TNN_NS::Blob* output = output_blobs.begin()->second; 96 | 97 | // check outputs 98 | auto output_name = outputs->find(output_blobs.begin()->first); 99 | if (output_name == outputs->end()) { 100 | LOG(ERROR) << "output name not matched"; 101 | return Status::RUNTIME_ERROR; 102 | } 103 | 104 | return Status::SUCCESS; 105 | } 106 | 107 | void TnnExecutor::Finish() { 108 | if (!instance_) { 109 | return; 110 | } 111 | instance_->DeInit(); 112 | } 113 | 114 | } // namespace aibench 115 | -------------------------------------------------------------------------------- /aibench/executors/tnn/tnn_executor.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The MobileAIBench Authors. All Rights Reserved. 
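TnnExecutor::Prepare() above pushes the whole proto text first and the weight blob second into ModelConfig::params, leaving model_type at its default; only the order matters. A small sketch of the same convention (ReadAll is a hypothetical helper, paths are placeholders):

// Hypothetical helper and placeholder paths -- illustration only.
#include <fstream>
#include <iterator>
#include <string>

#include "include/tnn/core/tnn.h"

static std::string ReadAll(const std::string &path) {
  std::ifstream file(path);
  return std::string(std::istreambuf_iterator<char>(file),
                     std::istreambuf_iterator<char>());
}

TNN_NS::ModelConfig MakeTnnModelConfig() {
  TNN_NS::ModelConfig config;                          // model_type left at default
  config.params.push_back(ReadAll("model.tnnproto"));  // [0]: text graph
  config.params.push_back(ReadAll("model.tnnmodel"));  // [1]: weights
  return config;
}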
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_EXECUTORS_TNN_TNN_EXECUTOR_H_ 16 | #define AIBENCH_EXECUTORS_TNN_TNN_EXECUTOR_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "aibench/executors/base_executor.h" 24 | #include "include/tnn/core/tnn.h" 25 | #include "include/tnn/core/instance.h" 26 | #include "include/tnn/core/common.h" 27 | #include "include/tnn/utils/dims_vector_utils.h" 28 | #include "include/tnn/utils/blob_converter.h" 29 | 30 | namespace aibench { 31 | 32 | class TnnExecutor : public BaseExecutor { 33 | public: 34 | explicit TnnExecutor(DeviceType device_type, 35 | const std::string &model_file, 36 | const std::string &weight_file) 37 | : BaseExecutor(TNN, device_type, model_file, weight_file) {} 38 | 39 | Status Init(int num_threads) override; 40 | 41 | Status Prepare() override; 42 | 43 | Status Run(const std::map &inputs, 44 | std::map *outputs) override; 45 | 46 | void Finish() override; 47 | private: 48 | TNN_NS::TNN tnn_; 49 | std::shared_ptr instance_; 50 | }; 51 | 52 | } // namespace aibench 53 | 54 | #endif // AIBENCH_EXECUTORS_TNN_TNN_EXECUTOR_H_ 55 | -------------------------------------------------------------------------------- /aibench/port/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | cc_library( 8 | name = "port", 9 | deps = [ 10 | "//aibench/port/android:port_android", 11 | "//aibench/port/linux:port_linux", 12 | ], 13 | ) 14 | 15 | cc_library( 16 | name = "port_base", 17 | srcs = [ 18 | "env.cc", 19 | "logger.cc", 20 | ], 21 | deps = [ 22 | "//include:public_headers", 23 | ], 24 | ) 25 | -------------------------------------------------------------------------------- /aibench/port/android/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | load("//aibench:aibench.bzl", "if_android") 8 | 9 | cc_library( 10 | name = "port_android", 11 | srcs = if_android(glob([ 12 | "*.cc", 13 | ])), 14 | hdrs = if_android(glob([ 15 | "*.h", 16 | ])), 17 | deps = if_android([ 18 | "//aibench/port/linux_base:port_linux_base", 19 | ]), 20 | alwayslink = 1, 21 | ) 22 | -------------------------------------------------------------------------------- /aibench/port/android/env.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "aibench/port/android/env.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #ifdef __hexagon__ 24 | #include 25 | #else 26 | #include 27 | #endif 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | #include "aibench/port/android/malloc_logger.h" 37 | #include "aibench/port/posix/time.h" 38 | #include "aibench/utils/macros.h" 39 | #include "aibench/utils/memory.h" 40 | #include "aibench/utils/logging.h" 41 | 42 | namespace aibench { 43 | namespace port { 44 | 45 | LogWriter *AndroidEnv::GetLogWriter() { 46 | return &log_writer_; 47 | } 48 | 49 | namespace { 50 | 51 | struct BacktraceState { 52 | void **current; 53 | void **end; 54 | }; 55 | 56 | _Unwind_Reason_Code UnwindCallback(struct _Unwind_Context *context, void *arg) { 57 | BacktraceState *state = static_cast(arg); 58 | uintptr_t pc = _Unwind_GetIP(context); 59 | if (pc) { 60 | if (state->current == state->end) { 61 | return _URC_END_OF_STACK; 62 | } else { 63 | *state->current++ = reinterpret_cast(pc); 64 | } 65 | } 66 | return _URC_NO_REASON; 67 | } 68 | 69 | size_t BackTrace(void **buffer, size_t max) { 70 | BacktraceState state = {buffer, buffer + max}; 71 | _Unwind_Backtrace(UnwindCallback, &state); 72 | 73 | return state.current - buffer; 74 | } 75 | 76 | bool CpuIsolate(size_t cpu_id) { 77 | std::string cpuinfo_isolate_conf = MakeString( 78 | "/sys/devices/system/cpu/cpu", 79 | cpu_id, 80 | "/isolate"); 81 | std::ifstream isolate_file(cpuinfo_isolate_conf); 82 | int isolate_switch = 0; 83 | if (isolate_file.is_open()) { 84 | std::string line; 85 | if (std::getline(isolate_file, line)) { 86 | isolate_switch = strtol(line.c_str(), nullptr, 0); 87 | } 88 | isolate_file.close(); 89 | } 90 | 91 | return (isolate_switch != 0); 92 | } 93 | 94 | } // namespace 95 | 96 | Status AndroidEnv::GetCPUMaxFreq(std::vector *max_freqs) { 97 | AIBENCH_RETURN_IF_ERROR(LinuxBaseEnv::GetCPUMaxFreq(max_freqs)); 98 | 99 | size_t cpu_num = (max_freqs != nullptr) ? 
max_freqs->size() : 0; 100 | if (cpu_num > 0) { 101 | for (size_t i = 0; i < cpu_num; ++i) { 102 | if (CpuIsolate(i)) { 103 | (*max_freqs)[i] = 0; 104 | } 105 | } 106 | } 107 | 108 | return Status::SUCCESS; 109 | } 110 | 111 | std::vector AndroidEnv::GetBackTraceUnsafe(int max_steps) { 112 | std::vector buffer(max_steps, 0); 113 | int steps = BackTrace(buffer.data(), max_steps); 114 | 115 | std::vector bt; 116 | for (int i = 0; i < steps; ++i) { 117 | std::ostringstream os; 118 | 119 | const void *addr = buffer[i]; 120 | const char *symbol = ""; 121 | Dl_info info; 122 | if (dladdr(addr, &info) && info.dli_sname) { 123 | symbol = info.dli_sname; 124 | } 125 | 126 | os << "pc " << addr << " " << symbol; 127 | 128 | bt.push_back(os.str()); 129 | } 130 | 131 | return bt; 132 | } 133 | 134 | std::unique_ptr AndroidEnv::NewMallocLogger( 135 | std::ostringstream *oss, 136 | const std::string &name) { 137 | return make_unique(oss, name); 138 | } 139 | 140 | Env *Env::Default() { 141 | static AndroidEnv android_env; 142 | return &android_env; 143 | } 144 | 145 | } // namespace port 146 | } // namespace aibench 147 | -------------------------------------------------------------------------------- /aibench/port/android/env.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_ANDROID_ENV_H_ 16 | #define AIBENCH_PORT_ANDROID_ENV_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/port/android/logger.h" 23 | #include "aibench/port/env.h" 24 | #include "aibench/port/linux_base/env.h" 25 | #include "aibench/port/posix/file_system.h" 26 | 27 | namespace aibench { 28 | namespace port { 29 | 30 | class AndroidEnv : public LinuxBaseEnv { 31 | public: 32 | LogWriter *GetLogWriter() override; 33 | Status GetCPUMaxFreq(std::vector *max_freqs) override; 34 | std::vector GetBackTraceUnsafe(int max_steps) override; 35 | std::unique_ptr NewMallocLogger( 36 | std::ostringstream *oss, 37 | const std::string &name) override; 38 | 39 | private: 40 | AndroidLogWriter log_writer_; 41 | }; 42 | 43 | } // namespace port 44 | } // namespace aibench 45 | 46 | #endif // AIBENCH_PORT_ANDROID_ENV_H_ 47 | -------------------------------------------------------------------------------- /aibench/port/android/logger.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "aibench/port/android/logger.h" 16 | 17 | #include 18 | 19 | #include 20 | 21 | namespace aibench { 22 | namespace port { 23 | 24 | void AndroidLogWriter::WriteLogMessage(const char *fname, 25 | const int line, 26 | const LogLevel severity, 27 | const char *message) { 28 | int android_log_level; 29 | switch (severity) { 30 | case INFO: 31 | android_log_level = ANDROID_LOG_INFO; 32 | break; 33 | case WARNING: 34 | android_log_level = ANDROID_LOG_WARN; 35 | break; 36 | case ERROR: 37 | android_log_level = ANDROID_LOG_ERROR; 38 | break; 39 | case FATAL: 40 | android_log_level = ANDROID_LOG_FATAL; 41 | break; 42 | default: 43 | android_log_level = ANDROID_LOG_ERROR; 44 | break; 45 | } 46 | 47 | std::stringstream ss; 48 | const char *const partial_name = strrchr(fname, '/'); 49 | ss << (partial_name != nullptr ? partial_name + 1 : fname) << ":" << line 50 | << " " << message; 51 | __android_log_write(android_log_level, "AIBENCH", ss.str().c_str()); 52 | 53 | // Also log to stderr (for standalone Android apps) and abort. 54 | LogWriter::WriteLogMessage(fname, line, severity, message); 55 | } 56 | 57 | } // namespace port 58 | } // namespace aibench 59 | -------------------------------------------------------------------------------- /aibench/port/android/logger.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_ANDROID_LOGGER_H_ 16 | #define AIBENCH_PORT_ANDROID_LOGGER_H_ 17 | 18 | #include "aibench/port/logger.h" 19 | 20 | namespace aibench { 21 | namespace port { 22 | 23 | class AndroidLogWriter : public LogWriter { 24 | protected: 25 | void WriteLogMessage(const char *fname, 26 | const int line, 27 | const LogLevel severity, 28 | const char *message) override; 29 | }; 30 | 31 | } // namespace port 32 | } // namespace aibench 33 | 34 | #endif // AIBENCH_PORT_ANDROID_LOGGER_H_ 35 | -------------------------------------------------------------------------------- /aibench/port/android/malloc_logger.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "aibench/port/android/malloc_logger.h" 16 | 17 | #include 18 | 19 | #include 20 | #include 21 | 22 | namespace aibench { 23 | namespace port { 24 | 25 | namespace { 26 | struct mallinfo LogMallinfoChange(std::ostringstream *oss, 27 | const std::string &name, 28 | const struct mallinfo curr, 29 | const struct mallinfo prev) { 30 | if (prev.arena != curr.arena) { 31 | (*oss) << "[" << name << "] " 32 | << "Non-mmapped space allocated (bytes): " << curr.arena 33 | << ", diff: " << ((int64_t)curr.arena - (int64_t)prev.arena); 34 | } 35 | if (prev.ordblks != curr.ordblks) { 36 | (*oss) << "[" << name << "] " 37 | << "Number of free chunks: " << curr.ordblks << ", diff: " 38 | << ((int64_t)curr.ordblks - (int64_t)prev.ordblks); 39 | } 40 | if (prev.smblks != curr.smblks) { 41 | (*oss) << "[" << name << "] " 42 | << "Number of free fastbin blocks: " << curr.smblks 43 | << ", diff: " << ((int64_t)curr.smblks - (int64_t)prev.smblks); 44 | } 45 | if (prev.hblks != curr.hblks) { 46 | (*oss) << "[" << name << "] " 47 | << "Number of mmapped regions: " << curr.hblks 48 | << ", diff: " << ((int64_t)curr.hblks - (int64_t)prev.hblks); 49 | } 50 | if (prev.hblkhd != curr.hblkhd) { 51 | (*oss) << "[" << name << "] " 52 | << "Space allocated in mmapped regions (bytes): " << curr.hblkhd 53 | << ", diff: " << ((int64_t)curr.hblkhd - (int64_t)prev.hblkhd); 54 | } 55 | if (prev.usmblks != curr.usmblks) { 56 | (*oss) << "[" << name << "] " 57 | << "Maximum total allocated space (bytes): " << curr.usmblks 58 | << ", diff: " 59 | << ((int64_t)curr.usmblks - (int64_t)prev.usmblks); 60 | } 61 | if (prev.fsmblks != curr.fsmblks) { 62 | (*oss) << "[" << name << "] " 63 | << "Space in freed fastbin blocks (bytes): " << curr.fsmblks 64 | << ", diff: " 65 | << ((int64_t)curr.fsmblks - (int64_t)prev.fsmblks); 66 | } 67 | if (prev.uordblks != curr.uordblks) { 68 | (*oss) << "[" << name << "] " 69 | << "Total allocated space (bytes): " << curr.uordblks 70 | << ", diff: " 71 | << ((int64_t)curr.uordblks - (int64_t)prev.uordblks); 72 | } 73 | if (prev.fordblks != curr.fordblks) { 74 | (*oss) << "[" << name << "] " 75 | << "Total free space (bytes): " << curr.fordblks << ", diff: " 76 | << ((int64_t)curr.fordblks - (int64_t)prev.fordblks); 77 | } 78 | if (prev.keepcost != curr.keepcost) { 79 | (*oss) << "[" << name << "] " 80 | << "Top-most, releasable space (bytes): " << curr.keepcost 81 | << ", diff: " 82 | << ((int64_t)curr.keepcost - (int64_t)prev.keepcost); 83 | } 84 | return curr; 85 | } 86 | } // namespace 87 | 88 | AndroidMallocLogger::AndroidMallocLogger(std::ostringstream *oss, 89 | const std::string &name) : 90 | oss_(oss), name_(name) { 91 | prev_ = mallinfo(); 92 | } 93 | 94 | AndroidMallocLogger::~AndroidMallocLogger() { 95 | struct mallinfo curr = mallinfo(); 96 | LogMallinfoChange(oss_, name_, curr, prev_); 97 | } 98 | 99 | } // namespace port 100 | } // namespace aibench 101 | -------------------------------------------------------------------------------- /aibench/port/android/malloc_logger.h: 
-------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_ANDROID_MALLOC_LOGGER_H_ 16 | #define AIBENCH_PORT_ANDROID_MALLOC_LOGGER_H_ 17 | 18 | #include 19 | 20 | #include 21 | 22 | #include "aibench/port/env.h" 23 | 24 | namespace aibench { 25 | namespace port { 26 | 27 | class AndroidMallocLogger : public MallocLogger { 28 | public: 29 | explicit AndroidMallocLogger(std::ostringstream *oss, 30 | const std::string &name); 31 | ~AndroidMallocLogger() override; 32 | 33 | private: 34 | std::ostringstream *oss_; 35 | const std::string name_; 36 | struct mallinfo prev_; 37 | }; 38 | 39 | } // namespace port 40 | } // namespace aibench 41 | 42 | 43 | #endif // AIBENCH_PORT_ANDROID_MALLOC_LOGGER_H_ 44 | -------------------------------------------------------------------------------- /aibench/port/env.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/port/env.h" 16 | 17 | #include 18 | 19 | #include "aibench/utils/memory.h" 20 | #include "aibench/public/aibench.h" 21 | 22 | namespace aibench { 23 | namespace port { 24 | 25 | Status Env::AdviseFree(void *addr, size_t length) { 26 | return Status::UNSUPPORTED; 27 | } 28 | 29 | Status Env::GetCPUMaxFreq(std::vector *max_freqs) { 30 | return Status::UNSUPPORTED; 31 | } 32 | 33 | Status Env::SchedSetAffinity(const std::vector &cpu_ids) { 34 | return Status::UNSUPPORTED; 35 | } 36 | 37 | std::unique_ptr Env::NewMallocLogger( 38 | std::ostringstream *oss, 39 | const std::string &name) { 40 | return make_unique(); 41 | } 42 | 43 | } // namespace port 44 | } // namespace aibench 45 | -------------------------------------------------------------------------------- /aibench/port/linux/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | load("//aibench:aibench.bzl", "if_linux") 8 | 9 | cc_library( 10 | name = "port_linux", 11 | srcs = if_linux(glob([ 12 | "*.cc", 13 | ])), 14 | hdrs = if_linux(glob([ 15 | "*.h", 16 | ])), 17 | deps = if_linux([ 18 | "//aibench/port/linux_base:port_linux_base", 19 | ]), 20 | alwayslink = 1, 21 | ) 22 | -------------------------------------------------------------------------------- /aibench/port/linux/env.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "aibench/port/linux/env.h" 16 | 17 | #include 18 | #include 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | #include "aibench/port/env.h" 25 | #include "aibench/port/posix/backtrace.h" 26 | #include "aibench/port/posix/file_system.h" 27 | #include "aibench/port/posix/time.h" 28 | #include "aibench/utils/macros.h" 29 | 30 | namespace aibench { 31 | namespace port { 32 | 33 | // In our embedded linux device, SchedSetAffinity has side effects 34 | // on performance, so we override this method to do nothing. You 35 | // can try to comment this function, perhaps you could get a better 36 | // performance as we do in Android devices. 
37 | Status LinuxEnv::SchedSetAffinity(const std::vector &cpu_ids) { 38 | AIBENCH_UNUSED(cpu_ids); 39 | 40 | return Status::SUCCESS; 41 | } 42 | 43 | LogWriter *LinuxEnv::GetLogWriter() { 44 | return &log_writer_; 45 | } 46 | 47 | std::vector LinuxEnv::GetBackTraceUnsafe(int max_steps) { 48 | return aibench::port::posix::GetBackTraceUnsafe(max_steps); 49 | } 50 | 51 | Env *Env::Default() { 52 | static LinuxEnv linux_env; 53 | return &linux_env; 54 | } 55 | 56 | } // namespace port 57 | } // namespace aibench 58 | -------------------------------------------------------------------------------- /aibench/port/linux/env.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_LINUX_ENV_H_ 16 | #define AIBENCH_PORT_LINUX_ENV_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "aibench/port/linux_base/env.h" 22 | #include "aibench/port/logger.h" 23 | 24 | namespace aibench { 25 | namespace port { 26 | 27 | class LinuxEnv : public LinuxBaseEnv { 28 | public: 29 | Status SchedSetAffinity(const std::vector &cpu_ids) override; 30 | LogWriter *GetLogWriter() override; 31 | std::vector GetBackTraceUnsafe(int max_steps) override; 32 | 33 | private: 34 | LogWriter log_writer_; 35 | }; 36 | 37 | } // namespace port 38 | } // namespace aibench 39 | 40 | #endif // AIBENCH_PORT_LINUX_ENV_H_ 41 | -------------------------------------------------------------------------------- /aibench/port/linux_base/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | cc_library( 8 | name = "port_linux_base", 9 | srcs = glob([ 10 | "*.cc", 11 | ]), 12 | hdrs = glob([ 13 | "*.h", 14 | ]), 15 | deps = [ 16 | "//aibench/port:port_base", 17 | "//aibench/port/posix:port_posix", 18 | ], 19 | alwayslink = 1, 20 | ) 21 | -------------------------------------------------------------------------------- /aibench/port/linux_base/env.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/port/linux_base/env.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #include "aibench/port/posix/file_system.h" 29 | #include "aibench/port/posix/time.h" 30 | #include "aibench/utils/logging.h" 31 | 32 | namespace aibench { 33 | namespace port { 34 | 35 | namespace { 36 | 37 | int GetCPUCount() { 38 | int cpu_count = 0; 39 | std::string cpu_sys_conf = "/proc/cpuinfo"; 40 | std::ifstream f(cpu_sys_conf); 41 | if (!f.is_open()) { 42 | LOG(ERROR) << "failed to open " << cpu_sys_conf; 43 | return -1; 44 | } 45 | std::string line; 46 | const std::string processor_key = "processor"; 47 | while (std::getline(f, line)) { 48 | if (line.size() >= processor_key.size() 49 | && line.compare(0, processor_key.size(), processor_key) == 0) { 50 | ++cpu_count; 51 | } 52 | } 53 | if (f.bad()) { 54 | LOG(ERROR) << "failed to read " << cpu_sys_conf; 55 | } 56 | if (!f.eof()) { 57 | LOG(ERROR) << "failed to read end of " << cpu_sys_conf; 58 | } 59 | f.close(); 60 | VLOG(1) << "CPU cores: " << cpu_count; 61 | return cpu_count; 62 | } 63 | 64 | } // namespace 65 | 66 | int64_t LinuxBaseEnv::NowMicros() { 67 | return aibench::port::posix::NowMicros(); 68 | } 69 | 70 | FileSystem *LinuxBaseEnv::GetFileSystem() { 71 | return &posix_file_system_; 72 | } 73 | 74 | Status LinuxBaseEnv::GetCPUMaxFreq(std::vector *max_freqs) { 75 | AIBENCH_CHECK_NOTNULL(max_freqs); 76 | int cpu_count = GetCPUCount(); 77 | if (cpu_count < 0) { 78 | return Status::RUNTIME_ERROR; 79 | } 80 | for (int cpu_id = 0; cpu_id < cpu_count; ++cpu_id) { 81 | std::string cpuinfo_max_freq_sys_conf = MakeString( 82 | "/sys/devices/system/cpu/cpu", 83 | cpu_id, 84 | "/cpufreq/cpuinfo_max_freq"); 85 | std::ifstream f(cpuinfo_max_freq_sys_conf); 86 | if (!f.is_open()) { 87 | LOG(ERROR) << "failed to open " << cpuinfo_max_freq_sys_conf; 88 | return Status::RUNTIME_ERROR; 89 | } 90 | std::string line; 91 | if (std::getline(f, line)) { 92 | float freq = strtof(line.c_str(), nullptr); 93 | max_freqs->push_back(freq); 94 | } 95 | if (f.bad()) { 96 | LOG(ERROR) << "failed to read " << cpuinfo_max_freq_sys_conf; 97 | } 98 | f.close(); 99 | } 100 | 101 | VLOG(1) << "CPU freq: " << MakeString(*max_freqs); 102 | 103 | return Status::SUCCESS; 104 | } 105 | 106 | Status LinuxBaseEnv::SchedSetAffinity(const std::vector &cpu_ids) { 107 | cpu_set_t mask; 108 | CPU_ZERO(&mask); 109 | for (auto cpu_id : cpu_ids) { 110 | CPU_SET(cpu_id, &mask); 111 | } 112 | 113 | pid_t pid = syscall(SYS_gettid); 114 | int err = sched_setaffinity(pid, sizeof(mask), &mask); 115 | if (err) { 116 | LOG(WARNING) << "SchedSetAffinity failed: " << strerror(errno); 117 | return Status(Status::INVALID_ARGS, 118 | "SchedSetAffinity failed: " + 119 | std::string(strerror(errno))); 120 | } 121 | 122 | return Status::SUCCESS; 123 | } 124 | 125 | Status LinuxBaseEnv::AdviseFree(void *addr, size_t length) { 126 | int page_size = sysconf(_SC_PAGESIZE); 127 | void *addr_aligned = 128 | reinterpret_cast( 129 | (reinterpret_cast(addr) + page_size - 1) 130 | & (~(page_size - 1))); 131 | uintptr_t delta = 132 | reinterpret_cast(addr_aligned) 133 | - reinterpret_cast(addr); 134 | if (length >= delta + page_size) { 135 | size_t len_aligned = (length - delta) & (~(page_size - 1)); 136 | int error = madvise(addr_aligned, len_aligned, MADV_DONTNEED); 137 | if (error != 0) { 138 | LOG(ERROR) << "Advise free failed: " << strerror(errno); 139 | return Status::RUNTIME_ERROR; 140 | } 141 | } 
142 | return Status::SUCCESS; 143 | } 144 | 145 | 146 | } // namespace port 147 | } // namespace aibench 148 | -------------------------------------------------------------------------------- /aibench/port/linux_base/env.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_LINUX_BASE_ENV_H_ 16 | #define AIBENCH_PORT_LINUX_BASE_ENV_H_ 17 | 18 | #include 19 | 20 | #include "aibench/port/env.h" 21 | #include "aibench/port/posix/file_system.h" 22 | 23 | namespace aibench { 24 | namespace port { 25 | 26 | class LinuxBaseEnv : public Env { 27 | public: 28 | int64_t NowMicros() override; 29 | Status AdviseFree(void *addr, size_t length) override; 30 | Status GetCPUMaxFreq(std::vector *max_freqs) override; 31 | FileSystem *GetFileSystem() override; 32 | Status SchedSetAffinity(const std::vector &cpu_ids) override; 33 | 34 | protected: 35 | PosixFileSystem posix_file_system_; 36 | }; 37 | 38 | } // namespace port 39 | } // namespace aibench 40 | 41 | #endif // AIBENCH_PORT_LINUX_BASE_ENV_H_ 42 | -------------------------------------------------------------------------------- /aibench/port/logger.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/port/logger.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/port/env.h" 23 | #include "aibench/utils/string_util.h" 24 | 25 | namespace aibench { 26 | namespace port { 27 | 28 | inline bool IsValidLogLevel(const LogLevel level) { 29 | return level > LogLevel::INVALID_MIN && 30 | level < LogLevel::INVALID_MAX; 31 | } 32 | 33 | LogLevel LogLevelFromStr(const char *log_level_str) { 34 | if (log_level_str != nullptr) { 35 | std::string ls = ToUpper(log_level_str); 36 | 37 | if (ls == "I" || ls == "INFO") { 38 | return LogLevel::INFO; 39 | } 40 | if (ls == "W" || ls == "WARNING") { 41 | return LogLevel::WARNING; 42 | } 43 | if (ls == "E" || ls == "ERROR") { 44 | return LogLevel::ERROR; 45 | } 46 | if (ls == "F" || ls == "FATAL") { 47 | return LogLevel::FATAL; 48 | } 49 | } 50 | 51 | return LogLevel::INVALID_MIN; 52 | } 53 | 54 | char LogLevelToShortStr(LogLevel level) { 55 | if (!IsValidLogLevel(level)) { 56 | level = LogLevel::INFO; 57 | } 58 | 59 | return "IWEF"[static_cast(level) - 1]; 60 | } 61 | 62 | int VLogLevelFromStr(const char *vlog_level_str) { 63 | if (vlog_level_str != nullptr) { 64 | return atoi(vlog_level_str); 65 | } 66 | 67 | return 0; 68 | } 69 | 70 | 71 | void LogWriter::WriteLogMessage(const char *fname, 72 | const int line, 73 | const LogLevel severity, 74 | const char *message) { 75 | printf("%c %s:%d] %s\n", LogLevelToShortStr(severity), fname, line, message); 76 | } 77 | 78 | Logger::Logger(const char *fname, int line, LogLevel severity) 79 | : fname_(fname), line_(line), severity_(severity) {} 80 | 81 | void Logger::GenerateLogMessage() { 82 | LogWriter *log_writer = Env::Default()->GetLogWriter(); 83 | log_writer->WriteLogMessage(fname_, line_, severity_, str().c_str()); 84 | 85 | // When there is a fatal log, terminate execution 86 | if (severity_ == LogLevel::FATAL) { 87 | DealWithFatal(); 88 | } 89 | } 90 | 91 | void Logger::DealWithFatal() { 92 | // When there is a fatal log, log the backtrace and abort. 93 | LogWriter *log_writer = Env::Default()->GetLogWriter(); 94 | std::vector bt = Env::Default()->GetBackTraceUnsafe(50); 95 | if (!bt.empty()) { 96 | log_writer->WriteLogMessage(fname_, line_, severity_, "backtrace:"); 97 | for (size_t i = 0; i < bt.size(); ++i) { 98 | std::ostringstream os; 99 | os << " " << bt[i]; 100 | log_writer->WriteLogMessage(fname_, line_, severity_, os.str().c_str()); 101 | } 102 | } 103 | 104 | abort(); 105 | } 106 | 107 | Logger::~Logger() { 108 | static const LogLevel min_log_level = MinLogLevelFromEnv(); 109 | if (LogLevelPassThreashold(severity_, min_log_level)) { 110 | GenerateLogMessage(); 111 | } 112 | } 113 | 114 | } // namespace port 115 | } // namespace aibench 116 | -------------------------------------------------------------------------------- /aibench/port/posix/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | cc_library( 8 | name = "port_posix", 9 | srcs = glob([ 10 | "*.cc", 11 | ]), 12 | hdrs = glob([ 13 | "*.h", 14 | ]), 15 | deps = [ 16 | "//aibench/port:port_base", 17 | "//include:public_headers", 18 | ], 19 | ) 20 | -------------------------------------------------------------------------------- /aibench/port/posix/backtrace.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_POSIX_BACKTRACE_H_ 16 | #define AIBENCH_PORT_POSIX_BACKTRACE_H_ 17 | 18 | #include 19 | 20 | #include 21 | #include 22 | 23 | namespace aibench { 24 | namespace port { 25 | namespace posix { 26 | 27 | inline std::vector GetBackTraceUnsafe(int max_steps) { 28 | std::vector buffer(max_steps, 0); 29 | int steps = backtrace(buffer.data(), max_steps); 30 | 31 | std::vector bt; 32 | char **symbols = backtrace_symbols(buffer.data(), steps); 33 | if (symbols != nullptr) { 34 | for (int i = 0; i < steps; i++) { 35 | bt.push_back(symbols[i]); 36 | } 37 | } 38 | return bt; 39 | } 40 | 41 | } // namespace posix 42 | } // namespace port 43 | } // namespace aibench 44 | 45 | #endif // AIBENCH_PORT_POSIX_BACKTRACE_H_ 46 | -------------------------------------------------------------------------------- /aibench/port/posix/file_system.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "aibench/port/posix/file_system.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | 26 | #include "aibench/utils/memory.h" 27 | 28 | namespace aibench { 29 | namespace port { 30 | 31 | namespace { 32 | class PosixReadOnlyMemoryRegion : public ReadOnlyMemoryRegion { 33 | public: 34 | PosixReadOnlyMemoryRegion() = delete; 35 | PosixReadOnlyMemoryRegion(const void* addr, uint64_t length) 36 | : addr_(addr), length_(length) {} 37 | ~PosixReadOnlyMemoryRegion() override { 38 | if (length_ > 0) { 39 | munmap(const_cast(addr_), length_); 40 | } 41 | } 42 | const void *data() const override { return addr_; } 43 | uint64_t length() const override { return length_; } 44 | 45 | private: 46 | const void *addr_; 47 | const uint64_t length_; 48 | }; 49 | } // namespace 50 | 51 | Status PosixFileSystem::NewReadOnlyMemoryRegionFromFile( 52 | const char *fname, 53 | std::unique_ptr* result) { 54 | Status s = Status::SUCCESS; 55 | int fd = open(fname, O_RDONLY); 56 | if (fd < 0) { 57 | // TODO(heliangliang) check errno 58 | s = Status::RUNTIME_ERROR; 59 | } else { 60 | struct stat st; 61 | fstat(fd, &st); 62 | if (st.st_size > 0) { 63 | const void* address = 64 | mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); 65 | if (address == MAP_FAILED) { 66 | // TODO(heliangliang) check errno 67 | s = Status::RUNTIME_ERROR; 68 | } else { 69 | *result = make_unique(address, st.st_size); 70 | } 71 | close(fd); 72 | } else { 73 | // Empty file: mmap returns EINVAL (since Linux 2.6.12) length was 0 74 | *result = make_unique(nullptr, 0); 75 | s = Status::RUNTIME_ERROR; 76 | } 77 | } 78 | return s; 79 | } 80 | 81 | } // namespace port 82 | } // namespace aibench 83 | -------------------------------------------------------------------------------- /aibench/port/posix/file_system.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_POSIX_FILE_SYSTEM_H_ 16 | #define AIBENCH_PORT_POSIX_FILE_SYSTEM_H_ 17 | 18 | #include 19 | #include 20 | 21 | #include "aibench/port/file_system.h" 22 | 23 | namespace aibench { 24 | namespace port { 25 | 26 | class PosixFileSystem : public FileSystem { 27 | public: 28 | PosixFileSystem() = default; 29 | ~PosixFileSystem() override = default; 30 | Status NewReadOnlyMemoryRegionFromFile(const char *fname, 31 | std::unique_ptr* result) override; 32 | }; 33 | 34 | } // namespace port 35 | } // namespace aibench 36 | 37 | #endif // AIBENCH_PORT_POSIX_FILE_SYSTEM_H_ 38 | -------------------------------------------------------------------------------- /aibench/port/posix/time.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_POSIX_TIME_H_ 16 | #define AIBENCH_PORT_POSIX_TIME_H_ 17 | 18 | #include 19 | 20 | #include 21 | 22 | namespace aibench { 23 | namespace port { 24 | namespace posix { 25 | 26 | inline int64_t NowMicros() { 27 | struct timeval tv; 28 | gettimeofday(&tv, nullptr); 29 | return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; 30 | } 31 | 32 | } // namespace posix 33 | } // namespace port 34 | } // namespace aibench 35 | 36 | #endif // AIBENCH_PORT_POSIX_TIME_H_ 37 | -------------------------------------------------------------------------------- /aibench/proto/BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # aibench proto. 3 | # 4 | 5 | package( 6 | default_visibility = ["//visibility:public"], 7 | ) 8 | 9 | licenses(["notice"]) # Apache 2.0 10 | 11 | load( 12 | "@com_google_protobuf//:protobuf.bzl", 13 | "py_proto_library", 14 | "cc_proto_library", 15 | ) 16 | 17 | py_proto_library( 18 | name = "aibench_proto_py", 19 | srcs = glob(["*.proto"]), 20 | default_runtime = "@com_google_protobuf//:protobuf_python", 21 | protoc = "@com_google_protobuf//:protoc", 22 | srcs_version = "PY2AND3", 23 | deps = ["@com_google_protobuf//:protobuf_python"], 24 | ) 25 | 26 | cc_proto_library( 27 | name = "aibench_proto_cc", 28 | srcs = glob(["*.proto"]), 29 | default_runtime = "@com_google_protobuf//:protobuf_lite", 30 | protoc = "@com_google_protobuf//:protoc", 31 | ) 32 | 33 | py_binary( 34 | name = "test", 35 | srcs = ["test.py"], 36 | srcs_version = "PY2AND3", 37 | deps = [ 38 | ":aibench_proto_py", 39 | ], 40 | ) 41 | -------------------------------------------------------------------------------- /aibench/proto/aibench.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | package aibench; 4 | 5 | option optimize_for = LITE_RUNTIME; 6 | 7 | import "aibench/proto/base.proto"; 8 | 9 | message TensorShape { 10 | repeated int32 shape = 1; 11 | } 12 | 13 | message ModelInfo { 14 | optional ModelName model_name = 1; 15 | optional bool quantize = 2 [default = false]; 16 | optional string model_path = 3; 17 | optional string model_checksum = 4; 18 | optional string weight_path = 5; 19 | optional string weight_checksum = 6; 20 | repeated DeviceType devices = 7; 21 | repeated string input_names = 8; 22 | repeated string output_names = 9; 23 | 24 | // Specific members to override ModelBaseInfo's member 25 | repeated DataFormat data_format = 10; 26 | repeated TensorShape input_shape = 11; 27 | repeated TensorShape output_shape = 12; 28 | } 29 | 30 | message BenchInfo { 31 | optional ExecutorType executor = 1; 32 | repeated ModelInfo models = 2; 33 | } 34 | 35 | message ModelBaseInfo { 36 | enum ModelCategory { 37 | ImageClassification = 1; 38 | ObjectDetection = 2; 39 | } 40 | 41 | optional ModelName model_name = 1; 42 | optional ModelCategory category = 2; 43 | 
optional PreProcessor pre_processor = 3; 44 | optional PostProcessor post_processor = 4; 45 | optional MetricEvaluator metric_evaluator = 5; 46 | repeated TensorShape input_shape = 6; 47 | repeated TensorShape output_shape = 7; 48 | repeated ChannelOrder channel_order = 8; 49 | repeated DataFormat data_format = 9; 50 | } 51 | 52 | message BenchFactory { 53 | repeated BenchInfo benchmarks = 1; 54 | } 55 | 56 | message ModelFactory { 57 | repeated ModelBaseInfo models = 1; 58 | } 59 | 60 | -------------------------------------------------------------------------------- /aibench/proto/base.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | package aibench; 4 | 5 | option optimize_for = LITE_RUNTIME; 6 | 7 | enum BenchmarkOption { 8 | Performance = 1; 9 | Precision = 2; 10 | } 11 | 12 | enum ExecutorType { 13 | MACE = 1; 14 | SNPE = 2; 15 | TFLITE = 3; 16 | NCNN = 4; 17 | HIAI = 5; 18 | MNN = 6; 19 | TNN = 7; 20 | } 21 | 22 | enum DeviceType { 23 | CPU = 1; 24 | GPU = 2; 25 | DSP = 3; 26 | NPU = 4; 27 | VULKAN = 5; 28 | OPENGL = 6; 29 | } 30 | 31 | enum ModelName { 32 | MobileNetV1 = 1; 33 | MobileNetV2 = 2; 34 | InceptionV3 = 3; 35 | SqueezeNetV11 = 4; 36 | } 37 | 38 | enum ChannelOrder { 39 | RGB = 1; 40 | BGR = 2; 41 | } 42 | 43 | enum DataFormat { 44 | NHWC = 1; 45 | NCHW = 2; 46 | } 47 | 48 | message PreProcessor { 49 | enum PreProcessorType { 50 | DefaultProcessor = 1; 51 | } 52 | optional PreProcessorType type = 1; 53 | 54 | message MeanValue { 55 | repeated float mean = 1; 56 | } 57 | repeated MeanValue input_mean = 2; 58 | repeated float var = 3; 59 | } 60 | 61 | message PostProcessor { 62 | enum PostProcessorType { 63 | ImageClassification = 1; 64 | SSDDetection = 2; 65 | YOLOV3Detection = 3; 66 | } 67 | optional PostProcessorType type = 1; 68 | optional float threashold = 2 [default = 0.5]; 69 | } 70 | 71 | message MetricEvaluator { 72 | enum MetricEvaluatorType { 73 | ImageClassification = 1; 74 | COCOObjectDetection = 2; 75 | } 76 | optional MetricEvaluatorType type = 1; 77 | } 78 | -------------------------------------------------------------------------------- /aibench/proto/model.meta: -------------------------------------------------------------------------------- 1 | models { 2 | model_name: MobileNetV1 3 | category: ImageClassification 4 | pre_processor { 5 | type: DefaultProcessor 6 | input_mean: { 7 | mean: 127.5 8 | mean: 127.5 9 | mean: 127.5 10 | } 11 | var: 127.5 12 | } 13 | post_processor { 14 | type: ImageClassification 15 | } 16 | metric_evaluator: { 17 | type: ImageClassification 18 | } 19 | input_shape: { 20 | shape: 1 21 | shape: 224 22 | shape: 224 23 | shape: 3 24 | } 25 | output_shape: { 26 | shape: 1 27 | shape: 1001 28 | } 29 | channel_order: RGB 30 | data_format: NHWC 31 | } 32 | 33 | models { 34 | model_name: MobileNetV2 35 | category: ImageClassification 36 | pre_processor { 37 | type: DefaultProcessor 38 | input_mean: { 39 | mean: 127.5 40 | mean: 127.5 41 | mean: 127.5 42 | } 43 | var: 127.5 44 | } 45 | post_processor { 46 | type: ImageClassification 47 | } 48 | metric_evaluator: { 49 | type: ImageClassification 50 | } 51 | input_shape: { 52 | shape: 1 53 | shape: 224 54 | shape: 224 55 | shape: 3 56 | } 57 | output_shape: { 58 | shape: 1 59 | shape: 1001 60 | } 61 | channel_order: RGB 62 | data_format: NHWC 63 | } 64 | 65 | models { 66 | model_name: InceptionV3 67 | category: ImageClassification 68 | pre_processor { 69 | type: DefaultProcessor 70 | input_mean: { 71 | mean: 127.5 72 | 
mean: 127.5 73 | mean: 127.5 74 | } 75 | var: 127.5 76 | } 77 | post_processor { 78 | type: ImageClassification 79 | } 80 | metric_evaluator: { 81 | type: ImageClassification 82 | } 83 | input_shape: { 84 | shape: 1 85 | shape: 299 86 | shape: 299 87 | shape: 3 88 | } 89 | output_shape: { 90 | shape: 1 91 | shape: 1001 92 | } 93 | channel_order: RGB 94 | data_format: NHWC 95 | } 96 | 97 | models { 98 | model_name: SqueezeNetV11 99 | category: ImageClassification 100 | pre_processor { 101 | type: DefaultProcessor 102 | input_mean: { 103 | mean: 104 104 | mean: 117 105 | mean: 123 106 | } 107 | var: 1 108 | } 109 | post_processor { 110 | type: ImageClassification 111 | } 112 | metric_evaluator: { 113 | type: ImageClassification 114 | } 115 | input_shape: { 116 | shape: 1 117 | shape: 227 118 | shape: 227 119 | shape: 3 120 | } 121 | output_shape: { 122 | shape: 1 123 | shape: 1 124 | shape: 1 125 | shape: 1000 126 | } 127 | channel_order: BGR 128 | data_format: NHWC 129 | } 130 | -------------------------------------------------------------------------------- /aibench/proto/test.py: -------------------------------------------------------------------------------- 1 | from google.protobuf import text_format 2 | 3 | from aibench.proto import base_pb2 4 | from aibench.proto import aibench_pb2 5 | 6 | model_factory = aibench_pb2.ModelFactory() 7 | bench_factory = aibench_pb2.BenchFactory() 8 | 9 | try: 10 | with open('aibench/proto/model.meta', 'rb') as fin: 11 | file_content = fin.read() 12 | text_format.Parse(file_content, model_factory) 13 | print(str(model_factory)) 14 | with open('aibench/proto/benchmark.meta', 'rb') as fin: 15 | file_content = fin.read() 16 | text_format.Parse(file_content, bench_factory) 17 | print(str(bench_factory)) 18 | except text_format.ParseError as e: 19 | raise IOError("Cannot parse file.", e) 20 | -------------------------------------------------------------------------------- /aibench/python/BUILD: -------------------------------------------------------------------------------- 1 | py_library( 2 | name = "utils", 3 | srcs = glob([ 4 | "utils/*.py", 5 | ]), 6 | srcs_version = "PY2AND3", 7 | ) 8 | 9 | py_library( 10 | name = "evaluators", 11 | srcs = glob( 12 | [ 13 | "evaluators/*.py", 14 | ], 15 | exclude = [ 16 | "evaluators/*_test.py", 17 | ], 18 | ), 19 | srcs_version = "PY2AND3", 20 | deps = [ 21 | ":utils", 22 | ], 23 | ) 24 | 25 | py_test( 26 | name = "evaluator_test", 27 | srcs = ["evaluators/evaluator_test.py"], 28 | srcs_version = "PY2AND3", 29 | deps = [ 30 | ":evaluators", 31 | ":utils", 32 | ], 33 | ) 34 | 35 | py_library( 36 | name = "benchmark_lib", 37 | srcs = glob([ 38 | "device/*.py", 39 | "utils/*.py", 40 | ]), 41 | srcs_version = "PY2AND3", 42 | deps = [ 43 | "//aibench/proto:aibench_proto_py", 44 | ], 45 | ) 46 | 47 | py_binary( 48 | name = "benchmark", 49 | srcs = [ 50 | "benchmark.py", 51 | "bench_engine.py", 52 | ], 53 | srcs_version = "PY2AND3", 54 | deps = [ 55 | "benchmark_lib", 56 | ], 57 | ) 58 | -------------------------------------------------------------------------------- /aibench/python/device/adb_device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import re 16 | import sh 17 | 18 | from aibench.proto import base_pb2 19 | from aibench.python.utils import sh_commands 20 | from aibench.python.device.device import Device 21 | from aibench.python.device.device import YAMLKeyword 22 | 23 | 24 | class AdbDevice(Device): 25 | 26 | def __init__(self, adb): 27 | self.address = adb[0] 28 | prop = self.get_props() 29 | Device.__init__(self, { 30 | YAMLKeyword.device_name: adb[1], 31 | YAMLKeyword.target_abis: 32 | prop['ro.product.cpu.abilist'].split(','), 33 | YAMLKeyword.target_socs: prop['ro.board.platform'], 34 | YAMLKeyword.models: prop['ro.product.model'], 35 | YAMLKeyword.address: adb[0], 36 | }) 37 | 38 | def get_shell_prefix(self): 39 | return "adb -s %s shell" % self.address 40 | 41 | def exec_command(self, command, *args, **kwargs): 42 | return sh.adb('-s', self.address, 'shell', 43 | command, *args, **kwargs) 44 | 45 | def push(self, src_path, dst_path, silent=False): 46 | sh_commands.adb_push(src_path, dst_path, self.address, silent) 47 | 48 | def pull(self, src_path, dst_path='.'): 49 | sh_commands.adb_pull(src_path, dst_path, self.address) 50 | 51 | def get_props(self): 52 | outputs = sh.adb("-s", self.address, "shell", "getprop") 53 | raw_props = sh_commands.split_stdout(outputs) 54 | props = {} 55 | p = re.compile(r'\[(.+)\]: \[(.+)\]') 56 | for raw_prop in raw_props: 57 | m = p.match(raw_prop) 58 | if m: 59 | props[m.group(1)] = m.group(2) 60 | return props 61 | 62 | def get_available_device_types(self, device_types, abi, executor): 63 | avail_device_types = Device.get_available_device_types( 64 | self, device_types, abi, executor) 65 | if base_pb2.GPU in device_types: 66 | avail_device_types.append(base_pb2.GPU) 67 | return avail_device_types 68 | 69 | def get_bench_path(self): 70 | return "/data/local/tmp/aibench" 71 | -------------------------------------------------------------------------------- /aibench/python/device/device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from aibench.proto import base_pb2 17 | from aibench.python.utils import common 18 | from aibench.python.utils import sh_commands 19 | 20 | 21 | class YAMLKeyword(object): 22 | username = 'username' 23 | target_abis = 'target_abis' 24 | target_socs = 'target_socs' 25 | device_types = 'device_types' 26 | address = 'address' 27 | models = 'models' 28 | device_name = 'device_name' 29 | 30 | 31 | class Device: 32 | 33 | def __init__(self, device_dict): 34 | self.device_name = device_dict[YAMLKeyword.device_name] 35 | self.target_abis = device_dict[YAMLKeyword.target_abis] 36 | self.target_soc = device_dict[YAMLKeyword.target_socs] 37 | self.address = device_dict[YAMLKeyword.address] 38 | self.models = device_dict[YAMLKeyword.models] 39 | 40 | def lock(self): 41 | return sh_commands.device_lock(self.address) 42 | 43 | def get_shell_prefix(self): 44 | return "" 45 | 46 | def exec_command(self, command, *args, **kwargs): 47 | pass 48 | 49 | def push(self, src_path, dst_path): 50 | pass 51 | 52 | def pull(self, src_path, dst_path='.'): 53 | pass 54 | 55 | def get_props(self): 56 | return {} 57 | 58 | def get_available_device_types(self, device_types, abi, executor): 59 | avail_device_types = [] 60 | if base_pb2.CPU in device_types: 61 | avail_device_types.append(base_pb2.CPU) 62 | return avail_device_types 63 | 64 | def get_available_executors(self, executors, abi): 65 | avail_executors = [] 66 | if base_pb2.MACE in executors: 67 | avail_executors.append(base_pb2.MACE) 68 | if base_pb2.TFLITE in executors: 69 | if abi != "aarch64" and abi != "armhf": 70 | avail_executors.append(base_pb2.TFLITE) 71 | if base_pb2.NCNN in executors: 72 | avail_executors.append(base_pb2.NCNN) 73 | if base_pb2.MNN in executors: 74 | if abi != "aarch64" and abi != "armhf": 75 | avail_executors.append(base_pb2.MNN) 76 | if base_pb2.TNN in executors: 77 | if abi != "aarch64" and abi != "armhf": 78 | avail_executors.append(base_pb2.TNN) 79 | 80 | return avail_executors 81 | 82 | def get_bench_path(self): 83 | pass 84 | -------------------------------------------------------------------------------- /aibench/python/device/device_manager.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import os 16 | import re 17 | import six 18 | import sh 19 | import yaml 20 | 21 | from aibench.python.device.adb_device import AdbDevice 22 | from aibench.python.device.device import Device 23 | from aibench.python.device.device import YAMLKeyword 24 | from aibench.python.device.host_device import HostDevice 25 | from aibench.python.device.huawei_adb_device import HuaweiAdbDevice 26 | from aibench.python.device.qualcomm_adb_device import QualcommAdbDevice 27 | from aibench.python.device.ssh_device import SshDevice 28 | 29 | 30 | class DeviceManager: 31 | 32 | def list_adb_device(self): 33 | adb_list = sh.adb('devices').stdout.decode('utf-8') \ 34 | .strip().split('\n')[1:] 35 | adb_list = [tuple(pair.split('\t')) for pair in adb_list] 36 | devices = [] 37 | for adb in adb_list: 38 | adb_device = self._create_adb_device(adb) 39 | devices.append(adb_device) 40 | return devices 41 | 42 | def list_ssh_device(self, yml): 43 | devices = [] 44 | with open(yml) as f: 45 | yml_config = yaml.load(f.read()) 46 | devices = yml_config['devices'] 47 | device_list = [] 48 | for name, dev in six.iteritems(devices): 49 | print("Find ssh device:%s" % name) 50 | dev[YAMLKeyword.device_name] = name 51 | device_list.append(SshDevice(dev)) 52 | return device_list 53 | 54 | def list_devices(self, 55 | yml='generic-mobile-devices/devices_for_ai_bench.yml'): 56 | devices_list = [] 57 | devices_list.extend(self.list_adb_device()) 58 | if os.path.exists(yml): 59 | devices_list.extend(self.list_ssh_device(yml)) 60 | else: 61 | six.print_('No Arm linux device yaml file') 62 | 63 | return devices_list 64 | 65 | def _create_adb_device(self, adb): 66 | adb_device = AdbDevice(adb) 67 | # TODO(luxuhui@xiaomi.com): optimize this match after qualcomm release 68 | # newer socs 69 | if re.match("sdm\\d+$|msm\\d+$|msmnile|kona", 70 | adb_device.target_soc, re.I): 71 | # ["sdm845", "sdm660", "msm8998", "msm8996", "msmnile"] 72 | adb_device = QualcommAdbDevice(adb) 73 | print("Find qualcomm adb device:%s" % adb[0]) 74 | elif re.match("kirin\\d+$", adb_device.target_soc, re.I): 75 | # ["kirin980", "kirin970", "kirin960"] 76 | adb_device = HuaweiAdbDevice(adb) 77 | print("Find huawei adb device:%s" % adb[0]) 78 | else: 79 | print("Find adb device:%s" % adb[0]) 80 | 81 | return adb_device 82 | -------------------------------------------------------------------------------- /aibench/python/device/host_device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from aibench.python.device.device import Device 17 | 18 | 19 | class HostDevice(Device): 20 | pass 21 | -------------------------------------------------------------------------------- /aibench/python/device/huawei_adb_device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import re 16 | import sh 17 | 18 | from aibench.python.device.adb_device import AdbDevice 19 | from aibench.proto import base_pb2 20 | from aibench.python.utils.sh_commands import * 21 | 22 | 23 | class HuaweiAdbDevice(AdbDevice): 24 | 25 | def get_available_device_types(self, device_types, abi, executor): 26 | avail_device_types = AdbDevice.get_available_device_types( 27 | self, device_types, abi, executor) 28 | 29 | if (base_pb2.NPU in device_types) and (abi == "arm64-v8a") and \ 30 | self._support_hiai_ddk200(): 31 | avail_device_types.append(base_pb2.NPU) 32 | 33 | return avail_device_types 34 | 35 | def get_available_executors(self, executors, abi): 36 | avail_executors = \ 37 | AdbDevice.get_available_executors(self, executors, abi) 38 | if (base_pb2.HIAI in executors) and (abi == "arm64-v8a") and \ 39 | self._support_hiai_ddk200(): 40 | avail_executors.append(base_pb2.HIAI) 41 | 42 | return avail_executors 43 | 44 | def _support_hiai_ddk200(self): 45 | support = False 46 | soc_vers = re.findall("kirin(\\d+)$", self.target_soc, re.I) 47 | # DDK 200 matches the kirin980 48 | if len(soc_vers) > 0 and int(soc_vers[0]) >= 980: 49 | support = True 50 | return support 51 | -------------------------------------------------------------------------------- /aibench/python/device/qualcomm_adb_device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import sh 16 | 17 | from aibench.python.device.adb_device import AdbDevice 18 | from aibench.proto import base_pb2 19 | from aibench.python.utils.sh_commands import * 20 | 21 | 22 | class QualcommAdbDevice(AdbDevice): 23 | 24 | def get_available_device_types(self, device_types, abi, executor): 25 | avail_device_types = AdbDevice.get_available_device_types( 26 | self, device_types, abi, executor) 27 | if base_pb2.DSP in device_types and self._support_dev_dsp(executor): 28 | avail_device_types.append(base_pb2.DSP) 29 | 30 | if base_pb2.NPU in device_types and self._support_npu(): 31 | avail_device_types.append(base_pb2.NPU) 32 | 33 | return avail_device_types 34 | 35 | def get_available_executors(self, executors, abi): 36 | avail_executors = \ 37 | AdbDevice.get_available_executors(self, executors, abi) 38 | if base_pb2.SNPE in executors: 39 | avail_executors.append(base_pb2.SNPE) 40 | 41 | return avail_executors 42 | 43 | # TODO(luxuhui@xiaomi.com): optimize this method after qualcomm release 44 | # newer socs. 45 | def _support_npu(self): 46 | support = False 47 | # msmnile is 855, the only soc support NPU up to now. 48 | if self.target_soc == "msmnile": 49 | support = True 50 | return support 51 | 52 | def _support_dev_dsp(self, executor): 53 | support_dev_dsp = False 54 | if self.target_soc == "sdm660" and executor == base_pb2.SNPE: 55 | return support_dev_dsp 56 | if self.target_soc == "msmnile": 57 | return support_dev_dsp 58 | try: 59 | output = self.exec_command( 60 | "ls /system/vendor/lib/rfsa/adsp/libhexagon_nn_skel.so") # noqa 61 | except sh.ErrorReturnCode_1: 62 | print("libhexagon_nn_skel.so does not exists, QualcommAdbDevice Skip DSP.") # noqa 63 | else: 64 | if "No such file or directory" in output: 65 | print("libhexagon_nn_skel.so does not exists, QualcommAdbDevice Skip DSP.") # noqa 66 | else: 67 | support_dev_dsp = True 68 | return support_dev_dsp 69 | -------------------------------------------------------------------------------- /aibench/python/device/ssh_device.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import os 16 | import sh 17 | import six 18 | import sys 19 | import yaml 20 | 21 | from aibench.python.utils.common import * 22 | from aibench.python.utils.sh_commands import * 23 | from aibench.python.device.device import Device 24 | from aibench.python.device.device import YAMLKeyword 25 | 26 | 27 | class SshDevice(Device): 28 | 29 | def __init__(self, device_dict): 30 | Device.__init__(self, device_dict) 31 | self.username = device_dict[YAMLKeyword.username] 32 | if YAMLKeyword.device_types in device_dict.keys(): 33 | self.device_types = device_dict[YAMLKeyword.device_types] 34 | else: 35 | self.device_types = ["cpu"] 36 | try: 37 | sh.ssh('-q', '%s@%s' % (self.username, self.address), 38 | 'exit') 39 | except sh.ErrorReturnCode as e: 40 | six.print_('device connect failed, ' 41 | 'please check your authentication', 42 | file=sys.stderr) 43 | raise e 44 | 45 | def get_shell_prefix(self): 46 | return "ssh %s@%s" % (self.username, self.address) 47 | 48 | def exec_command(self, command, *args, **kwargs): 49 | return sh.ssh('%s@%s' % (self.username, self.address), 50 | command, *args, **kwargs) 51 | 52 | def push(self, src_path, dst_path, silent=False): 53 | ssh_push(src_path, dst_path, self.username, self.address, silent) 54 | 55 | def pull(self, src_path, dst_path='.'): 56 | if os.path.isdir(dst_path): 57 | exist_file = dst_path + '/' + src_path.split('/')[-1] 58 | if os.path.exists(exist_file): 59 | sh.rm('-rf', exist_file) 60 | elif os.path.exists(dst_path): 61 | sh.rm('-f', dst_path) 62 | try: 63 | sh.scp('-r', 64 | '%s@%s:%s' % (self.username, self.address, src_path), 65 | dst_path) 66 | except sh.ErrorReturnCode_1 as e: 67 | six.print_('Error msg {}'.format(e), file=sys.stderr) 68 | return 69 | 70 | def get_props(self): 71 | # TODO(luxuhui@xiaomi.com): read data from yml config 72 | props = {} 73 | props["ro.product.model"] = self.models 74 | return props 75 | 76 | def get_available_device_types(self, device_types, abi, executor): 77 | # TODO(luxuhui@xiaomi.com): get support device_types from config 78 | return Device.get_available_device_types(self, device_types, 79 | abi, executor) 80 | 81 | def get_bench_path(self): 82 | return "/home/%s/tmp/aibench" % self.username 83 | -------------------------------------------------------------------------------- /aibench/python/evaluators/base_evaluator.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | class Evaluator(object): 17 | def __init__(self): 18 | pass 19 | 20 | def prepare_dataset(self): 21 | pass 22 | 23 | def evaluate(self, result_file): 24 | pass 25 | -------------------------------------------------------------------------------- /aibench/python/evaluators/coco_evaluator.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. 
All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from aibench.python.utils import common 17 | from aibench.python.evaluators.base_evaluator import Evaluator 18 | 19 | from pycocotools.coco import COCO 20 | from pycocotools.cocoeval import COCOeval 21 | 22 | COCO_EVAL_URL = "http://images.cocodataset.org/annotations/annotations_trainval2017.zip" # noqa 23 | COCO_DIR = "dataset/coco" 24 | 25 | 26 | class COCOEvaluator(Evaluator): 27 | def __init__(self): 28 | super(COCOEvaluator, self).__init__() 29 | 30 | def prepare_dataset(self): 31 | common.download_and_extract_dataset(COCO_EVAL_URL, COCO_DIR) 32 | 33 | def evaluate(self, result_file): 34 | pass 35 | 36 | 37 | class COCOObjectDetectionEvaluator(COCOEvaluator): 38 | def __init__(self): 39 | super(COCOObjectDetectionEvaluator, self).__init__() 40 | 41 | def prepare_dataset(self): 42 | super(COCOObjectDetectionEvaluator, self).prepare_dataset() 43 | ann_file = '%s/annotations/%s_%s.json' \ 44 | % (COCO_DIR, "instances", "val2017") 45 | self._coco_gt = COCO(ann_file) 46 | 47 | def evaluate(self, result_file): 48 | img_ids = sorted(self._coco_gt.getImgIds()) 49 | coco_dt = self._coco_gt.loadRes(result_file) 50 | 51 | coco_eval = COCOeval(self._coco_gt, coco_dt, "bbox") 52 | coco_eval.params.imgIds = img_ids 53 | coco_eval.evaluate() 54 | coco_eval.accumulate() 55 | coco_eval.summarize() 56 | -------------------------------------------------------------------------------- /aibench/python/evaluators/evaluator_test.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | import unittest 17 | import tempfile 18 | 19 | from aibench.python.utils import common 20 | from aibench.python.evaluators.coco_evaluator import COCOObjectDetectionEvaluator # noqa 21 | 22 | FAKE_BBOX_FILE = "https://raw.githubusercontent.com/cocodataset/cocoapi/master/results/instances_val2014_fakebbox100_results.json" # noqa 23 | 24 | 25 | class TestEvaluator(unittest.TestCase): 26 | 27 | def test_coco_object_detection_evaluator(self): 28 | coco_detection_evaluator = COCOObjectDetectionEvaluator() 29 | coco_detection_evaluator.prepare_dataset() 30 | common.download_and_extract_dataset(FAKE_BBOX_FILE, 31 | tempfile.gettempdir()) 32 | fake_file = "%s/%s" % (tempfile.gettempdir(), 33 | FAKE_BBOX_FILE.split("/")[-1]) 34 | coco_detection_evaluator.evaluate(fake_file) 35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /aibench/python/utils/bench_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import hashlib 16 | 17 | 18 | def file_checksum(fname): 19 | hash_func = hashlib.md5() 20 | with open(fname, "rb") as f: 21 | for chunk in iter(lambda: f.read(4096), b""): 22 | hash_func.update(chunk) 23 | return hash_func.hexdigest() 24 | -------------------------------------------------------------------------------- /aibench/python/utils/common.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import inspect 16 | import six 17 | import os 18 | import zipfile 19 | import tarfile 20 | from six.moves import urllib 21 | 22 | 23 | ################################ 24 | # log 25 | ################################ 26 | class CMDColors: 27 | PURPLE = '\033[95m' 28 | BLUE = '\033[94m' 29 | GREEN = '\033[92m' 30 | YELLOW = '\033[93m' 31 | RED = '\033[91m' 32 | ENDC = '\033[0m' 33 | BOLD = '\033[1m' 34 | UNDERLINE = '\033[4m' 35 | 36 | 37 | def get_frame_info(level): 38 | caller_frame = inspect.stack()[level] 39 | info = inspect.getframeinfo(caller_frame[0]) 40 | return info.filename + ':' + str(info.lineno) + ': ' 41 | 42 | 43 | class AIBenchLogger: 44 | @staticmethod 45 | def header(message): 46 | six.print_(CMDColors.PURPLE + get_frame_info(2) + message 47 | + CMDColors.ENDC) 48 | 49 | @staticmethod 50 | def summary(message): 51 | six.print_(CMDColors.GREEN + get_frame_info(2) + message 52 | + CMDColors.ENDC) 53 | 54 | @staticmethod 55 | def info(message): 56 | six.print_(get_frame_info(2) + message) 57 | 58 | @staticmethod 59 | def warning(message): 60 | six.print_(CMDColors.YELLOW + 'WARNING:' + get_frame_info(2) + message 61 | + CMDColors.ENDC) 62 | 63 | @staticmethod 64 | def error(message, location_info=""): 65 | if not location_info: 66 | location_info = get_frame_info(2) 67 | six.print_(CMDColors.RED + 'ERROR: ' + location_info + message 68 | + CMDColors.ENDC) 69 | exit(1) 70 | 71 | 72 | def aibench_check(condition, message): 73 | if not condition: 74 | AIBenchLogger.error(message, get_frame_info(2)) 75 | 76 | 77 | ################################ 78 | # String Formatter 79 | ################################ 80 | class StringFormatter: 81 | @staticmethod 82 | def table(header, data, title, align="R"): 83 | data_size = len(data) 84 | column_size = len(header) 85 | column_length = [len(str(ele)) + 1 for ele in header] 86 | for row_idx in range(data_size): 87 | data_tuple = data[row_idx] 88 | ele_size = len(data_tuple) 89 | assert (ele_size == column_size) 90 | for i in range(ele_size): 91 | column_length[i] = max(column_length[i], 92 | len(str(data_tuple[i])) + 1) 93 | 94 | table_column_length = sum(column_length) + column_size + 1 95 | dash_line = '-' * table_column_length + '\n' 96 | header_line = '=' * table_column_length + '\n' 97 | output = "" 98 | output += dash_line 99 | output += str(title).center(table_column_length) + '\n' 100 | output += dash_line 101 | output += '|' + '|'.join([str(header[i]).center(column_length[i]) 102 | for i in range(column_size)]) + '|\n' 103 | output += header_line 104 | 105 | for data_tuple in data: 106 | ele_size = len(data_tuple) 107 | row_list = [] 108 | for i in range(ele_size): 109 | if align == "R": 110 | row_list.append(str(data_tuple[i]).rjust(column_length[i])) 111 | elif align == "L": 112 | row_list.append(str(data_tuple[i]).ljust(column_length[i])) 113 | elif align == "C": 114 | row_list.append(str(data_tuple[i]) 115 | .center(column_length[i])) 116 | output += '|' + '|'.join(row_list) + "|\n" + dash_line 117 | return output 118 | 119 | @staticmethod 120 | def block(message): 121 | line_length = 10 + len(str(message)) + 10 122 | star_line = '*' * line_length + '\n' 123 | return star_line + str(message).center(line_length) + '\n' + star_line 124 | 125 | 126 | ABI_TYPES = [ 127 | "armeabi-v7a", 128 | "arm64-v8a", 129 | "armhf", 130 | "aarch64", 131 | "host", 132 | ] 133 | 134 | ABI_TOOLCHAIN_CONFIG = { 135 | "armeabi-v7a": "android", 136 | "arm64-v8a": "android", 137 | "armhf": "arm_linux_gnueabihf", 138 | "aarch64": 
"aarch64_linux_gnu", 139 | "host": "", 140 | } 141 | 142 | 143 | def download_and_extract_dataset(url, download_dir): 144 | filename = url.split('/')[-1] 145 | file_path = os.path.join(download_dir, filename) 146 | if not os.path.exists(file_path): 147 | if not os.path.exists(download_dir): 148 | os.makedirs(download_dir) 149 | 150 | print("Downloading %s" % url) 151 | file_path, _ = urllib.request.urlretrieve(url, file_path) 152 | 153 | if file_path.endswith(".zip"): 154 | zipfile.ZipFile(file=file_path, mode="r").extractall(download_dir) 155 | elif file_path.endswith((".tar.gz", ".tgz")): 156 | tarfile.open(name=file_path, mode="r:gz").extractall(download_dir) 157 | 158 | print("Done extracted to %s" % download_dir) 159 | else: 160 | print("Data has already downloaded and extracted.") 161 | -------------------------------------------------------------------------------- /aibench/python/utils/sh_commands.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The MobileAIBench Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import hashlib 16 | import os 17 | import sh 18 | 19 | import aibench.python.utils.bench_utils as bench_utils 20 | 21 | 22 | def strip_invalid_utf8(str): 23 | return sh.iconv(str, "-c", "-t", "UTF-8") 24 | 25 | 26 | def split_stdout(stdout_str): 27 | stdout_str = strip_invalid_utf8(stdout_str) 28 | # Filter out last empty line 29 | return [line.strip() for line in stdout_str.split('\n') 30 | if len(line.strip()) > 0] 31 | 32 | 33 | def adb_push_file(src_file, dst_dir, serialno, silent=False): 34 | if not os.path.isfile(src_file): 35 | print("Not file, skip pushing " + src_file) 36 | return 37 | src_checksum = bench_utils.file_checksum(src_file) 38 | dst_file = os.path.join(dst_dir, os.path.basename(src_file)) 39 | stdout_buff = [] 40 | try: 41 | sh.adb("-s", serialno, "shell", "md5sum", dst_file, 42 | _out=lambda line: stdout_buff.append(line)) 43 | except sh.ErrorReturnCode_1: 44 | print("Push %s to %s" % (src_file, dst_dir)) 45 | sh.adb("-s", serialno, "push", src_file, dst_dir) 46 | else: 47 | dst_checksum = stdout_buff[0].split()[0] 48 | if src_checksum == dst_checksum: 49 | if not silent: 50 | print("Equal checksum with %s and %s" % (src_file, dst_file)) 51 | else: 52 | if not silent: 53 | print("Push %s to %s" % (src_file, dst_dir)) 54 | sh.adb("-s", serialno, "push", src_file, dst_dir) 55 | 56 | 57 | def adb_push(src_path, dst_dir, serialno, silent=False): 58 | if os.path.isdir(src_path): 59 | for src_file in os.listdir(src_path): 60 | adb_push_file(os.path.join(src_path, src_file), 61 | dst_dir, serialno, silent) 62 | else: 63 | adb_push_file(src_path, dst_dir, serialno, silent) 64 | 65 | 66 | def adb_pull(src_path, dst_path, serialno): 67 | print("Pull %s to %s" % (src_path, dst_path)) 68 | try: 69 | sh.adb("-s", serialno, "pull", src_path, dst_path) 70 | except Exception as e: 71 | print("Error msg: %s" % e.stderr) 72 | 73 | 74 | def 
ssh_push_file(src_file, dst_dir, username, address, silent=False): 75 | if not os.path.isfile(src_file): 76 | print("Not file, skip pushing " + src_file) 77 | return 78 | src_checksum = bench_utils.file_checksum(src_file) 79 | dst_file = os.path.join(dst_dir, os.path.basename(src_file)) 80 | stdout_buff = [] 81 | try: 82 | sh.ssh('%s@%s' % (username, address), "md5sum", dst_file, 83 | _out=lambda line: stdout_buff.append(line)) 84 | except sh.ErrorReturnCode_1: 85 | print("Scp %s to %s" % (src_file, dst_dir)) 86 | sh.ssh('%s@%s' % (username, address), "mkdir -p %s" % dst_dir) 87 | sh.scp(src_file, '%s@%s:%s' % (username, address, dst_dir)) 88 | else: 89 | dst_checksum = stdout_buff[0].split()[0] 90 | if src_checksum == dst_checksum: 91 | if not silent: 92 | print("Equal checksum with %s and %s" % (src_file, dst_file)) 93 | else: 94 | if not silent: 95 | print("Scp %s to %s" % (src_file, dst_dir)) 96 | sh.scp(src_file, '%s@%s:%s' % (username, address, dst_dir)) 97 | 98 | 99 | def ssh_push(src_path, dst_dir, username, address, silent=False): 100 | if os.path.isdir(src_path): 101 | print("Start scp dir %s=>%s, basename=%s" 102 | % (src_path, dst_dir, os.path.basename(src_path))) 103 | sh.scp("-r", src_path, '%s@%s:%s' % (username, address, dst_dir)) 104 | tmp_dst_dir = os.path.join(dst_dir, os.path.basename(src_path)) 105 | sh.ssh('%s@%s' % (username, address), 106 | "mv %s/* %s" % (tmp_dst_dir, dst_dir)) 107 | else: 108 | ssh_push_file(src_path, dst_dir, username, address, silent) 109 | -------------------------------------------------------------------------------- /aibench/utils/BUILD.bazel: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Mace utils. 3 | # 4 | package( 5 | default_visibility = ["//visibility:public"], 6 | ) 7 | 8 | licenses(["notice"]) # Apache 2.0 9 | 10 | load( 11 | "//aibench:aibench.bzl", 12 | "if_android", 13 | "if_android_armv7", 14 | ) 15 | 16 | cc_library( 17 | name = "utils", 18 | srcs = glob( 19 | [ 20 | "*.cc", 21 | ], 22 | ), 23 | copts = [ 24 | "-Werror", 25 | "-Wextra", 26 | "-Wno-missing-field-initializers", 27 | ] + if_android_armv7([ 28 | "-mfpu=neon", 29 | "-mfloat-abi=softfp", 30 | ]), 31 | linkopts = if_android([ 32 | "-llog", 33 | ]), 34 | deps = [ 35 | "//include:public_headers", 36 | ], 37 | alwayslink = 1, 38 | ) 39 | -------------------------------------------------------------------------------- /aibench/utils/status.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include 16 | 17 | #include "aibench/utils/memory.h" 18 | #include "aibench/public/aibench.h" 19 | 20 | namespace aibench { 21 | 22 | class Status::Impl { 23 | public: 24 | explicit Impl(const Code code): code_(code), information_("") {} 25 | Impl(const Code code, const std::string &informaton) 26 | : code_(code), information_(informaton) {} 27 | ~Impl() = default; 28 | 29 | void SetCode(const Code code) { code_ = code; } 30 | Code code() const { return code_; } 31 | void SetInformation(const std::string &info) { information_ = info; } 32 | std::string information() const { 33 | if (information_.empty()) { 34 | return CodeToString(); 35 | } else { 36 | return CodeToString() + ": " + information_; 37 | } 38 | } 39 | 40 | private: 41 | std::string CodeToString() const { 42 | switch (code_) { 43 | case Status::SUCCESS: 44 | return "Success"; 45 | case Status::INVALID_ARGS: 46 | return "Invalid Arguments"; 47 | case Status::OUT_OF_RESOURCES: 48 | return "Out of resources"; 49 | case UNSUPPORTED: 50 | return "Unsupported"; 51 | case RUNTIME_ERROR: 52 | return "Runtime error"; 53 | default: 54 | std::ostringstream os; 55 | os << code_; 56 | return os.str(); 57 | } 58 | } 59 | 60 | private: 61 | Status::Code code_; 62 | std::string information_; 63 | }; 64 | 65 | Status::Status() 66 | : impl_(new Status::Impl(Status::SUCCESS)) {} 67 | Status::Status(const Code code) : impl_(new Status::Impl(code)) {} 68 | Status::Status(const Code code, const std::string &information) 69 | : impl_(new Status::Impl(code, information)) {} 70 | Status::Status(const Status &other) 71 | : impl_(new Status::Impl(other.code(), other.information())) {} 72 | Status::Status(Status &&other) 73 | : impl_(new Status::Impl(other.code(), other.information())) {} 74 | Status::~Status() = default; 75 | 76 | Status& Status::operator=(const Status &other) { 77 | impl_->SetCode(other.code()); 78 | impl_->SetInformation(other.information()); 79 | return *this; 80 | } 81 | Status& Status::operator=(const Status &&other) { 82 | impl_->SetCode(other.code()); 83 | impl_->SetInformation(other.information()); 84 | return *this; 85 | } 86 | 87 | Status::Code Status::code() const { 88 | return impl_->code(); 89 | } 90 | 91 | std::string Status::information() const { 92 | return impl_->information(); 93 | } 94 | 95 | bool Status::operator==(const Status &other) const { 96 | return other.code() == impl_->code(); 97 | } 98 | 99 | bool Status::operator!=(const Status &other) const { 100 | return other.code() != impl_->code(); 101 | } 102 | 103 | } // namespace aibench 104 | -------------------------------------------------------------------------------- /include/BUILD.bazel: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | cc_library( 8 | name = "public_headers", 9 | hdrs = glob([ 10 | "aibench/port/*.h", 11 | "aibench/utils/*.h", 12 | "aibench/public/*.h", 13 | ]), 14 | strip_include_prefix = "", 15 | copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"], 16 | ) 17 | -------------------------------------------------------------------------------- /include/aibench/port/env.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_ENV_H_ 16 | #define AIBENCH_PORT_ENV_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | #ifdef _WIN32 26 | #include 27 | #endif 28 | 29 | #include 30 | #include "aibench/public/aibench.h" 31 | 32 | namespace aibench { 33 | namespace port { 34 | 35 | class MallocLogger { 36 | public: 37 | MallocLogger() = default; 38 | virtual ~MallocLogger() = default; 39 | }; 40 | 41 | class FileSystem; 42 | class LogWriter; 43 | 44 | class Env { 45 | public: 46 | virtual int64_t NowMicros() = 0; 47 | virtual Status AdviseFree(void *addr, size_t length); 48 | virtual Status GetCPUMaxFreq(std::vector *max_freqs); 49 | virtual Status SchedSetAffinity(const std::vector &cpu_ids); 50 | virtual FileSystem *GetFileSystem() = 0; 51 | virtual LogWriter *GetLogWriter() = 0; 52 | // Return the current backtrace, will allocate memory inside the call 53 | // which may fail 54 | virtual std::vector GetBackTraceUnsafe(int max_steps) = 0; 55 | virtual std::unique_ptr NewMallocLogger( 56 | std::ostringstream *oss, 57 | const std::string &name); 58 | 59 | static Env *Default(); 60 | }; 61 | 62 | } // namespace port 63 | 64 | inline int64_t NowMicros() { 65 | return port::Env::Default()->NowMicros(); 66 | } 67 | 68 | inline Status AdviseFree(void *addr, size_t length) { 69 | return port::Env::Default()->AdviseFree(addr, length); 70 | } 71 | 72 | inline Status GetCPUMaxFreq(std::vector *max_freqs) { 73 | return port::Env::Default()->GetCPUMaxFreq(max_freqs); 74 | } 75 | 76 | inline Status SchedSetAffinity(const std::vector &cpu_ids) { 77 | return port::Env::Default()->SchedSetAffinity(cpu_ids); 78 | } 79 | 80 | inline port::FileSystem *GetFileSystem() { 81 | return port::Env::Default()->GetFileSystem(); 82 | } 83 | 84 | inline Status Memalign(void **memptr, size_t alignment, size_t size) { 85 | #ifdef _WIN32 86 | *memptr = _aligned_malloc(size, alignment); 87 | if (*memptr == nullptr) { 88 | return Status::OUT_OF_RESOURCES; 89 | } else { 90 | return Status::SUCCESS; 91 | } 92 | #else 93 | #if defined(__ANDROID__) || defined(__hexagon__) 94 | *memptr = memalign(alignment, size); 95 | if (*memptr == nullptr) { 96 | return Status::OUT_OF_RESOURCES; 97 | } else { 98 | return Status::SUCCESS; 99 | } 100 | #else 101 | int error = posix_memalign(memptr, alignment, size); 102 | if (error != 0) { 103 | if (*memptr != nullptr) { 104 | free(*memptr); 105 | *memptr = nullptr; 106 | } 107 | return Status::OUT_OF_RESOURCES; 108 | } else { 109 | return Status::SUCCESS; 110 | } 111 | #endif 112 | #endif 113 | } 114 | 115 | inline Status GetEnv(const char *name, std::string *value) { 116 | #ifdef _WIN32 117 | char *val; 118 | size_t len; 119 | errno_t error = _dupenv_s(&val, &len, name); 120 | if (error != 0) { 121 | return Status::RUNTIME_ERROR; 122 | } else { 123 | if (val != nullptr) { 124 | *value = std::string(val); 125 | free(val); 126 | } 127 | return Status::SUCCESS; 128 | } 129 | #else 130 | char *val = getenv(name); 131 | if (val != nullptr) { 132 | *value = std::string(val); 133 | } 134 | return 
Status::SUCCESS; 135 | #endif 136 | } 137 | 138 | #if defined(_WIN32) && !defined(S_ISREG) 139 | #define S_ISREG(m) (((m) & 0170000) == (0100000)) 140 | #endif 141 | } // namespace aibench 142 | 143 | #endif // AIBENCH_PORT_ENV_H_ 144 | -------------------------------------------------------------------------------- /include/aibench/port/file_system.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_UTILS_FILE_SYSTEM_H_ 16 | #define AIBENCH_UTILS_FILE_SYSTEM_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "aibench/public/aibench.h" 23 | #include "aibench/utils/macros.h" 24 | 25 | namespace aibench { 26 | namespace port { 27 | 28 | class ReadOnlyMemoryRegion { 29 | public: 30 | ReadOnlyMemoryRegion() = default; 31 | virtual ~ReadOnlyMemoryRegion() = default; 32 | virtual const void *data() const = 0; 33 | virtual uint64_t length() const = 0; 34 | private: 35 | AIBENCH_DISABLE_COPY_AND_ASSIGN(ReadOnlyMemoryRegion); 36 | }; 37 | 38 | class ReadOnlyBufferMemoryRegion : public ReadOnlyMemoryRegion { 39 | public: 40 | ReadOnlyBufferMemoryRegion() : data_(nullptr), length_(0) {} 41 | ReadOnlyBufferMemoryRegion(const void *data, uint64_t length) : 42 | data_(data), length_(length) {} 43 | const void *data() const override { return data_; } 44 | uint64_t length() const override { return length_; } 45 | 46 | private: 47 | const void *data_; 48 | uint64_t length_; 49 | }; 50 | 51 | class WritableFile { 52 | public: 53 | WritableFile() {} 54 | virtual ~WritableFile(); 55 | virtual Status Append(const char *data, size_t length) = 0; 56 | virtual Status Close() = 0; 57 | virtual Status Flush() = 0; 58 | private: 59 | AIBENCH_DISABLE_COPY_AND_ASSIGN(WritableFile); 60 | }; 61 | 62 | class FileSystem { 63 | public: 64 | FileSystem() = default; 65 | virtual ~FileSystem() = default; 66 | virtual Status NewReadOnlyMemoryRegionFromFile(const char *fname, 67 | std::unique_ptr* result) = 0; 68 | }; 69 | 70 | } // namespace port 71 | } // namespace aibench 72 | 73 | #endif // AIBENCH_UTILS_FILE_SYSTEM_H_ 74 | -------------------------------------------------------------------------------- /include/aibench/port/logger.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_PORT_LOGGER_H_ 16 | #define AIBENCH_PORT_LOGGER_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | namespace aibench { 23 | 24 | enum LogLevel { 25 | INVALID_MIN = 0, 26 | INFO = 1, 27 | WARNING = 2, 28 | ERROR = 3, 29 | FATAL = 4, 30 | INVALID_MAX, 31 | }; 32 | 33 | namespace port { 34 | 35 | inline bool LogLevelPassThreashold(const LogLevel level, 36 | const LogLevel threshold) { 37 | return level >= threshold; 38 | } 39 | 40 | LogLevel LogLevelFromStr(const char *log_level_str); 41 | int VLogLevelFromStr(const char *vlog_level_str); 42 | 43 | inline LogLevel MinLogLevelFromEnv() { 44 | // Read the min log level from env once during the first call to logging. 45 | static LogLevel log_level = LogLevelFromStr(getenv("AIBENCH_CPP_MIN_LOG_LEVEL")); 46 | return log_level; 47 | } 48 | 49 | inline int MinVLogLevelFromEnv() { 50 | // Read the min vlog level from env once during the first call to logging. 51 | static int vlog_level = VLogLevelFromStr(getenv("AIBENCH_CPP_MIN_VLOG_LEVEL")); 52 | return vlog_level; 53 | } 54 | 55 | class LogWriter { 56 | public: 57 | LogWriter() = default; 58 | virtual ~LogWriter() = default; 59 | virtual void WriteLogMessage(const char *fname, 60 | const int line, 61 | const LogLevel severity, 62 | const char *message); 63 | }; 64 | 65 | class Logger : public std::ostringstream { 66 | public: 67 | Logger(const char *fname, int line, LogLevel severity); 68 | ~Logger(); 69 | 70 | private: 71 | void GenerateLogMessage(); 72 | void DealWithFatal(); 73 | 74 | const char *fname_; 75 | int line_; 76 | LogLevel severity_; 77 | }; 78 | 79 | } // namespace port 80 | 81 | // Whether the log level pass the env configured threshold, can be used for 82 | // short cutting. 83 | inline bool ShouldGenerateLogMessage(LogLevel severity) { 84 | LogLevel threshold = port::MinLogLevelFromEnv(); 85 | return port::LogLevelPassThreashold(severity, threshold); 86 | } 87 | 88 | inline bool ShouldGenerateVLogMessage(int vlog_level) { 89 | int threshold = port::MinVLogLevelFromEnv(); 90 | return ShouldGenerateLogMessage(INFO) && 91 | vlog_level <= threshold; 92 | } 93 | } // namespace aibench 94 | 95 | #endif // AIBENCH_PORT_LOGGER_H_ 96 | -------------------------------------------------------------------------------- /include/aibench/public/aibench.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // This file defines core AIBENCH APIs. 16 | // There APIs will be stable and backward compatible. 
17 | 18 | #ifndef AIBENCH_PUBLIC_AIBENCH_H_ 19 | #define AIBENCH_PUBLIC_AIBENCH_H_ 20 | 21 | namespace aibench { 22 | 23 | class Status { 24 | public: 25 | enum Code { 26 | SUCCESS = 0, 27 | RUNTIME_ERROR = 1, 28 | UNSUPPORTED = 2, 29 | INVALID_ARGS = 3, 30 | OUT_OF_RESOURCES = 4, 31 | }; 32 | 33 | public: 34 | Status(); 35 | Status(const Code code); // NOLINT(runtime/explicit) 36 | Status(const Code code, const std::string &information); 37 | Status(const Status &); 38 | Status(Status &&); 39 | Status &operator=(const Status &); 40 | Status &operator=(const Status &&); 41 | ~Status(); 42 | Code code() const; 43 | std::string information() const; 44 | 45 | bool operator==(const Status &other) const; 46 | bool operator!=(const Status &other) const; 47 | 48 | private: 49 | class Impl; 50 | std::unique_ptr impl_; 51 | }; 52 | 53 | } // namespace aibench 54 | 55 | #endif // AIBENCH_PUBLIC_AIBENCH_H_ 56 | -------------------------------------------------------------------------------- /include/aibench/utils/logging.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_UTILS_LOGGING_H_ 16 | #define AIBENCH_UTILS_LOGGING_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | #include "aibench/public/aibench.h" 26 | #include "aibench/port/env.h" 27 | #include "aibench/port/logger.h" 28 | #include "aibench/utils/macros.h" 29 | #include "aibench/utils/memory.h" 30 | #include "aibench/utils/string_util.h" 31 | 32 | 33 | namespace aibench { 34 | 35 | namespace logging_internal { 36 | 37 | #define LOG(severity) \ 38 | ::aibench::port::Logger(__FILE__, __LINE__, aibench::severity) 39 | 40 | #define LOG_PTR(severity) \ 41 | make_unique(__FILE__, __LINE__, aibench::severity) 42 | 43 | #define VLOG_IS_ON(vll) (aibench::ShouldGenerateVLogMessage(vll)) 44 | #define VLOG(vll) if (VLOG_IS_ON(vll)) LOG(INFO) 45 | 46 | // AIBENCH_CHECK/AIBENCH_ASSERT dies with a fatal error if condition is not true. 47 | // AIBENCH_ASSERT is controlled by NDEBUG ('-c opt' for bazel) while AIBENCH_CHECK 48 | // will be executed regardless of compilation mode. 49 | // Therefore, it is safe to do things like: 50 | // AIBENCH_CHECK(fp->Write(x) == 4) 51 | // AIBENCH_CHECK(fp->Write(x) == 4, "Write failed") 52 | // which are not safe for AIBENCH_ASSERT. 53 | #define AIBENCH_CHECK(condition, ...) \ 54 | if (!(condition)) \ 55 | LOG(FATAL) << "Check failed: " #condition " " << aibench::MakeString(__VA_ARGS__) 56 | 57 | #ifndef NDEBUG 58 | #define AIBENCH_ASSERT(condition, ...) \ 59 | if (!(condition)) \ 60 | LOG(FATAL) << "Assert failed: " #condition " " \ 61 | << aibench::MakeString(__VA_ARGS__) 62 | #else 63 | #define AIBENCH_ASSERT(condition, ...) 
((void)0) 64 | #endif 65 | 66 | template 67 | T &&CheckNotNull(const char *file, int line, const char *exprtext, T &&t) { 68 | if (t == nullptr) { 69 | ::aibench::port::Logger(file, line, FATAL) << std::string(exprtext); 70 | } 71 | return std::forward(t); 72 | } 73 | 74 | #define AIBENCH_CHECK_NOTNULL(val) \ 75 | ::aibench::logging_internal::CheckNotNull(__FILE__, __LINE__, \ 76 | "'" #val "' Must not be NULL", (val)) 77 | 78 | #define AIBENCH_NOT_IMPLEMENTED AIBENCH_CHECK(false, "not implemented") 79 | 80 | #define AIBENCH_CHECK_SUCCESS(stmt) \ 81 | { \ 82 | Status status = (stmt); \ 83 | if (status != Status::SUCCESS) { \ 84 | LOG(FATAL) << #stmt << " failed with error: " \ 85 | << status.information(); \ 86 | } \ 87 | } 88 | 89 | #define AIBENCH_RETURN_IF_ERROR(stmt) \ 90 | { \ 91 | Status status = (stmt); \ 92 | if (status != Status::SUCCESS) { \ 93 | VLOG(0) << #stmt << " failed with error: " \ 94 | << status.information(); \ 95 | return status; \ 96 | } \ 97 | } 98 | 99 | class LatencyLogger { 100 | public: 101 | LatencyLogger(int vlog_level, const std::string &message) 102 | : vlog_level_(vlog_level), message_(message) { 103 | if (VLOG_IS_ON(vlog_level_)) { 104 | start_micros_ = NowMicros(); 105 | VLOG(vlog_level_) << message_ << " started"; 106 | } 107 | } 108 | ~LatencyLogger() { 109 | if (VLOG_IS_ON(vlog_level_)) { 110 | int64_t stop_micros = NowMicros(); 111 | VLOG(vlog_level_) << message_ 112 | << " latency: " << stop_micros - start_micros_ << " us"; 113 | } 114 | } 115 | 116 | private: 117 | const int vlog_level_; 118 | const std::string message_; 119 | int64_t start_micros_; 120 | 121 | AIBENCH_DISABLE_COPY_AND_ASSIGN(LatencyLogger); 122 | }; 123 | 124 | #define AIBENCH_LATENCY_LOGGER(vlog_level, ...) \ 125 | aibench::logging_internal::LatencyLogger latency_logger_##__line__( \ 126 | vlog_level, VLOG_IS_ON(vlog_level) ? aibench::MakeString(__VA_ARGS__) : "") 127 | 128 | 129 | #ifdef AIBENCH_ENABLE_MALLOC_LOGGING 130 | #define AIBENCH_MEMORY_LOGGING_GUARD() \ 131 | auto malloc_logger_##__line__ = port::Env::Default()->NewMallocLogger( \ 132 | ::aibench::port::Logger(__FILE__, __LINE__, aibench::INFO), \ 133 | std::string(__FILE__) + ":" + std::string(__func__)); 134 | #else 135 | #define AIBENCH_MEMORY_LOGGING_GUARD() 136 | #endif 137 | 138 | } // namespace logging_internal 139 | } // namespace aibench 140 | 141 | #endif // AIBENCH_UTILS_LOGGING_H_ 142 | -------------------------------------------------------------------------------- /include/aibench/utils/macros.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_UTILS_MACROS_H_ 16 | #define AIBENCH_UTILS_MACROS_H_ 17 | 18 | namespace aibench { 19 | 20 | // Disable the copy and assignment operator for a class. 
21 | #ifndef AIBENCH_DISABLE_COPY_AND_ASSIGN 22 | #define AIBENCH_DISABLE_COPY_AND_ASSIGN(CLASSNAME) \ 23 | CLASSNAME(const CLASSNAME &) = delete; \ 24 | CLASSNAME &operator=(const CLASSNAME &) = delete; 25 | #endif 26 | 27 | #ifndef AIBENCH_EMPTY_VIRTUAL_DESTRUCTOR 28 | #define AIBENCH_EMPTY_VIRTUAL_DESTRUCTOR(CLASSNAME) \ 29 | public: \ 30 | virtual ~CLASSNAME() {} 31 | #endif 32 | 33 | #define AIBENCH_UNUSED(var) (void)(var) 34 | 35 | #define AIBENCH_COMPUTE_KERNEL_SOURCE(...) #__VA_ARGS__ 36 | 37 | // GCC can be told that a certain branch is not likely to be taken (for 38 | // instance, a CHECK failure), and use that information in static analysis. 39 | // Giving it this information can help it optimize for the common case in 40 | // the absence of better information (ie. -fprofile-arcs). 41 | #if defined(COMPILER_GCC3) 42 | #define AIBENCH_PREDICT_FALSE(x) (__builtin_expect(x, 0)) 43 | #define AIBENCH_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) 44 | #else 45 | #define AIBENCH_PREDICT_FALSE(x) (x) 46 | #define AIBENCH_PREDICT_TRUE(x) (x) 47 | #endif 48 | 49 | } // namespace aibench 50 | 51 | #endif // AIBENCH_UTILS_MACROS_H_ 52 | -------------------------------------------------------------------------------- /include/aibench/utils/memory.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_UTILS_MEMORY_H_ 16 | #define AIBENCH_UTILS_MEMORY_H_ 17 | 18 | #include 19 | #include 20 | 21 | namespace aibench { 22 | 23 | namespace memory_internal { 24 | 25 | // Traits to select proper overload and return type for `make_unique<>`. 26 | template 27 | struct MakeUniqueResult { 28 | using scalar = std::unique_ptr; 29 | }; 30 | template 31 | struct MakeUniqueResult { 32 | using array = std::unique_ptr; 33 | }; 34 | template 35 | struct MakeUniqueResult { 36 | using invalid = void; 37 | }; 38 | 39 | } // namespace memory_internal 40 | 41 | // gcc 4.8 has __cplusplus at 201301 but doesn't define make_unique. Other 42 | // supported compilers either just define __cplusplus as 201103 but have 43 | // make_unique (msvc), or have make_unique whenever __cplusplus > 201103 (clang) 44 | #if (__cplusplus > 201103L || defined(_MSC_VER)) && \ 45 | !(defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8) 46 | using std::make_unique; 47 | #else 48 | 49 | // `make_unique` overload for non-array types. 50 | template 51 | typename memory_internal::MakeUniqueResult::scalar make_unique( 52 | Args&&... args) { 53 | return std::unique_ptr(new T(std::forward(args)...)); 54 | } 55 | 56 | // `make_unique` overload for an array T[] of unknown bounds. 57 | // The array allocation needs to use the `new T[size]` form and cannot take 58 | // element constructor arguments. The `std::unique_ptr` will manage destructing 59 | // these array elements. 
60 | template 61 | typename memory_internal::MakeUniqueResult::array make_unique(size_t n) { 62 | return std::unique_ptr(new typename std::remove_extent::type[n]()); 63 | } 64 | 65 | // `make_unique` overload for an array T[N] of known bounds. 66 | // This construction will be rejected. 67 | template 68 | typename memory_internal::MakeUniqueResult::invalid make_unique( 69 | Args&&... /* args */) = delete; 70 | #endif 71 | 72 | } // namespace aibench 73 | 74 | #endif // AIBENCH_UTILS_MEMORY_H_ 75 | -------------------------------------------------------------------------------- /include/aibench/utils/string_util.h: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The AIBENCH Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef AIBENCH_UTILS_STRING_UTIL_H_ 16 | #define AIBENCH_UTILS_STRING_UTIL_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | namespace aibench { 24 | namespace string_util { 25 | 26 | inline void MakeStringInternal(std::stringstream & /*ss*/) {} 27 | 28 | template 29 | inline void MakeStringInternal(std::stringstream &ss, const T &t) { 30 | ss << t; 31 | } 32 | 33 | template 34 | inline void MakeStringInternal(std::stringstream &ss, 35 | const T &t, 36 | const Args &... args) { 37 | MakeStringInternal(ss, t); 38 | MakeStringInternal(ss, args...); 39 | } 40 | 41 | class StringFormatter { 42 | public: 43 | static std::string Table(const std::string &title, 44 | const std::vector &header, 45 | const std::vector> &data); 46 | }; 47 | 48 | } // namespace string_util 49 | 50 | template 51 | std::string MakeString(const Args &... args) { 52 | std::stringstream ss; 53 | string_util::MakeStringInternal(ss, args...); 54 | return ss.str(); 55 | } 56 | 57 | template 58 | std::string MakeListString(const T *args, size_t size) { 59 | std::stringstream ss; 60 | ss << "["; 61 | for (size_t i = 0; i < size; ++i) { 62 | ss << args[i]; 63 | if (i < size - 1) { 64 | ss << ", "; 65 | } 66 | } 67 | ss << "]"; 68 | return ss.str(); 69 | } 70 | 71 | template 72 | std::string MakeString(const std::vector &args) { 73 | return MakeListString(args.data(), args.size()); 74 | } 75 | 76 | // Specializations for already-a-string types. 
77 | template <> 78 | inline std::string MakeString(const std::string &str) { 79 | return str; 80 | } 81 | 82 | inline std::string MakeString(const char *c_str) { return std::string(c_str); } 83 | 84 | inline std::string ToLower(const std::string &src) { 85 | std::string dest(src); 86 | std::transform(src.begin(), src.end(), dest.begin(), ::tolower); 87 | return dest; 88 | } 89 | 90 | inline std::string ToUpper(const std::string &src) { 91 | std::string dest(src); 92 | std::transform(src.begin(), src.end(), dest.begin(), ::toupper); 93 | return dest; 94 | } 95 | 96 | std::string ObfuscateString(const std::string &src, 97 | const std::string &lookup_table); 98 | 99 | std::string ObfuscateString(const std::string &src); 100 | 101 | std::string ObfuscateSymbol(const std::string &src); 102 | 103 | #ifdef AIBENCH_OBFUSCATE_LITERALS 104 | #define AIBENCH_OBFUSCATE_STRING(str) ObfuscateString(str) 105 | #define AIBENCH_OBFUSCATE_SYMBOL(str) ObfuscateSymbol(str) 106 | #else 107 | #define AIBENCH_OBFUSCATE_STRING(str) (str) 108 | #define AIBENCH_OBFUSCATE_SYMBOL(str) (str) 109 | #endif 110 | 111 | std::vector Split(const std::string &str, char delims); 112 | void StripString(std::string *s); 113 | // Get the right neighbor after first occurence of pattern in str 114 | std::string GetStrAfterPattern(const std::string &str, 115 | const std::string &pattern); 116 | 117 | } // namespace aibench 118 | 119 | #endif // AIBENCH_UTILS_STRING_UTIL_H_ 120 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoMi/mobile-ai-bench/a91bca47af319b4d88b87b58a6c879de9c910733/logo.png -------------------------------------------------------------------------------- /report/csv_to_html.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | 4 | csvs = ['run_report', 'prepare_report', 'precision_report'] 5 | 6 | for csv in csvs: 7 | csv_file = 'output/' + csv + '.csv' 8 | if os.path.exists(csv_file): 9 | df = pd.read_csv('output/' + csv + '.csv') 10 | df.to_html('report/' + csv + '.html') 11 | 12 | dir_path = os.path.dirname(os.path.realpath(__file__)) 13 | print("Open %s/index.html in a browser to see the report!" % dir_path) 14 | -------------------------------------------------------------------------------- /report/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Mobile-AI-Bench Report 5 | 6 | 7 |

[report/index.html: the HTML tags of this file were stripped during extraction, leaving only its text nodes and line-number prefixes. The recoverable content is a page heading "Mobile-AI-Bench Report" followed by three section headings, "Running Performance", "Prepare Performance", and "Model Precision", each of which presumably embeds the corresponding run_report.html, prepare_report.html, or precision_report.html page generated by report/csv_to_html.py.]
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pycocotools>=2.0.0 2 | -------------------------------------------------------------------------------- /third_party/compilers/aarch64_compiler.BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ['//visibility:public']) 2 | 3 | filegroup( 4 | name = 'gcc', 5 | srcs = [ 6 | 'bin/aarch64-linux-gnu-gcc', 7 | ], 8 | ) 9 | 10 | filegroup( 11 | name = 'ar', 12 | srcs = [ 13 | 'bin/aarch64-linux-gnu-ar', 14 | ], 15 | ) 16 | 17 | filegroup( 18 | name = 'ld', 19 | srcs = [ 20 | 'bin/aarch64-linux-gnu-ld', 21 | ], 22 | ) 23 | 24 | filegroup( 25 | name = 'nm', 26 | srcs = [ 27 | 'bin/aarch64-linux-gnu-nm', 28 | ], 29 | ) 30 | 31 | filegroup( 32 | name = 'objcopy', 33 | srcs = [ 34 | 'bin/aarch64-linux-gnu-objcopy', 35 | ], 36 | ) 37 | 38 | filegroup( 39 | name = 'objdump', 40 | srcs = [ 41 | 'bin/aarch64-linux-gnu-objdump', 42 | ], 43 | ) 44 | 45 | filegroup( 46 | name = 'strip', 47 | srcs = [ 48 | 'bin/aarch64-linux-gnu-strip', 49 | ], 50 | ) 51 | 52 | filegroup( 53 | name = 'as', 54 | srcs = [ 55 | 'bin/aarch64-linux-gnu-as', 56 | ], 57 | ) 58 | 59 | filegroup( 60 | name = "compiler_pieces", 61 | srcs = glob([ 62 | "aarch64-linux-gnu/**", 63 | "libexec/**", 64 | "lib/gcc/aarch64-linux-gnu/**", 65 | "include/**", 66 | ]), 67 | ) 68 | 69 | filegroup( 70 | name = "compiler_components", 71 | srcs = [ 72 | ":ar", 73 | ":as", 74 | ":gcc", 75 | ":ld", 76 | ":nm", 77 | ":objcopy", 78 | ":objdump", 79 | ":strip", 80 | ], 81 | ) 82 | -------------------------------------------------------------------------------- /third_party/compilers/arm_compiler.BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | filegroup( 4 | name = "gcc", 5 | srcs = [ 6 | "bin/arm-linux-gnueabihf-gcc", 7 | ], 8 | ) 9 | 10 | filegroup( 11 | name = "ar", 12 | srcs = [ 13 | "bin/arm-linux-gnueabihf-ar", 14 | ], 15 | ) 16 | 17 | filegroup( 18 | name = "ld", 19 | srcs = [ 20 | "bin/arm-linux-gnueabihf-ld", 21 | ], 22 | ) 23 | 24 | filegroup( 25 | name = "nm", 26 | srcs = [ 27 | "bin/arm-linux-gnueabihf-nm", 28 | ], 29 | ) 30 | 31 | filegroup( 32 | name = "objcopy", 33 | srcs = [ 34 | "bin/arm-linux-gnueabihf-objcopy", 35 | ], 36 | ) 37 | 38 | filegroup( 39 | name = "objdump", 40 | srcs = [ 41 | "bin/arm-linux-gnueabihf-objdump", 42 | ], 43 | ) 44 | 45 | filegroup( 46 | name = "strip", 47 | srcs = [ 48 | "bin/arm-linux-gnueabihf-strip", 49 | ], 50 | ) 51 | 52 | filegroup( 53 | name = "as", 54 | srcs = [ 55 | "bin/arm-linux-gnueabihf-as", 56 | ], 57 | ) 58 | 59 | filegroup( 60 | name = "compiler_pieces", 61 | srcs = glob([ 62 | "arm-linux-gnueabihf/**", 63 | "libexec/**", 64 | "lib/gcc/arm-linux-gnueabihf/**", 65 | "include/**", 66 | ]), 67 | ) 68 | 69 | filegroup( 70 | name = "compiler_components", 71 | srcs = [ 72 | ":ar", 73 | ":as", 74 | ":gcc", 75 | ":ld", 76 | ":nm", 77 | ":objcopy", 78 | ":objdump", 79 | ":strip", 80 | ], 81 | ) 82 | -------------------------------------------------------------------------------- /third_party/gflags/COPYING.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006, Google Inc. 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following disclaimer 12 | in the documentation and/or other materials provided with the 13 | distribution. 14 | * Neither the name of Google Inc. nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /third_party/googletest/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2008, Google Inc. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following disclaimer 12 | in the documentation and/or other materials provided with the 13 | distribution. 14 | * Neither the name of Google Inc. nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | -------------------------------------------------------------------------------- /third_party/googletest/googletest.BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | exports_files(["LICENSE"]) 4 | 5 | cc_library( 6 | name = "gtest", 7 | srcs = [ 8 | "googletest/src/gtest-all.cc", 9 | "googlemock/src/gmock-all.cc", 10 | ], 11 | hdrs = glob([ 12 | "**/*.h", 13 | "googletest/src/*.cc", 14 | "googlemock/src/*.cc", 15 | ]), 16 | includes = [ 17 | "googlemock", 18 | "googletest", 19 | "googletest/include", 20 | "googlemock/include", 21 | ], 22 | linkopts = ["-pthread"], 23 | visibility = ["//visibility:public"], 24 | ) 25 | 26 | cc_library( 27 | name = "gtest_main", 28 | srcs = ["googlemock/src/gmock_main.cc"], 29 | linkopts = ["-pthread"], 30 | visibility = ["//visibility:public"], 31 | deps = [":gtest"], 32 | ) 33 | -------------------------------------------------------------------------------- /third_party/hiai/hiai.BUILD: -------------------------------------------------------------------------------- 1 | exports_files(["LICENSE"]) 2 | 3 | cc_library( 4 | name = "hiai_header", 5 | hdrs = glob([ 6 | "DDK/ai_ddk_mixmodel_lib/include/*.h", 7 | ]), 8 | includes = ["DDK/ai_ddk_mixmodel_lib/include/"], 9 | ) 10 | 11 | cc_library( 12 | name = "hiai_arm64-v8a", 13 | srcs = glob([ 14 | "DDK/ai_ddk_mixmodel_lib/lib64/libhiai.so", 15 | ]), 16 | deps = ["hiai_header"], 17 | visibility = ["//visibility:public"], 18 | ) 19 | -------------------------------------------------------------------------------- /third_party/mace/BUILD: -------------------------------------------------------------------------------- 1 | package( 2 | default_visibility = ["//visibility:public"], 3 | ) 4 | 5 | licenses(["notice"]) # Apache 2.0 6 | 7 | exports_files(["LICENSE"]) 8 | 9 | cc_library( 10 | name = "mace_headers", 11 | hdrs = glob([ 12 | "build/include/mace/*/*.h", 13 | ]), 14 | includes = ["build/include"], 15 | strip_include_prefix = "", 16 | visibility = ["//visibility:public"], 17 | ) 18 | 19 | 20 | cc_library( 21 | name = "mace_x86_64", 22 | srcs = [ 23 | "build/lib/linux-x86-64/libmace.so", 24 | ], 25 | visibility = ["//visibility:public"], 26 | deps = ["mace_headers"], 27 | ) 28 | 29 | cc_library( 30 | name = "mace_armeabi-v7a", 31 | srcs = [ 32 | "build/lib/armeabi-v7a/libc++_shared.so", 33 | "build/lib/armeabi-v7a/libmace.so", 34 | "build/lib/armeabi-v7a/libhexagon_controller.so", 35 | ], 36 | visibility = ["//visibility:public"], 37 | deps = ["mace_headers"], 38 | ) 39 | 40 | cc_library( 41 | name = "mace_arm64-v8a", 42 | srcs = [ 43 | "build/lib/arm64-v8a/libc++_shared.so", 44 | "build/lib/arm64-v8a/libmace.so", 45 | "build/lib/arm64-v8a/libhexagon_controller.so", 46 | "build/lib/arm64-v8a/libcdsprpc.so", 47 | ], 48 | visibility = ["//visibility:public"], 49 | deps = ["mace_headers"], 50 | ) 51 | 52 | cc_library( 53 | name = "mace_armhf", 54 | srcs = [ 55 | "build/lib/arm_linux_gnueabihf/libmace.so", 56 | ], 57 | visibility = ["//visibility:public"], 58 | deps = ["mace_headers"], 59 | ) 60 | 61 | cc_library( 62 | name = "mace_aarch64", 63 | srcs = [ 64 | "build/lib/aarch64_linux_gnu/libmace.so", 65 | ], 66 | visibility = ["//visibility:public"], 67 | deps = ["mace_headers"], 68 | ) 69 | -------------------------------------------------------------------------------- /third_party/mnn/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | exports_files(["LICENSE"]) 4 | 5 
| cc_library( 6 | name = "mnn_headers", 7 | hdrs = glob([ 8 | "include/MNN/*.h", 9 | "include/MNN/*.hpp", 10 | "tools/*", 11 | "tools/*/*.hpp", 12 | "tools/*/*/*.h", 13 | "tools/*/*/*/*/*.h", 14 | "schema/*/*.h", 15 | "3rd_party/*/*/*/*.h", 16 | ]), 17 | includes = ["include"], 18 | strip_include_prefix = "", 19 | visibility = ["//visibility:public"], 20 | ) 21 | 22 | cc_library( 23 | name = "mnn_armeabi-v7a", 24 | srcs = [ 25 | "project/android/build_32/libMNN.so", 26 | "project/android/build_32/libMNN_Express.so", 27 | ], 28 | visibility = ["//visibility:public"], 29 | deps = ["mnn_headers"], 30 | ) 31 | 32 | cc_library( 33 | name = "mnn_arm64-v8a", 34 | srcs = [ 35 | "project/android/build_64/libMNN.so", 36 | "project/android/build_64/libMNN_Express.so", 37 | ], 38 | visibility = ["//visibility:public"], 39 | deps = ["mnn_headers"], 40 | ) 41 | -------------------------------------------------------------------------------- /third_party/opencv/LICENSE.txt: -------------------------------------------------------------------------------- 1 | By downloading, copying, installing or using the software you agree to this license. 2 | If you do not agree to this license, do not download, install, 3 | copy or use the software. 4 | 5 | 6 | License Agreement 7 | For Open Source Computer Vision Library 8 | (3-clause BSD License) 9 | 10 | Copyright (C) 2000-2019, Intel Corporation, all rights reserved. 11 | Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. 12 | Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. 13 | Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. 14 | Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved. 15 | Copyright (C) 2015-2016, Itseez Inc., all rights reserved. 16 | Third party copyrights are property of their respective owners. 17 | 18 | Redistribution and use in source and binary forms, with or without modification, 19 | are permitted provided that the following conditions are met: 20 | 21 | * Redistributions of source code must retain the above copyright notice, 22 | this list of conditions and the following disclaimer. 23 | 24 | * Redistributions in binary form must reproduce the above copyright notice, 25 | this list of conditions and the following disclaimer in the documentation 26 | and/or other materials provided with the distribution. 27 | 28 | * Neither the names of the copyright holders nor the names of the contributors 29 | may be used to endorse or promote products derived from this software 30 | without specific prior written permission. 31 | 32 | This software is provided by the copyright holders and contributors "as is" and 33 | any express or implied warranties, including, but not limited to, the implied 34 | warranties of merchantability and fitness for a particular purpose are disclaimed. 35 | In no event shall copyright holders or contributors be liable for any direct, 36 | indirect, incidental, special, exemplary, or consequential damages 37 | (including, but not limited to, procurement of substitute goods or services; 38 | loss of use, data, or profits; or business interruption) however caused 39 | and on any theory of liability, whether in contract, strict liability, 40 | or tort (including negligence or otherwise) arising in any way out of 41 | the use of this software, even if advised of the possibility of such damage. 
-------------------------------------------------------------------------------- /third_party/opencv/opencv.BUILD: -------------------------------------------------------------------------------- 1 | exports_files(["LICENSE"]) 2 | 3 | cc_library( 4 | name = "opencv_header", 5 | hdrs = glob([ 6 | "include/**/*.hpp", 7 | ]), 8 | includes = ["include"], 9 | ) 10 | 11 | cc_library( 12 | name = "opencv_armeabi-v7a", 13 | srcs = glob([ 14 | "libs/armeabi-v7a/libopencv_java4.so", 15 | ]), 16 | linkopts = [ 17 | "-lz", 18 | "-ldl", 19 | "-lm", 20 | "-llog", 21 | ], 22 | deps = ["opencv_header"], 23 | visibility = ["//visibility:public"], 24 | ) 25 | 26 | cc_library( 27 | name = "opencv_arm64-v8a", 28 | srcs = glob([ 29 | "libs/arm64-v8a/libopencv_java4.so", 30 | ]), 31 | linkopts = [ 32 | "-lz", 33 | "-ldl", 34 | "-lm", 35 | "-llog", 36 | ], 37 | deps = ["opencv_header"], 38 | visibility = ["//visibility:public"], 39 | ) 40 | 41 | cc_library( 42 | name = "opencv_aarch64_linux", 43 | srcs = glob([ 44 | "libs/aarch64_linux/*.so.4.0", 45 | ]), 46 | linkopts = [ 47 | "-ldl", 48 | "-lm", 49 | ], 50 | deps = ["opencv_header"], 51 | visibility = ["//visibility:public"], 52 | ) 53 | 54 | cc_library( 55 | name = "opencv_armhf_linux", 56 | srcs = glob([ 57 | "libs/armhf_linux/*.so.4.0", 58 | ]), 59 | linkopts = [ 60 | "-ldl", 61 | "-lm", 62 | ], 63 | deps = ["opencv_header"], 64 | visibility = ["//visibility:public"], 65 | ) 66 | -------------------------------------------------------------------------------- /third_party/six/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010-2015 Benjamin Peterson 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /third_party/six/six.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Six provides simple utilities for wrapping over differences between Python 2 3 | # and Python 3. 
4 | 5 | licenses(["notice"]) 6 | 7 | exports_files(["LICENSE"]) 8 | 9 | py_library( 10 | name = "six", 11 | srcs = ["six.py"], 12 | srcs_version = "PY2AND3", 13 | visibility = ["//visibility:public"], 14 | ) 15 | -------------------------------------------------------------------------------- /third_party/snpe/snpe.BUILD: -------------------------------------------------------------------------------- 1 | exports_files(["LICENSE.txt"]) 2 | 3 | cc_library( 4 | name = "snpe_hdr", 5 | hdrs = glob([ 6 | "include/zdl/*/*.hpp", 7 | ]), 8 | includes = ["include/zdl"], 9 | ) 10 | 11 | cc_library( 12 | name = "snpe_x86_64", 13 | srcs = [ 14 | "lib/x86_64-linux-clang/libSNPE.so", 15 | ], 16 | deps = ["snpe_hdr"], 17 | visibility = ["//visibility:public"], 18 | ) 19 | 20 | cc_library( 21 | name = "snpe_armeabi-v7a", 22 | srcs = [ 23 | "lib/arm-android-clang6.0/libSNPE.so", 24 | "lib/arm-android-clang6.0/libc++_shared.so", 25 | ], 26 | deps = ["snpe_hdr"], 27 | visibility = ["//visibility:public"], 28 | ) 29 | 30 | cc_library( 31 | name = "snpe_arm64-v8a", 32 | linkopts = [ 33 | "-llog", 34 | ], 35 | srcs = [ 36 | "lib/aarch64-android-clang6.0/libSNPE.so", 37 | "lib/aarch64-android-clang6.0/libc++_shared.so", 38 | ], 39 | deps = ["snpe_hdr"], 40 | visibility = ["//visibility:public"], 41 | ) 42 | 43 | cc_library( 44 | name = "snpe_armhf", 45 | srcs = [ 46 | "lib/arm-oe-linux-gcc8.2hf/libSNPE.so", 47 | ], 48 | deps = ["snpe_hdr"], 49 | visibility = ["//visibility:public"], 50 | ) 51 | 52 | cc_library( 53 | name = "snpe_aarch64", 54 | srcs = [ 55 | "lib/aarch64-linux-gcc4.9/libSNPE.so", 56 | ], 57 | deps = ["snpe_hdr"], 58 | visibility = ["//visibility:public"], 59 | ) 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /third_party/tflite/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # Apache 2.0 2 | 3 | exports_files(["LICENSE"]) 4 | 5 | cc_library( 6 | name = "tflite_headers", 7 | hdrs = glob([ 8 | "tensorflow/lite/*.h", 9 | "tensorflow/lite/*/*.h", 10 | "tensorflow/lite/*/*/*.h", 11 | "flatbuffers/*.h", 12 | "absl/base/*.h", 13 | ]), 14 | visibility = ["//visibility:public"], 15 | strip_include_prefix = "", 16 | ) 17 | 18 | cc_library( 19 | name = "tflite_armeabi-v7a", 20 | srcs = [ 21 | "tensorflow/lite/lib/armeabi-v7a/libtensorflowlite.so", 22 | ], 23 | visibility = ["//visibility:public"], 24 | deps = ["tflite_headers"], 25 | ) 26 | 27 | cc_library( 28 | name = "tflite_arm64-v8a", 29 | srcs = [ 30 | "tensorflow/lite/lib/arm64-v8a/libtensorflowlite.so", 31 | ], 32 | visibility = ["//visibility:public"], 33 | deps = ["tflite_headers"], 34 | ) 35 | -------------------------------------------------------------------------------- /third_party/tnn/LICENSE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoMi/mobile-ai-bench/a91bca47af319b4d88b87b58a6c879de9c910733/third_party/tnn/LICENSE -------------------------------------------------------------------------------- /third_party/tnn/tnn.BUILD: -------------------------------------------------------------------------------- 1 | exports_files(["LICENSE"]) 2 | 3 | cc_library( 4 | name = "tnn_hdr", 5 | hdrs = glob([ 6 | "include/tnn/*.h", 7 | "include/tnn/*/*.h", 8 | ]), 9 | includes = ["include"], 10 | ) 11 | 12 | cc_library( 13 | name = "tnn_armeabi-v7a", 14 | srcs = [ 15 | "armeabi-v7a/libTNN.so", 16 | ], 17 | deps = ["tnn_hdr"], 18 | visibility = 
["//visibility:public"], 19 | ) 20 | 21 | cc_library( 22 | name = "tnn_arm64-v8a", 23 | srcs = [ 24 | "arm64-v8a/libTNN.so", 25 | ], 26 | deps = ["tnn_hdr"], 27 | visibility = ["//visibility:public"], 28 | ) 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/BUILD: -------------------------------------------------------------------------------- 1 | # This is the entry point for --crosstool_top. Toolchains are found 2 | # by lopping off the name of --crosstool_top and searching for 3 | # 'cc-compiler-${CPU}' in this BUILD file, where CPU is the target CPU 4 | # specified in --cpu. 5 | # 6 | # This file group should include 7 | # * all cc_toolchain targets supported 8 | # * all file groups that said cc_toolchain might refer to, 9 | # including the default_grte_top setting in the CROSSTOOL 10 | # protobuf. 11 | filegroup( 12 | name = "toolchain_fg", 13 | srcs = [ 14 | ":cc-compiler-aarch64", 15 | ":linaro_linux_all_files", 16 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:compiler_components", 17 | ], 18 | ) 19 | 20 | filegroup( 21 | name = "srcs", 22 | srcs = glob(["**"]) + [ 23 | "//tools/aarch64_compiler/linaro_linux_gcc:srcs", 24 | ], 25 | visibility = ["//visibility:public"], 26 | ) 27 | 28 | cc_toolchain_suite( 29 | name = "toolchain", 30 | # target_cpu | compiler 31 | toolchains = { 32 | "aarch64|gcc": "cc-compiler-aarch64", 33 | }, 34 | ) 35 | 36 | filegroup( 37 | name = "linaro_linux_all_files", 38 | srcs = [ 39 | "//tools/aarch64_compiler/linaro_linux_gcc:tool-wrappers", 40 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:compiler_pieces", 41 | ], 42 | ) 43 | 44 | filegroup( 45 | name = "linaro_linux_linker_files", 46 | srcs = [ 47 | "//tools/aarch64_compiler/linaro_linux_gcc:ar", 48 | "//tools/aarch64_compiler/linaro_linux_gcc:gcc", 49 | "//tools/aarch64_compiler/linaro_linux_gcc:ld", 50 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:compiler_pieces", 51 | ], 52 | ) 53 | 54 | filegroup( 55 | name = "linaro_linux_compiler_files", 56 | srcs = [ 57 | "//tools/aarch64_compiler/linaro_linux_gcc:as", 58 | "//tools/aarch64_compiler/linaro_linux_gcc:gcc", 59 | "//tools/aarch64_compiler/linaro_linux_gcc:ld", 60 | ], 61 | ) 62 | 63 | filegroup( 64 | name = "empty", 65 | srcs = [], 66 | ) 67 | 68 | cc_toolchain( 69 | name = "cc-compiler-aarch64", 70 | all_files = ":linaro_linux_all_files", 71 | compiler_files = ":linaro_linux_compiler_files", 72 | cpu = "aarch64", 73 | dwp_files = ":empty", 74 | dynamic_runtime_libs = [":empty"], 75 | linker_files = ":linaro_linux_linker_files", 76 | objcopy_files = "//tools/aarch64_compiler/linaro_linux_gcc:objcopy", 77 | static_runtime_libs = [":empty"], 78 | strip_files = "//tools/aarch64_compiler/linaro_linux_gcc:strip", 79 | supports_param_files = 1, 80 | visibility = ["//visibility:public"], 81 | ) 82 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//tools/aarch64_compiler:__pkg__"]) 2 | 3 | filegroup( 4 | name = "srcs", 5 | srcs = glob(["**"]), 6 | visibility = ["//visibility:public"], 7 | ) 8 | 9 | filegroup( 10 | name = "gcc", 11 | srcs = [ 12 | "aarch64-linux-gnu-gcc", 13 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:gcc", 14 | ], 15 | ) 16 | 17 | filegroup( 18 | name = "ar", 19 | srcs = [ 20 | "aarch64-linux-gnu-ar", 21 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:ar", 22 | ], 23 | ) 24 | 25 | filegroup( 26 | 
name = "ld", 27 | srcs = [ 28 | "aarch64-linux-gnu-ld", 29 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:ld", 30 | ], 31 | ) 32 | 33 | filegroup( 34 | name = "nm", 35 | srcs = [ 36 | "aarch64-linux-gnu-nm", 37 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:nm", 38 | ], 39 | ) 40 | 41 | filegroup( 42 | name = "objcopy", 43 | srcs = [ 44 | "aarch64-linux-gnu-objcopy", 45 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:objcopy", 46 | ], 47 | ) 48 | 49 | filegroup( 50 | name = "objdump", 51 | srcs = [ 52 | "aarch64-linux-gnu-objdump", 53 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:objdump", 54 | ], 55 | ) 56 | 57 | filegroup( 58 | name = "strip", 59 | srcs = [ 60 | "aarch64-linux-gnu-strip", 61 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:strip", 62 | ], 63 | ) 64 | 65 | filegroup( 66 | name = "as", 67 | srcs = [ 68 | "aarch64-linux-gnu-as", 69 | "@gcc_linaro_7_3_1_aarch64_linux_gnu//:as", 70 | ], 71 | ) 72 | 73 | filegroup( 74 | name = "tool-wrappers", 75 | srcs = [ 76 | ":ar", 77 | ":as", 78 | ":gcc", 79 | ":ld", 80 | ":nm", 81 | ":objcopy", 82 | ":objdump", 83 | ":strip", 84 | ], 85 | ) 86 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-ar: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-ar \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-ar \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-as: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-as \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-as \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-gcc: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | PATH="external/gcc_linaro_7_3_1_aarch64_linux_gnu/libexec/gcc/aarch64-linux-gnu/7.3.1:$PATH" \ 4 | exec \ 5 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-gcc \ 6 | "$@" 7 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-gcov: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-gcov \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-gcov \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-ld: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-ld \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-ld \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-nm: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-nm \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-nm \ 5 | "$@" 6 | -------------------------------------------------------------------------------- 
/tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-objcopy: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-objcopy \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-objcopy \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-objdump: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-objdump \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-objdump \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/aarch64_compiler/linaro_linux_gcc/aarch64-linux-gnu-strip: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a aarch64-linux-gnu-strip \ 4 | external/gcc_linaro_7_3_1_aarch64_linux_gnu/bin/aarch64-linux-gnu-strip \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/BUILD: -------------------------------------------------------------------------------- 1 | # This is the entry point for --crosstool_top. Toolchains are found 2 | # by lopping off the name of --crosstool_top and searching for 3 | # 'cc-compiler-${CPU}' in this BUILD file, where CPU is the target CPU 4 | # specified in --cpu. 5 | # 6 | # This file group should include 7 | # * all cc_toolchain targets supported 8 | # * all file groups that said cc_toolchain might refer to, 9 | # including the default_grte_top setting in the CROSSTOOL 10 | # protobuf. 11 | filegroup( 12 | name = "toolchain_fg", 13 | srcs = [ 14 | ":cc-compiler-armhf", 15 | ":linaro_linux_all_files", 16 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:compiler_components", 17 | ], 18 | ) 19 | 20 | filegroup( 21 | name = "srcs", 22 | srcs = glob(["**"]) + [ 23 | "//tools/arm_compiler/linaro_linux_gcc:srcs", 24 | ], 25 | visibility = ["//visibility:public"], 26 | ) 27 | 28 | cc_toolchain_suite( 29 | name = "toolchain", 30 | # target_cpu | compiler 31 | toolchains = { 32 | "armhf|gcc": "cc-compiler-armhf", 33 | }, 34 | ) 35 | 36 | filegroup( 37 | name = "linaro_linux_all_files", 38 | srcs = [ 39 | "//tools/arm_compiler/linaro_linux_gcc:tool-wrappers", 40 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:compiler_pieces", 41 | ], 42 | ) 43 | 44 | filegroup( 45 | name = "linaro_linux_linker_files", 46 | srcs = [ 47 | "//tools/arm_compiler/linaro_linux_gcc:ar", 48 | "//tools/arm_compiler/linaro_linux_gcc:gcc", 49 | "//tools/arm_compiler/linaro_linux_gcc:ld", 50 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:compiler_pieces", 51 | ], 52 | ) 53 | 54 | filegroup( 55 | name = "linaro_linux_compiler_files", 56 | srcs = [ 57 | "//tools/arm_compiler/linaro_linux_gcc:as", 58 | "//tools/arm_compiler/linaro_linux_gcc:gcc", 59 | "//tools/arm_compiler/linaro_linux_gcc:ld", 60 | ], 61 | ) 62 | 63 | filegroup( 64 | name = "empty", 65 | srcs = [], 66 | ) 67 | 68 | cc_toolchain( 69 | name = "cc-compiler-armhf", 70 | all_files = ":linaro_linux_all_files", 71 | compiler_files = ":linaro_linux_compiler_files", 72 | cpu = "armhf", 73 | dwp_files = ":empty", 74 | dynamic_runtime_libs = [":empty"], 75 | linker_files = ":linaro_linux_linker_files", 76 | objcopy_files = "//tools/arm_compiler/linaro_linux_gcc:objcopy", 77 | static_runtime_libs = [":empty"], 78 | strip_files = 
"//tools/arm_compiler/linaro_linux_gcc:strip", 79 | supports_param_files = 1, 80 | visibility = ["//visibility:public"], 81 | ) 82 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//tools/arm_compiler:__pkg__"]) 2 | 3 | filegroup( 4 | name = "srcs", 5 | srcs = glob(["**"]), 6 | visibility = ["//visibility:public"], 7 | ) 8 | 9 | filegroup( 10 | name = "gcc", 11 | srcs = [ 12 | "arm-linux-gnueabihf-gcc", 13 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:gcc", 14 | ], 15 | ) 16 | 17 | filegroup( 18 | name = "ar", 19 | srcs = [ 20 | "arm-linux-gnueabihf-ar", 21 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:ar", 22 | ], 23 | ) 24 | 25 | filegroup( 26 | name = "ld", 27 | srcs = [ 28 | "arm-linux-gnueabihf-ld", 29 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:ld", 30 | ], 31 | ) 32 | 33 | filegroup( 34 | name = "nm", 35 | srcs = [ 36 | "arm-linux-gnueabihf-nm", 37 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:nm", 38 | ], 39 | ) 40 | 41 | filegroup( 42 | name = "objcopy", 43 | srcs = [ 44 | "arm-linux-gnueabihf-objcopy", 45 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:objcopy", 46 | ], 47 | ) 48 | 49 | filegroup( 50 | name = "objdump", 51 | srcs = [ 52 | "arm-linux-gnueabihf-objdump", 53 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:objdump", 54 | ], 55 | ) 56 | 57 | filegroup( 58 | name = "strip", 59 | srcs = [ 60 | "arm-linux-gnueabihf-strip", 61 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:strip", 62 | ], 63 | ) 64 | 65 | filegroup( 66 | name = "as", 67 | srcs = [ 68 | "arm-linux-gnueabihf-as", 69 | "@gcc_linaro_7_3_1_arm_linux_gnueabihf//:as", 70 | ], 71 | ) 72 | 73 | filegroup( 74 | name = "tool-wrappers", 75 | srcs = [ 76 | ":ar", 77 | ":as", 78 | ":gcc", 79 | ":ld", 80 | ":nm", 81 | ":objcopy", 82 | ":objdump", 83 | ":strip", 84 | ], 85 | ) 86 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-ar: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-ar \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-ar \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-as: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-as \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-as \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-gcc: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | PATH="external/gcc_linaro_7_3_1_arm_linux_gnueabihf/libexec/gcc/arm-linux-gnueabihf/7.3.1:$PATH" \ 4 | exec \ 5 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-gcc \ 6 | "$@" 7 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-gcov: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-gcov \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-gcov \ 5 | "$@" 6 | 
-------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-ld: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-ld \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-ld \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-nm: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-nm \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-nm \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-objcopy: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-objcopy \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-objcopy \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-objdump: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-objdump \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-objdump \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/arm_compiler/linaro_linux_gcc/arm-linux-gnueabihf-strip: -------------------------------------------------------------------------------- 1 | #!/bin/bash --norc 2 | 3 | exec -a arm-linux-gnueabihf-strip \ 4 | external/gcc_linaro_7_3_1_arm_linux_gnueabihf/bin/arm-linux-gnueabihf-strip \ 5 | "$@" 6 | -------------------------------------------------------------------------------- /tools/bazel.rc: -------------------------------------------------------------------------------- 1 | build --verbose_failures 2 | build --copt=-std=c++11 3 | build --copt=-fPIC 4 | build --copt=-D_GLIBCXX_USE_C99_MATH_TR1 5 | build --copt=-ffast-math 6 | build --copt=-Ofast 7 | build --strategy=CppCompile=standalone 8 | 9 | build -c opt 10 | build --copt=-O3 11 | build --linkopt=-Wl,--strip-all 12 | build --copt=-fvisibility=hidden 13 | build --copt=-ffunction-sections 14 | build --copt=-fdata-sections 15 | build --linkopt=-Wl,--gc-sections 16 | 17 | # By default, we don't distinguish between target and host platforms. 18 | # When doing cross compilation, use --config=cross_compile to distinguish them.
19 | build --distinct_host_configuration=false 20 | build:cross_compile --distinct_host_configuration=true 21 | 22 | # Usage example: bazel build --config android 23 | build:android --define linux_base=true 24 | build:android --crosstool_top=//external:android/crosstool 25 | build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain 26 | build:android --config=cross_compile 27 | 28 | # Linux host build, --config linux 29 | build:linux --define linux_base=true 30 | build:linux --define linux=true 31 | 32 | # Usage example: bazel build --config arm_linux_gnueabihf 33 | build:arm_linux_gnueabihf --define linux_base=true 34 | build:arm_linux_gnueabihf --define linux=true 35 | build:arm_linux_gnueabihf --config=cross_compile 36 | build:arm_linux_gnueabihf --crosstool_top=//tools/arm_compiler:toolchain 37 | build:arm_linux_gnueabihf --host_crosstool_top=@bazel_tools//tools/cpp:toolchain 38 | build:arm_linux_gnueabihf --cpu=armhf 39 | build:arm_linux_gnueabihf --copt -mfloat-abi=hard 40 | build:arm_linux_gnueabihf --copt -mfpu=neon 41 | build:arm_linux_gnueabihf --copt -Wno-ignored-attributes 42 | build:arm_linux_gnueabihf --copt -Wno-unused-function 43 | build:arm_linux_gnueabihf --copt -Wno-sequence-point 44 | build:arm_linux_gnueabihf --copt -Wno-implicit-fallthrough 45 | 46 | # Usage example: bazel build --config aarch64_linux_gnu 47 | build:aarch64_linux_gnu --define linux_base=true 48 | build:aarch64_linux_gnu --define linux=true 49 | build:aarch64_linux_gnu --config=cross_compile 50 | build:aarch64_linux_gnu --crosstool_top=//tools/aarch64_compiler:toolchain 51 | build:aarch64_linux_gnu --host_crosstool_top=@bazel_tools//tools/cpp:toolchain 52 | build:aarch64_linux_gnu --cpu=aarch64 53 | build:aarch64_linux_gnu --copt -Wno-ignored-attributes 54 | build:aarch64_linux_gnu --copt -Wno-unused-function 55 | build:aarch64_linux_gnu --copt -Wno-sequence-point 56 | build:aarch64_linux_gnu --copt -Wno-implicit-fallthrough 57 | -------------------------------------------------------------------------------- /tools/benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e -u -o pipefail 4 | set -x 5 | 6 | bazel build //aibench/python:benchmark 7 | 8 | bazel-bin/aibench/python/benchmark "$@" 9 | -------------------------------------------------------------------------------- /tools/build_mace.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e -u -o pipefail 4 | 5 | trap "exit" INT 6 | 7 | # get mace-models 8 | rm -rf mace-models 9 | GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" git clone git@github.com:XiaoMi/mace-models.git 10 | cd mace-models/ 11 | MODEL_ROOT_PATH=`pwd` 12 | CONF_FILES=`find "$MODEL_ROOT_PATH" -name "*.yml" | { grep -v ".gitlab-ci.yml" || true; }` 13 | 14 | # get mace 15 | cd ..
16 | rm -rf mace 17 | GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" git clone git@github.com:XiaoMi/mace.git 18 | cd mace/ 19 | 20 | MODELS=( 21 | "inception-v3" 22 | "inception-v3-quantize-retrain" 23 | "inception-v3-quantize-retrain-dsp" 24 | "mobilenet-v1" 25 | "mobilenet-v1-quantize-retrain" 26 | "mobilenet-v1-quantize-retrain-dsp" 27 | "mobilenet-v2" 28 | "mobilenet-v2-quantize-retrain" 29 | "squeezenet-v11" 30 | ) 31 | 32 | # convert models to pb and data 33 | TIMESTAMP=$(date +%s) 34 | for CONF_FILE in $CONF_FILES; do 35 | for MODEL in "${MODELS[@]}"; do 36 | if [ "$(basename $CONF_FILE .yml)" == "$MODEL" ]; then 37 | set +e 38 | python tools/converter.py convert --config=$CONF_FILE $1 39 | RESULT=$? 40 | set -e 41 | if [ $RESULT == 0 ]; then 42 | FILE_NAME=$(basename $CONF_FILE .yml) 43 | cp builds/${FILE_NAME}/model/*.pb ../output/${FILE_NAME}_${TIMESTAMP}.pb 44 | cp builds/${FILE_NAME}/model/*.data ../output/${FILE_NAME}_${TIMESTAMP}.data 45 | fi 46 | fi 47 | done 48 | done 49 | -------------------------------------------------------------------------------- /tools/cmake_toolchain/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # Apache 2.0 2 | 3 | exports_files(["LICENSE"]) 4 | 5 | filegroup( 6 | name = "toolchain", 7 | srcs = [ 8 | "toolchain.cmake.tpl", 9 | ], 10 | visibility = ["//visibility:public"], 11 | ) 12 | 13 | -------------------------------------------------------------------------------- /tools/cmake_toolchain/cmake.bzl: -------------------------------------------------------------------------------- 1 | 2 | # -*- Python -*- 3 | 4 | 5 | 6 | def getCMakeToolchain(): 7 | return select({ 8 | "@aibench//aibench:android_armv7": "$$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake", 9 | "@aibench//aibench:android_arm64": "$$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake", 10 | "@aibench//aibench:aarch64_linux": "$(rootpath @aibench//tools/cmake_toolchain:cmakes)", 11 | "@aibench//aibench:armhf_linux": "$(rootpath @aibench//tools/cmake_toolchain:cmakes)", 12 | "//conditions:default": "", 13 | }) 14 | 15 | -------------------------------------------------------------------------------- /tools/cmake_toolchain/toolchain.cmake.tpl: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016 The Android Open Source Project 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Configurable variables. 16 | # Modeled after the ndk-build system. 17 | # For any variables defined in: 18 | # https://developer.android.com/ndk/guides/android_mk.html 19 | # https://developer.android.com/ndk/guides/application_mk.html 20 | # if it makes sense for CMake, then replace LOCAL, APP, or NDK with ANDROID, and 21 | # we have that variable below. 22 | # The exception is ANDROID_TOOLCHAIN vs NDK_TOOLCHAIN_VERSION. 
23 | # Since we only have one version of each gcc and clang, specifying a version 24 | # doesn't make much sense. 25 | # 26 | 27 | 28 | cmake_minimum_required(VERSION 3.6.0) 29 | 30 | 31 | # Standard cross-compiling stuff. 32 | set(CMAKE_SYSTEM_NAME Generic) 33 | set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 34 | set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 35 | set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 36 | set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 37 | 38 | set(COMPILER_PATH "GNU_PATH") 39 | 40 | set(CMAKE_C_COMPILER "GCC_PATH") 41 | set(CMAKE_FIND_ROOT_PATH ${COMPILER_PATH}) 42 | 43 | 44 | -------------------------------------------------------------------------------- /tools/common.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Xiaomi, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | ################################# 17 | # YAMLKeyword 18 | ################################# 19 | class YAMLKeyword: 20 | device_name = 'device_name' 21 | target_abis = 'target_abis' 22 | target_socs = 'target_socs' 23 | models = 'models' 24 | system = 'system' 25 | address = 'address' 26 | username = 'username' 27 | password = 'password' 28 | 29 | 30 | ################################# 31 | # System type 32 | ################################# 33 | class SystemType: 34 | android = 'android' 35 | arm_linux = 'arm_linux' 36 | host = 'host' 37 | 38 | 39 | ################################ 40 | # ABI Type 41 | ################################ 42 | class ABIType(object): 43 | armeabi_v7a = 'armeabi-v7a' 44 | arm64_v8a = 'arm64-v8a' 45 | arm64 = 'arm64' 46 | aarch64 = 'aarch64' 47 | armhf = 'armhf' 48 | host = 'host' 49 | 50 | 51 | abi_types = [ 52 | 'armeabi-v7a', 53 | 'arm64-v8a', 54 | 'arm64', 55 | 'armhf', 56 | 'host', 57 | ] 58 | 59 | 60 | ################################# 61 | # Tool chain Type 62 | ################################# 63 | class ToolchainType: 64 | android = 'android' 65 | arm_linux_gnueabihf = 'arm_linux_gnueabihf' 66 | aarch64_linux_gnu = 'aarch64_linux_gnu' 67 | host = '' 68 | -------------------------------------------------------------------------------- /tools/configs.yml: -------------------------------------------------------------------------------- 1 | # libs: 2 | tensorflow-2.4.1.zip: https://cnbj1.fds.api.xiaomi.com/aibench/third_party/tensorflow-2.4.1.zip 3 | tensorflow-2.4.1.zip_md5_checksum: 3674cf2ace8ccab67ca89726b8b12980 4 | MNN-1.1.1.zip: https://cnbj1.fds.api.xiaomi.com/aibench/third_party/MNN-1.1.1.zip 5 | MNN-1.1.1.zip_md5_checksum: 42c8cb30c0683bd1c6e8c7c8a7a59510 6 | mace-1.0.4.zip: https://cnbj1.fds.api.xiaomi.com/aibench/third_party/mace-1.0.4.zip 7 | mace-1.0.4.zip_md5_checksum: 77c8ac455e1424ec1cd740f3f76ac427 8 | 9 | # inputs: 10 | imagenet_less.zip: https://cnbj1-fds.api.xiaomi.net/aibench/inputs/imagenet_less.zip 11 | imagenet_less.zip_md5_checksum: e0f7a6b64ed07a0ed0402d61b79cebe2 12 |
-------------------------------------------------------------------------------- /tools/google-format.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | clang-format-3.9 \ 4 | -style="{BasedOnStyle: google, \ 5 | DerivePointerAlignment: false, \ 6 | PointerAlignment: Right, \ 7 | BinPackParameters: false}" -i "$1" 8 | --------------------------------------------------------------------------------