├── .clang-format ├── .github └── workflows │ └── ci.yml ├── Android.bp ├── BUILD.gn ├── BasePreparedModel.cpp ├── BasePreparedModel.h ├── Driver.cpp ├── Driver.h ├── IENetwork.cpp ├── IENetwork.h ├── ModelManager.cpp ├── ModelManager.h ├── README.md ├── ci ├── build-test.sh ├── cts-vts.py ├── intel-nnhal-dev │ └── intel-nnhal-dev-9999.ebuild └── intel-openvino-dev │ ├── files │ └── 0001-Compilation-changes-for-CrOS-ov-master.patch │ └── intel-openvino-dev-9999.ebuild ├── config ├── android.hardware.neuralnetworks@1.0-generic.rc ├── android.hardware.neuralnetworks@1.1-generic-cpu.rc ├── android.hardware.neuralnetworks@1.1-generic-gpu.rc ├── android.hardware.neuralnetworks@1.2-generic-cpu.rc └── android.hardware.neuralnetworks@1.3-generic-cpu.rc ├── cpu ├── CpuPreparedModel.cpp └── CpuPreparedModel.h ├── gna ├── GnaPreparedModel.cpp └── GnaPreparedModel.h ├── ngraph_creator ├── Android.bp ├── include │ ├── NgraphNetworkCreator.hpp │ ├── NgraphNodes.hpp │ └── OperationsFactory.hpp ├── operations │ ├── include │ │ ├── Abs.hpp │ │ ├── Add.hpp │ │ ├── Argmax.hpp │ │ ├── Argmin.hpp │ │ ├── AveragePool2D.hpp │ │ ├── BatchToSpace.hpp │ │ ├── BidirectionalSequenceRNN.hpp │ │ ├── Cast.hpp │ │ ├── ChannelShuffle.hpp │ │ ├── Concat.hpp │ │ ├── Conv2d.hpp │ │ ├── DepthToSpace.hpp │ │ ├── DepthwiseConv2d.hpp │ │ ├── Dequantize.hpp │ │ ├── Div.hpp │ │ ├── EmbeddingLookup.hpp │ │ ├── Equal.hpp │ │ ├── Exp.hpp │ │ ├── ExpandDims.hpp │ │ ├── Floor.hpp │ │ ├── FullyConnected.hpp │ │ ├── Gather.hpp │ │ ├── Greater.hpp │ │ ├── GreaterEqual.hpp │ │ ├── GroupedConv2d.hpp │ │ ├── HardSwish.hpp │ │ ├── InstanceNormalization.hpp │ │ ├── L2Normalization.hpp │ │ ├── L2Pooling2D.hpp │ │ ├── LSTM.hpp │ │ ├── Less.hpp │ │ ├── LessEqual.hpp │ │ ├── Log.hpp │ │ ├── LogSoftmax.hpp │ │ ├── LogicalAnd.hpp │ │ ├── LogicalNot.hpp │ │ ├── LogicalOr.hpp │ │ ├── Logistic.hpp │ │ ├── MaxPool2d.hpp │ │ ├── Maximum.hpp │ │ ├── Mean.hpp │ │ ├── Minimum.hpp │ │ ├── Mul.hpp │ │ ├── Neg.hpp │ │ ├── 
NgraphHelper.hpp │ │ ├── NotEqual.hpp │ │ ├── OperationsBase.hpp │ │ ├── PRelu.hpp │ │ ├── Pad.hpp │ │ ├── PadV2.hpp │ │ ├── Pow.hpp │ │ ├── Quantize.hpp │ │ ├── RNN.hpp │ │ ├── ROIAlign.hpp │ │ ├── ROIPooling.hpp │ │ ├── RSQRT.hpp │ │ ├── ReduceAll.hpp │ │ ├── ReduceAny.hpp │ │ ├── ReduceMax.hpp │ │ ├── ReduceMin.hpp │ │ ├── ReduceProd.hpp │ │ ├── ReduceSum.hpp │ │ ├── Relu.hpp │ │ ├── Relu1.hpp │ │ ├── Relu6.hpp │ │ ├── Reshape.hpp │ │ ├── ResizeBilinear.hpp │ │ ├── ResizeNearestNeighbor.hpp │ │ ├── SQRT.hpp │ │ ├── Select.hpp │ │ ├── Sin.hpp │ │ ├── Softmax.hpp │ │ ├── SpaceToBatch.hpp │ │ ├── SpaceToDepth.hpp │ │ ├── Split.hpp │ │ ├── Squeeze.hpp │ │ ├── StridedSlice.hpp │ │ ├── Sub.hpp │ │ ├── Tanh.hpp │ │ ├── TopkV2.hpp │ │ ├── Transpose.hpp │ │ ├── TransposeConv2D.hpp │ │ └── UnidirectionalSequenceRNN.hpp │ └── src │ │ ├── Abs.cpp │ │ ├── Add.cpp │ │ ├── Argmax.cpp │ │ ├── Argmin.cpp │ │ ├── AveragePool2D.cpp │ │ ├── BatchToSpace.cpp │ │ ├── BidirectionalSequenceRNN.cpp │ │ ├── Cast.cpp │ │ ├── ChannelShuffle.cpp │ │ ├── Concat.cpp │ │ ├── Conv2d.cpp │ │ ├── DepthToSpace.cpp │ │ ├── DepthwiseConv2d.cpp │ │ ├── Dequantize.cpp │ │ ├── Div.cpp │ │ ├── EmbeddingLookup.cpp │ │ ├── Equal.cpp │ │ ├── Exp.cpp │ │ ├── ExpandDims.cpp │ │ ├── Floor.cpp │ │ ├── FullyConnected.cpp │ │ ├── Gather.cpp │ │ ├── Greater.cpp │ │ ├── GreaterEqual.cpp │ │ ├── GroupedConv2d.cpp │ │ ├── HardSwish.cpp │ │ ├── InstanceNormalization.cpp │ │ ├── L2Normalization.cpp │ │ ├── L2Pooling2D.cpp │ │ ├── LSTM.cpp │ │ ├── Less.cpp │ │ ├── LessEqual.cpp │ │ ├── Log.cpp │ │ ├── LogSoftmax.cpp │ │ ├── LogicalAnd.cpp │ │ ├── LogicalNot.cpp │ │ ├── LogicalOr.cpp │ │ ├── Logistic.cpp │ │ ├── MaxPool2d.cpp │ │ ├── Maximum.cpp │ │ ├── Mean.cpp │ │ ├── Minimum.cpp │ │ ├── Mul.cpp │ │ ├── Neg.cpp │ │ ├── NotEqual.cpp │ │ ├── OperationsBase.cpp │ │ ├── PRelu.cpp │ │ ├── Pad.cpp │ │ ├── PadV2.cpp │ │ ├── Pow.cpp │ │ ├── Quantize.cpp │ │ ├── RNN.cpp │ │ ├── ROIAlign.cpp │ │ ├── ROIPooling.cpp │ │ ├── 
RSQRT.cpp │ │ ├── ReduceAll.cpp │ │ ├── ReduceAny.cpp │ │ ├── ReduceMax.cpp │ │ ├── ReduceMin.cpp │ │ ├── ReduceProd.cpp │ │ ├── ReduceSum.cpp │ │ ├── Relu.cpp │ │ ├── Relu1.cpp │ │ ├── Relu6.cpp │ │ ├── Reshape.cpp │ │ ├── ResizeBilinear.cpp │ │ ├── ResizeNearestNeighbor.cpp │ │ ├── SQRT.cpp │ │ ├── Select.cpp │ │ ├── Sin.cpp │ │ ├── Softmax.cpp │ │ ├── SpaceToBatch.cpp │ │ ├── SpaceToDepth.cpp │ │ ├── Split.cpp │ │ ├── Squeeze.cpp │ │ ├── StridedSlice.cpp │ │ ├── Sub.cpp │ │ ├── Tanh.cpp │ │ ├── TopkV2.cpp │ │ ├── Transpose.cpp │ │ ├── TransposeConv2D.cpp │ │ └── UnidirectionalSequenceRNN.cpp └── src │ ├── NgraphNetworkCreator.cpp │ ├── NgraphNodes.cpp │ └── OperationsFactory.cpp ├── service.cpp ├── utils.cpp └── utils.h /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | # BasedOnStyle: Google 4 | AccessModifierOffset: -4 5 | AlignAfterOpenBracket: Align 6 | AlignConsecutiveAssignments: false 7 | AlignConsecutiveDeclarations: false 8 | AlignEscapedNewlines: Left 9 | AlignOperands: true 10 | AlignTrailingComments: true 11 | AllowAllParametersOfDeclarationOnNextLine: true 12 | AllowShortBlocksOnASingleLine: false 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: All 15 | AllowShortIfStatementsOnASingleLine: true 16 | AllowShortLoopsOnASingleLine: true 17 | AlwaysBreakAfterDefinitionReturnType: None 18 | AlwaysBreakAfterReturnType: None 19 | AlwaysBreakBeforeMultilineStrings: true 20 | AlwaysBreakTemplateDeclarations: Yes 21 | BinPackArguments: true 22 | BinPackParameters: true 23 | BraceWrapping: 24 | AfterClass: false 25 | AfterControlStatement: false 26 | AfterEnum: false 27 | AfterFunction: false 28 | AfterNamespace: false 29 | AfterObjCDeclaration: false 30 | AfterStruct: false 31 | AfterUnion: false 32 | AfterExternBlock: false 33 | BeforeCatch: false 34 | BeforeElse: false 35 | IndentBraces: false 36 | SplitEmptyFunction: true 37 | SplitEmptyRecord: 
true 38 | SplitEmptyNamespace: true 39 | BreakBeforeBinaryOperators: None 40 | BreakBeforeBraces: Attach 41 | BreakBeforeInheritanceComma: false 42 | BreakInheritanceList: BeforeColon 43 | BreakBeforeTernaryOperators: true 44 | BreakConstructorInitializersBeforeComma: false 45 | BreakConstructorInitializers: BeforeColon 46 | BreakAfterJavaFieldAnnotations: false 47 | BreakStringLiterals: true 48 | ColumnLimit: 100 49 | CommentPragmas: '^ IWYU pragma:' 50 | CompactNamespaces: false 51 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 52 | ConstructorInitializerIndentWidth: 4 53 | ContinuationIndentWidth: 4 54 | Cpp11BracedListStyle: true 55 | DerivePointerAlignment: true 56 | DisableFormat: false 57 | ExperimentalAutoDetectBinPacking: false 58 | FixNamespaceComments: true 59 | ForEachMacros: 60 | - foreach 61 | - Q_FOREACH 62 | - BOOST_FOREACH 63 | IncludeBlocks: Preserve 64 | IncludeCategories: 65 | - Regex: '^' 66 | Priority: 2 67 | - Regex: '^<.*\.h>' 68 | Priority: 1 69 | - Regex: '^<.*' 70 | Priority: 2 71 | - Regex: '.*' 72 | Priority: 3 73 | IncludeIsMainRegex: '([-_](test|unittest))?$' 74 | IndentCaseLabels: true 75 | IndentPPDirectives: None 76 | IndentWidth: 4 77 | IndentWrappedFunctionNames: false 78 | JavaScriptQuotes: Leave 79 | JavaScriptWrapImports: true 80 | KeepEmptyLinesAtTheStartOfBlocks: false 81 | MacroBlockBegin: '' 82 | MacroBlockEnd: '' 83 | MaxEmptyLinesToKeep: 1 84 | NamespaceIndentation: None 85 | ObjCBinPackProtocolList: Never 86 | ObjCBlockIndentWidth: 4 87 | ObjCSpaceAfterProperty: false 88 | ObjCSpaceBeforeProtocolList: true 89 | PenaltyBreakAssignment: 2 90 | PenaltyBreakBeforeFirstCallParameter: 1 91 | PenaltyBreakComment: 300 92 | PenaltyBreakFirstLessLess: 120 93 | PenaltyBreakString: 1000 94 | PenaltyBreakTemplateDeclaration: 10 95 | PenaltyExcessCharacter: 1000000 96 | PenaltyReturnTypeOnItsOwnLine: 200 97 | PointerAlignment: Left 98 | RawStringFormats: 99 | - Language: Cpp 100 | Delimiters: 101 | - cc 102 | - CC 103 | - cpp 
104 | - Cpp 105 | - CPP 106 | - 'c++' 107 | - 'C++' 108 | CanonicalDelimiter: '' 109 | BasedOnStyle: google 110 | - Language: TextProto 111 | Delimiters: 112 | - pb 113 | - PB 114 | - proto 115 | - PROTO 116 | EnclosingFunctions: 117 | - EqualsProto 118 | - EquivToProto 119 | - PARSE_PARTIAL_TEXT_PROTO 120 | - PARSE_TEST_PROTO 121 | - PARSE_TEXT_PROTO 122 | - ParseTextOrDie 123 | - ParseTextProtoOrDie 124 | CanonicalDelimiter: '' 125 | BasedOnStyle: google 126 | ReflowComments: true 127 | SortIncludes: true 128 | SortUsingDeclarations: true 129 | SpaceAfterCStyleCast: false 130 | SpaceAfterTemplateKeyword: true 131 | SpaceBeforeAssignmentOperators: true 132 | SpaceBeforeCpp11BracedList: false 133 | SpaceBeforeCtorInitializerColon: true 134 | SpaceBeforeInheritanceColon: true 135 | SpaceBeforeParens: ControlStatements 136 | SpaceBeforeRangeBasedForLoopColon: true 137 | SpaceInEmptyParentheses: false 138 | SpacesBeforeTrailingComments: 2 139 | SpacesInAngles: false 140 | SpacesInContainerLiterals: true 141 | SpacesInCStyleCastParentheses: false 142 | SpacesInParentheses: false 143 | SpacesInSquareBrackets: false 144 | Standard: Auto 145 | StatementMacros: 146 | - Q_UNUSED 147 | - QT_REQUIRE_VERSION 148 | TabWidth: 8 149 | UseTab: Never 150 | ... 151 | 152 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [push, pull_request] 3 | jobs: 4 | check-style: 5 | runs-on: ubuntu-latest 6 | #environment: development 7 | steps: 8 | - uses: actions/checkout@v2 9 | - name: Run clang-format style check. 10 | uses: jidicula/clang-format-action@v4.4.1 11 | with: 12 | clang-format-version: '13' 13 | check-path: 'nn-hal' 14 | fetch-code: 15 | needs: check-style 16 | runs-on: self-hosted 17 | env: 18 | ROOT_DIR: /srv/workspace 19 | REPO_NAME: intel-nnhal-dev 20 | steps: 21 | - name: Clone repo. 
22 | run: | 23 | echo ${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }} 24 | cd ${ROOT_DIR}/src/third_party/ 25 | rm -rf ${REPO_NAME} 26 | git clone ${{ github.server_url }}/${{ github.repository }} ${REPO_NAME} 27 | cd ${REPO_NAME} 28 | - name: Fetch push code. 29 | if: github.event_name == 'push' 30 | run: | 31 | cd ${ROOT_DIR}/src/third_party/${REPO_NAME}/ 32 | git checkout ${{ github.sha }} 33 | cp -r ci/* ${ROOT_DIR}/src/ 34 | - name: Fetch pull request code. 35 | if: github.event_name == 'pull_request' 36 | run: | 37 | cd ${ROOT_DIR}/src/third_party/${REPO_NAME}/ 38 | git fetch origin pull/${{ github.event.number }}/head:${{ github.head_ref }} 39 | git checkout ${{ github.head_ref }} 40 | cp -r ci/* ${ROOT_DIR}/src/ 41 | build-package: 42 | needs: fetch-code 43 | runs-on: self-hosted 44 | env: 45 | ROOT_DIR: /srv/workspace 46 | steps: 47 | - name: Build and deploy nn-hal. 48 | run: | 49 | cd ${ROOT_DIR}/src/ 50 | sh build-test.sh "build" 51 | test-functional: 52 | needs: build-package 53 | runs-on: self-hosted 54 | env: 55 | ROOT_DIR: /srv/workspace 56 | steps: 57 | - name: Run functional tests for nn-hal. 
58 | run: | 59 | cd ${ROOT_DIR}/src/ 60 | sh build-test.sh "functional" 61 | -------------------------------------------------------------------------------- /Android.bp: -------------------------------------------------------------------------------- 1 | //############################################################# 2 | cc_library_shared { 3 | 4 | name: "android.hardware.neuralnetworks@1.3-generic-impl", 5 | proprietary: true, 6 | owner: "intel", 7 | compile_multilib: "64", 8 | 9 | srcs: [ 10 | "Driver.cpp", 11 | "BasePreparedModel.cpp", 12 | "utils.cpp", 13 | "IENetwork.cpp", 14 | "ModelManager.cpp", 15 | "cpu/CpuPreparedModel.cpp", 16 | "gna/GnaPreparedModel.cpp" 17 | ], 18 | 19 | local_include_dirs: [ 20 | "ngraph_creator/include", 21 | "ngraph_creator/operations/include", 22 | "cpu", 23 | "gna" 24 | ], 25 | 26 | include_dirs: [ 27 | "frameworks/ml/nn/common/include", 28 | "frameworks/ml/nn/runtime/include", 29 | "frameworks/native/libs/nativewindow/include", 30 | "external/mesa3d/include/android_stub" 31 | ], 32 | 33 | header_libs: [ 34 | "libngraph_headers", 35 | "libinference_headers", 36 | "libMKLDNNPlugin_headers", 37 | "libpugixml_headers", 38 | "plugin_api_headers", 39 | ], 40 | 41 | cflags: [ 42 | "-fexceptions", 43 | "-std=c++11", 44 | "-fPIE", 45 | "-Wall", 46 | "-Wno-unused-variable", 47 | "-Wno-unused-parameter", 48 | "-Wno-non-virtual-dtor", 49 | "-Wno-missing-field-initializers", 50 | "-Wno-error", 51 | "-Wextra", 52 | "-Wno-extern-c-compat", 53 | "-Wno-sign-compare", 54 | "-Wno-unused-local-typedef", 55 | "-Wno-unused-private-field", 56 | "-Wno-invalid-partial-specialization", 57 | "-Wno-array-bounds", 58 | "-D_FORTIFY_SOURCE=2", 59 | "-fvisibility=default", 60 | "-fwrapv", 61 | "-fstack-protector-all", 62 | "-Wno-conversion-null", 63 | "-Wnull-dereference", 64 | "-Warray-bounds", 65 | "-O2", 66 | "-fPIC", 67 | ] + [ 68 | "-D__ANDROID__", 69 | "-DANDROID", 70 | "-DIE_LEGACY", 71 | ], 72 | 73 | strip: { 74 | none: true, 75 | }, 76 | 77 | 
shared_libs: [ 78 | "android.hardware.neuralnetworks@1.0", 79 | "android.hardware.neuralnetworks@1.1", 80 | "android.hardware.neuralnetworks@1.2", 81 | "android.hardware.neuralnetworks@1.3", 82 | "android.hidl.allocator@1.0", 83 | "android.hidl.memory@1.0", 84 | "libbase", 85 | "libcutils", 86 | "libdl", 87 | "libfmq", 88 | "libhardware", 89 | "libhidlbase", 90 | "libhidlmemory", 91 | "liblog", 92 | "libnativewindow", 93 | "libutils", 94 | "libinference_engine", 95 | "libngraph", 96 | "libMKLDNNPlugin" 97 | ], 98 | 99 | static_libs: [ 100 | "libpugixml", 101 | "libneuralnetworks_common", 102 | "libngraph_creator", 103 | ], 104 | 105 | defaults: [ 106 | "neuralnetworks_defaults" 107 | ] 108 | 109 | } 110 | 111 | //############################################################## 112 | cc_binary { 113 | name: "android.hardware.neuralnetworks@1.3-generic-service", 114 | init_rc: [ 115 | "config/android.hardware.neuralnetworks@1.3-generic-cpu.rc", 116 | ], 117 | relative_install_path: "hw", 118 | proprietary: true, 119 | owner: "intel", 120 | srcs: ["service.cpp"], 121 | 122 | include_dirs: [ 123 | "frameworks/ml/nn/common/include", 124 | "frameworks/ml/nn/runtime/include", 125 | "frameworks/native/libs/nativewindow/include", 126 | "external/mesa3d/include/android_stub" 127 | ], 128 | 129 | cflags: [ 130 | "-fexceptions", 131 | "-fPIE", 132 | "-std=c++11", 133 | "-Wno-error=deprecated-declarations", 134 | "-fvisibility=default", 135 | "-fPIC", 136 | ], 137 | 138 | shared_libs: [ 139 | "libhidlbase", 140 | "libhidltransport", 141 | "libhidlmemory", 142 | "libutils", 143 | "liblog", 144 | "libcutils", 145 | "libhardware", 146 | "libnativewindow", 147 | "android.hardware.neuralnetworks@1.3", 148 | "android.hardware.neuralnetworks@1.3-generic-impl", 149 | "android.hidl.allocator@1.0", 150 | "android.hidl.memory@1.0", 151 | ], 152 | 153 | defaults: [ 154 | "neuralnetworks_defaults" 155 | ], 156 | 157 | compile_multilib: "64", 158 | } 
-------------------------------------------------------------------------------- /IENetwork.cpp: -------------------------------------------------------------------------------- 1 | #include "IENetwork.h" 2 | #include "ie_common.h" 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #undef LOG_TAG 10 | #define LOG_TAG "IENetwork" 11 | 12 | namespace android { 13 | namespace hardware { 14 | namespace neuralnetworks { 15 | namespace nnhal { 16 | 17 | bool IENetwork::loadNetwork() { 18 | ALOGD("%s", __func__); 19 | 20 | #if __ANDROID__ 21 | InferenceEngine::Core ie(std::string("/vendor/etc/openvino/plugins.xml")); 22 | #else 23 | InferenceEngine::Core ie(std::string("/usr/local/lib64/plugins.xml")); 24 | #endif 25 | std::map config; 26 | 27 | if (mNetwork) { 28 | mExecutableNw = ie.LoadNetwork(*mNetwork, "CPU"); 29 | ALOGD("LoadNetwork is done...."); 30 | mInferRequest = mExecutableNw.CreateInferRequest(); 31 | ALOGD("CreateInfereRequest is done...."); 32 | 33 | mInputInfo = mNetwork->getInputsInfo(); 34 | mOutputInfo = mNetwork->getOutputsInfo(); 35 | } else { 36 | ALOGE("Invalid Network pointer"); 37 | return false; 38 | } 39 | 40 | return true; 41 | } 42 | 43 | // Need to be called before loadnetwork.. 
But not sure whether need to be called for 44 | // all the inputs in case multiple input / output 45 | void IENetwork::prepareInput(InferenceEngine::Precision precision, InferenceEngine::Layout layout) { 46 | ALOGE("%s", __func__); 47 | 48 | auto inputInfoItem = *mInputInfo.begin(); 49 | inputInfoItem.second->setPrecision(precision); 50 | inputInfoItem.second->setLayout(layout); 51 | } 52 | 53 | void IENetwork::prepareOutput(InferenceEngine::Precision precision, 54 | InferenceEngine::Layout layout) { 55 | InferenceEngine::DataPtr& output = mOutputInfo.begin()->second; 56 | output->setPrecision(precision); 57 | output->setLayout(layout); 58 | } 59 | 60 | void IENetwork::setBlob(const std::string& inName, const InferenceEngine::Blob::Ptr& inputBlob) { 61 | ALOGI("setBlob input or output blob name : %s", inName.c_str()); 62 | mInferRequest.SetBlob(inName, inputBlob); 63 | } 64 | 65 | InferenceEngine::TBlob::Ptr IENetwork::getBlob(const std::string& outName) { 66 | InferenceEngine::Blob::Ptr outputBlob; 67 | outputBlob = mInferRequest.GetBlob(outName); 68 | return android::hardware::neuralnetworks::nnhal::As>(outputBlob); 69 | } 70 | 71 | void IENetwork::infer() { 72 | ALOGI("Infer Network\n"); 73 | mInferRequest.StartAsync(); 74 | mInferRequest.Wait(10000); 75 | ALOGI("infer request completed"); 76 | } 77 | 78 | } // namespace nnhal 79 | } // namespace neuralnetworks 80 | } // namespace hardware 81 | } // namespace android 82 | -------------------------------------------------------------------------------- /IENetwork.h: -------------------------------------------------------------------------------- 1 | #ifndef __DEVICE_PLUGIN_H 2 | #define __DEVICE_PLUGIN_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "utils.h" 12 | // #include "ie_blob.h" 13 | // #include "ie_common.h" 14 | // #include "ie_core.hpp" 15 | // #include "inference_engine.hpp" 16 | 17 | namespace android { 18 | namespace hardware { 19 | namespace 
neuralnetworks { 20 | namespace nnhal { 21 | 22 | class IIENetwork { 23 | public: 24 | virtual ~IIENetwork() {} 25 | virtual bool loadNetwork() = 0; 26 | virtual InferenceEngine::InferRequest getInferRequest() = 0; 27 | virtual void infer() = 0; 28 | virtual void queryState() = 0; 29 | virtual InferenceEngine::TBlob::Ptr getBlob(const std::string& outName) = 0; 30 | virtual void prepareInput(InferenceEngine::Precision precision, 31 | InferenceEngine::Layout layout) = 0; 32 | virtual void prepareOutput(InferenceEngine::Precision precision, 33 | InferenceEngine::Layout layout) = 0; 34 | virtual void setBlob(const std::string& inName, 35 | const InferenceEngine::Blob::Ptr& inputBlob) = 0; 36 | }; 37 | 38 | // Abstract this class for all accelerators 39 | class IENetwork : public IIENetwork { 40 | private: 41 | std::shared_ptr mNetwork; 42 | InferenceEngine::ExecutableNetwork mExecutableNw; 43 | InferenceEngine::InferRequest mInferRequest; 44 | InferenceEngine::InputsDataMap mInputInfo; 45 | InferenceEngine::OutputsDataMap mOutputInfo; 46 | 47 | public: 48 | IENetwork() : IENetwork(nullptr) {} 49 | IENetwork(std::shared_ptr network) : mNetwork(network) {} 50 | 51 | virtual bool loadNetwork(); 52 | void prepareInput(InferenceEngine::Precision precision, InferenceEngine::Layout layout); 53 | void prepareOutput(InferenceEngine::Precision precision, InferenceEngine::Layout layout); 54 | void setBlob(const std::string& inName, const InferenceEngine::Blob::Ptr& inputBlob); 55 | InferenceEngine::TBlob::Ptr getBlob(const std::string& outName); 56 | InferenceEngine::InferRequest getInferRequest() { return mInferRequest; } 57 | void queryState() {} 58 | void infer(); 59 | }; 60 | 61 | } // namespace nnhal 62 | } // namespace neuralnetworks 63 | } // namespace hardware 64 | } // namespace android 65 | #endif -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 
1 | # DISCONTINUATION OF PROJECT 2 | This project will no longer be maintained by Intel. 3 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 4 | Intel no longer accepts patches to this project. 5 | If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project. 6 | 7 | ![CI](https://github.com/reaganlo/nn-hal/actions/workflows/ci.yml/badge.svg) 8 | 9 | # Android Neural Networks HAL with OpenVINO supporting hardware accelerators such as / 10 | Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) 11 | 12 | ## Introduction 13 | The Android Neural Networks Hardware Abstraction Layer (NN HAL) provides hardware acceleration 14 | for the Android Neural Networks (NN) API. Intel NN-HAL takes advantage of Intel MKL-DNN, 15 | enabling a high-performance and low-power implementation of the Neural Networks API. 16 | Intel MKL-DNN https://github.com/intel/mkl-dnn & https://01.org/mkl-dnn 17 | Android NN API is on [Neural Networks API] 18 | (https://developer.android.com/ndk/guides/neuralnetworks/index.html). 19 | OpenVINO deep learning framework https://github.com/opencv/dldt & https://01.org/openvinotoolkit 20 | 21 | 22 | ## Supported Operations 23 | The following operations are currently supported by the Android Neural Networks HAL for Intel MKL-DNN. 
24 | 25 | * ANEURALNETWORKS_CONV_2D 26 | * ANEURALNETWORKS_ADD 27 | 28 | ## Known issues 29 | Support for multiple tensor inputs at runtime to a model/network is ongoing. 30 | 31 | ## License 32 | Android Neural Networks HAL is distributed under the Apache License, Version 2.0 33 | You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 34 | Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) is an open source 35 | performance library for Deep Learning (DL) applications intended for acceleration of DL 36 | frameworks on Intel® architecture. 37 | 38 | 39 | ## How to provide feedback 40 | By default, please submit an issue using the native github.com interface: 41 | https://github.com/intel/nn-hal/issues 42 | 43 | ## How to contribute 44 | 45 | Create a pull request on github.com with your patch. Make sure your change builds cleanly 46 | and passes ULTs. 47 | 48 | A maintainer will contact you if there are questions or concerns. 49 | 50 | ## Continuous Integration 51 | Before committing any changes, make sure the coding style and testing configs are correct. 52 | If not, the CI will fail. 53 | 54 | ### Coding Style 55 | 56 | Run the following command to ensure that the proper coding style is being followed: 57 | ``` 58 | find . -regex '.*\.\(cpp\|hpp\|cc\|cxx\|h\)' -exec clang-format -style=file -i {} \; 59 | ``` 60 | 61 | ### Build and Test 62 | 63 | Update the BOARD value in [build-test.sh](ci/build-test.sh) as per your test requirement. 64 | If your BOARD is not supported, please contact the maintainer to get it added. 65 | 66 | Currently, the CI builds the intel-nnhal package and runs the following tests: 67 | - Functional tests that include ml_cmdline and a subset of cts and vts tests. 
68 | -------------------------------------------------------------------------------- /ci/build-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Update the BOARD based on the testing requirement. ## 4 | ## Currently the following boards are supported: ## 5 | ## - volteer ## 6 | BOARD=volteer 7 | ACTION=$1 8 | 9 | red=`tput setaf 1` 10 | green=`tput setaf 2` 11 | blue=`tput setaf 4` 12 | reset=`tput sgr0` 13 | 14 | # Function to run command. 15 | runCmd() { 16 | echo "${blue}RUN: \"${cmd}\"${reset}" 17 | ${cmd} 18 | status=$? 19 | if [ ${status} -eq 0 ]; then 20 | echo "${green}SUCCESS: \"${cmd}\"${reset}" 21 | else 22 | echo "${red}FAIL: \"${cmd}\"${reset}" 23 | exit 1 24 | fi 25 | } 26 | 27 | # Function to return correct status for ml_cmdline based on output log. 28 | # This function is required because, by default, ml_cmdline 29 | # always returns status 0 irrespective of whether it failed or not. 30 | mlCmdline() { 31 | sub_cmd="ssh root@${IPADDRESS} ml_cmdline --nnapi" 32 | if ! ${sub_cmd} | grep "Status: OK" ; then 33 | return 1 34 | else 35 | return 0 36 | fi 37 | } 38 | 39 | # Function to get DUT IP address from config file based on BOARD. 
40 | getBoardAddr() { 41 | IPADDRESS=$(awk -v key=${BOARD} -F "=" 'BEGIN{/key/} {print $2}' boards.ini | tr -d ' ' | sed -r '/^\s*$/d') 42 | echo ${IPADDRESS} 43 | if [ -z "${IPADDRESS}" ]; then 44 | echo "${red}ERROR: Unsupported BOARD=${BOARD}.${reset}" 45 | exit 1 46 | fi 47 | } 48 | 49 | getBoardAddr 50 | 51 | echo ${ACTION} 52 | if [ "${ACTION}" = "build" ]; then 53 | 54 | cmd="cros_sdk --enter" 55 | runCmd 56 | 57 | cmd="cros_sdk -- cros_workon-${BOARD} start intel-nnhal" 58 | runCmd 59 | 60 | cmd="cros_sdk USE=\"vendor-nnhal\" -- emerge-${BOARD} intel-nnhal" 61 | runCmd 62 | 63 | cmd="cros_sdk -- cros_workon_make --board=${BOARD} --install intel-nnhal" 64 | runCmd 65 | 66 | cmd="cros_sdk -- cros deploy ssh://${IPADDRESS} intel-nnhal" 67 | runCmd 68 | 69 | elif [ "${ACTION}" = "functional" ]; then 70 | 71 | # Run nnapi ml test 72 | cmd=mlCmdline 73 | runCmd 74 | 75 | # Run required cts tests 76 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && cros_nnapi_cts --gtest_filter=-Validation*:TestGenerated*:TestRandom*:Generated*:UnknownCombinations*" 77 | runCmd 78 | 79 | # Run subset of nnapi vts 1_0 tests 80 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && cros_nnapi_vts_1_0 --gtest_filter=-Validation*:TestGenerated*:TestRandom*:Generated*:UnknownCombinations*" 81 | runCmd 82 | 83 | # Run subset of nnapi vts 1_1 tests 84 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && cros_nnapi_vts_1_1 --gtest_filter=-Validation*:TestGenerated*:TestRandom*:Generated*:UnknownCombinations*" 85 | runCmd 86 | 87 | # Run subset of nnapi vts 1_2 tests 88 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && cros_nnapi_vts_1_2 --gtest_filter=-Validation*:TestGenerated*:TestRandom*:Generated*:UnknownCombinations*" 89 | runCmd 90 | 91 | # Run subset of nnapi vts 1_3 tests 92 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && cros_nnapi_vts_1_3 
--gtest_filter=-Validation*:TestGenerated*:TestRandom*:Generated*:UnknownCombinations*" 93 | runCmd 94 | 95 | elif [ "${ACTION}" = "regression" ]; then 96 | # Copy test script to DUT 97 | scp cts-vts.py root@${IPADDRESS}:~/ 98 | 99 | # Run nnapi cts tests 100 | cmd="ssh root@${IPADDRESS} export ANDROID_LOG_TAGS=\"*:f\" && python cts-vts.py --cts" 101 | runCmd 102 | scp root@${IPADDRESS}:~/cts_*.csv . # Copy test result to host server 103 | ssh root@${IPADDRESS} rm -f cts_*.csv # Delete test result to save space in DUT 104 | 105 | # Run nnapi vts_1_0 tests 106 | cmd="ssh root@${IPADDRESS} python cts-vts.py --vts10" 107 | runCmd 108 | scp root@${IPADDRESS}:~/vts10_*.csv . # Copy test result to host server 109 | ssh root@${IPADDRESS} rm -f vts10_*.csv # Delete test result to save space in DUT 110 | 111 | # Run nnapi vts_1_1 tests 112 | cmd="ssh root@${IPADDRESS} python cts-vts.py --vts11" 113 | runCmd 114 | scp root@${IPADDRESS}:~/vts11_*.csv . # Copy test result to host server 115 | ssh root@${IPADDRESS} rm -f vts11_*.csv # Delete test result to save space in DUT 116 | 117 | # Run nnapi vts_1_2 tests 118 | cmd="ssh root@${IPADDRESS} python cts-vts.py --vts12" 119 | runCmd 120 | scp root@${IPADDRESS}:~/vts12_*.csv . # Copy test result to host server 121 | ssh root@${IPADDRESS} rm -f vts12_*.csv # Delete test result to save space in DUT 122 | 123 | # Run nnapi vts_1_3 tests 124 | cmd="ssh root@${IPADDRESS} python cts-vts.py --vts13" 125 | runCmd 126 | scp root@${IPADDRESS}:~/vts13_*.csv . # Copy test result to host server 127 | ssh root@${IPADDRESS} rm -f vts13_*.csv # Delete test result to save space in DUT 128 | fi 129 | -------------------------------------------------------------------------------- /ci/intel-nnhal-dev/intel-nnhal-dev-9999.ebuild: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The Chromium OS Authors. All rights reserved. 
2 | # Distributed under the terms of the GNU General Public License v2 3 | 4 | EAPI=7 5 | 6 | CROS_WORKON_PROJECT=("chromiumos/platform2" "third_party/intel-nnhal-dev" "third_party/intel-openvino-dev") 7 | CROS_WORKON_LOCALNAME=("platform2" "third_party/intel-nnhal-dev" "third_party/intel-openvino-dev") 8 | CROS_WORKON_DESTDIR=("${S}/platform2" "${S}/platform2/intel-nnhal-dev" "${S}/platform2/intel-openvino-dev") 9 | CROS_WORKON_SUBTREE=("common-mk intel-nnhal-dev .gn" "" "") 10 | 11 | PLATFORM_SUBDIR="intel-nnhal-dev" 12 | 13 | inherit cros-debug cros-workon platform 14 | 15 | DESCRIPTION="Intel NNAPI HAL" 16 | HOMEPAGE="https://github.com/intel/nn-hal" 17 | 18 | LICENSE="BSD-Google" 19 | KEYWORDS="*" 20 | SLOT="0/0" 21 | 22 | RDEPEND=" 23 | chromeos-base/aosp-frameworks-ml-nn 24 | chromeos-base/intel-openvino-dev 25 | " 26 | 27 | DEPEND=" 28 | >=dev-libs/openssl-1.0.1:0 29 | ${RDEPEND} 30 | " 31 | RESTRICT="strip" 32 | 33 | src_prepare() { 34 | append-cxxflags "-g -O2 -ggdb" 35 | 36 | cros_enable_cxx_exceptions 37 | eapply_user 38 | } 39 | 40 | src_configure() { 41 | if use x86 || use amd64; then 42 | append-cppflags "-D_Float16=__fp16" 43 | append-cxxflags "-Xclang -fnative-half-type" 44 | append-cxxflags "-Xclang -fallow-half-arguments-and-returns" 45 | fi 46 | platform_src_configure 47 | } 48 | 49 | src_install() { 50 | dolib.so "${OUT}/lib/libvendor-nn-hal.so" 51 | dolib.so "${OUT}/lib/libintel_nnhal.so" 52 | #dostrip -x "${OUT}/lib/libintel_nnhal.so" 53 | } 54 | -------------------------------------------------------------------------------- /ci/intel-openvino-dev/intel-openvino-dev-9999.ebuild: -------------------------------------------------------------------------------- 1 | # Copyright 1999-2018 Gentoo Authors 2 | # Distributed under the terms of the GNU General Public License v2 3 | 4 | EAPI=7 5 | 6 | inherit cmake-utils git-r3 flag-o-matic cros-workon 7 | 8 | DESCRIPTION="Intel OpenVino Toolkit" 9 | 
HOMEPAGE="https://github.com/openvinotoolkit/openvino" 10 | 11 | CMAKE_BUILD_TYPE="Debug" 12 | LICENSE="BSD-Google" 13 | KEYWORDS="-* amd64" 14 | IUSE="+clang" 15 | SLOT="0" 16 | 17 | CROS_WORKON_PROJECT="third_party/intel-openvino-dev" 18 | CROS_WORKON_LOCALNAME="third_party/intel-openvino-dev" 19 | 20 | RDEPEND=" 21 | dev-libs/protobuf 22 | media-libs/opencv 23 | " 24 | 25 | DEPEND=" 26 | ${RDEPEND} 27 | " 28 | src_preapre() { 29 | eapply_user 30 | cmake-utils_src_prepare 31 | } 32 | 33 | src_configure() { 34 | cros_enable_cxx_exceptions 35 | append-flags "-Wno-error -frtti -msse4.2 -fvisibility=default -Wno-macro-redefined" 36 | CPPFLAGS="-I${S}/inference-engine/gna/include -I${S}/inference-engine/omp/include -I${S}/ngraph/src -I${S}/inference_engine/ngraph_ops ${CPPFLAGS}" 37 | 38 | local mycmakeargs=( 39 | -DCMAKE_INSTALL_PREFIX="/usr/local/" 40 | -DCMAKE_BUILD_TYPE=Debug 41 | -DENABLE_CLDNN=OFF 42 | -DENABLE_GNA=OFF 43 | -DENABLE_NGRAPH=ON 44 | -DENABLE_FUNCTIONAL_TESTS=OFF 45 | -DTHREADING=SEQ 46 | -DENABLE_MKL_DNN=ON 47 | -DTARGET_OS="CHROMEOS" 48 | -DENABLE_OPENCV=OFF 49 | -DENABLE_SAMPLES=ON 50 | -DENABLE_TESTS=OFF 51 | -DBUILD_SHARED_LIBS=ON 52 | -DENABLE_PROTOC=OFF 53 | -DNGRAPH_ONNX_IMPORT_ENABLE=OFF 54 | -DNGRAPH_TEST_UTIL_ENABLE=OFF 55 | -DENABLE_MYRIAD=OFF 56 | -DENABLE_VPU=ON 57 | -DENABLE_SPEECH_DEMO=OFF 58 | -DGFLAGS_INSTALL_HEADERS=OFF 59 | -DNGRAPH_ONNX_IMPORT_ENABLE=OFF 60 | -DNGRAPH_ONNX_FRONTEND_ENABLE=OFF 61 | -DNGRAPH_PDPD_FRONTEND_ENABLE=OFF 62 | ) 63 | cmake-utils_src_configure 64 | } 65 | 66 | src_install() { 67 | cmake-utils_src_install 68 | 69 | exeinto /usr/local/bin 70 | doexe ${S}/bin/intel64/Debug/hello_query_device 71 | } 72 | -------------------------------------------------------------------------------- /config/android.hardware.neuralnetworks@1.0-generic.rc: -------------------------------------------------------------------------------- 1 | service neuralnetworks-hal-1-0-cpu 
/vendor/bin/hw/android.hardware.neuralnetworks@1.0-generic-service -D CPU 2 | class hal 3 | user system 4 | group system 5 | 6 | 7 | -------------------------------------------------------------------------------- /config/android.hardware.neuralnetworks@1.1-generic-cpu.rc: -------------------------------------------------------------------------------- 1 | service neuralnetworks-hal-1-1-cpu /vendor/bin/hw/android.hardware.neuralnetworks@1.1-generic-service -D CPU 2 | class hal 3 | user system 4 | group system 5 | -------------------------------------------------------------------------------- /config/android.hardware.neuralnetworks@1.1-generic-gpu.rc: -------------------------------------------------------------------------------- 1 | service neuralnetworks-hal-1-1-gpu /vendor/bin/hw/android.hardware.neuralnetworks@1.1-generic-service -D GPU 2 | class hal 3 | user system 4 | group system 5 | -------------------------------------------------------------------------------- /config/android.hardware.neuralnetworks@1.2-generic-cpu.rc: -------------------------------------------------------------------------------- 1 | service neuralnetworks-hal-1-2-cpu /vendor/bin/hw/android.hardware.neuralnetworks@1.2-generic-service -D CPU 2 | class hal 3 | user system 4 | group system 5 | -------------------------------------------------------------------------------- /config/android.hardware.neuralnetworks@1.3-generic-cpu.rc: -------------------------------------------------------------------------------- 1 | service neuralnetworks-hal-1-3-cpu /vendor/bin/hw/android.hardware.neuralnetworks@1.3-generic-service -D CPU 2 | class hal 3 | user system 4 | group system 5 | -------------------------------------------------------------------------------- /cpu/CpuPreparedModel.cpp: -------------------------------------------------------------------------------- 1 | #include "CpuPreparedModel.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 
#include "ExecutionBurstServer.h"
#include "ValidateHal.h"
#include "utils.h"

#undef LOG_TAG
#define LOG_TAG "CpuPreparedModel"

using namespace android::nn;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Tears down per-model runtime state; invoked from ~CpuPreparedModel().
void CpuPreparedModel::deinitialize() {
    ALOGV("Entering %s", __func__);
    // Release the runtime memory pools mapped by initRuntimeInfo()/execution.
    mModelInfo->unmapRuntimeMemPools();

    ALOGV("Exiting %s", __func__);
}

// Converts the NNAPI model into an ngraph function, serializes the generated
// IR to disk for offline inspection, then loads the network on the plugin.
// Returns false on any failure so prepareModel can report an error status
// instead of aborting the service.
// NOTE(review): template arguments inside angle brackets (e.g. the type
// parameters of std::make_shared<...>) were lost in this text dump; restore
// from upstream before compiling.
bool CpuPreparedModel::initialize() {
    ALOGV("Entering %s", __func__);
    if (!mModelInfo->initRuntimeInfo()) {
        ALOGE("Failed to initialize Model runtime parameters!!");
        return false;
    }
    mNgraphNetCreator = std::make_shared(mModelInfo, mTargetDevice);

    if (!mNgraphNetCreator->validateOperations()) return false;
    ALOGI("Generating IR Graph");
    auto ngraph_function = mNgraphNetCreator->generateGraph();
    if (ngraph_function == nullptr) {
        ALOGE("%s ngraph generation failed", __func__);
        return false;
    }
    try {
        cnnNetworkPtr = std::make_shared(ngraph_function);
        // Dump the IR next to the service for debugging; path differs per OS.
#if __ANDROID__
        cnnNetworkPtr->serialize("/data/vendor/neuralnetworks/ngraph_ir.xml",
                                 "/data/vendor/neuralnetworks/ngraph_ir.bin");
#else
        cnnNetworkPtr->serialize("/tmp/ngraph_ir.xml", "/tmp/ngraph_ir.bin");
#endif
        mPlugin = std::make_shared(cnnNetworkPtr);
        mPlugin->loadNetwork();
    } catch (const std::exception& ex) {
        // OpenVINO throws on invalid/unsupported graphs; fail preparation
        // gracefully rather than letting the exception escape the HAL.
        ALOGE("%s Exception !!! %s", __func__, ex.what());
        return false;
    }

    ALOGV("Exiting %s", __func__);
    return true;
}

// HAL 1.2 burst setup: wraps the caller-supplied FMQ request/result channels
// in an ExecutionBurstServer bound to this prepared model.
Return CpuPreparedModel::configureExecutionBurst(
    const sp& callback,
    const MQDescriptorSync& requestChannel,
    const MQDescriptorSync& resultChannel, configureExecutionBurst_cb cb) {
    ALOGV("Entering %s", __func__);
    const sp burst =
        ExecutionBurstServer::create(callback, requestChannel, resultChannel, this);

    if (burst == nullptr) {
        cb(ErrorStatus::GENERAL_FAILURE, {});
        ALOGI("%s GENERAL_FAILURE", __func__);
    } else {
        cb(ErrorStatus::NONE, burst);
        ALOGI("%s burst created", __func__);
    }
    return Void();
}

#undef LOG_TAG

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- cpu/CpuPreparedModel.h ---
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
15 | */ 16 | 17 | #ifndef ANDROID_ML_NN_CPU_PREPAREDMODEL_H 18 | #define ANDROID_ML_NN_CPU_PREPAREDMODEL_H 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #include "BasePreparedModel.h" 27 | 28 | using namespace InferenceEngine; 29 | 30 | namespace android { 31 | namespace hardware { 32 | namespace neuralnetworks { 33 | namespace nnhal { 34 | 35 | class CpuPreparedModel : public BasePreparedModel { 36 | public: 37 | CpuPreparedModel(const Model& model) : BasePreparedModel(IntelDeviceType::CPU, model) {} 38 | ~CpuPreparedModel() { deinitialize(); } 39 | 40 | bool initialize() override; 41 | Return configureExecutionBurst( 42 | const sp& callback, 43 | const MQDescriptorSync& requestChannel, 44 | const MQDescriptorSync& resultChannel, 45 | configureExecutionBurst_cb cb) override; 46 | 47 | protected: 48 | void deinitialize() override; 49 | }; 50 | 51 | } // namespace nnhal 52 | } // namespace neuralnetworks 53 | } // namespace hardware 54 | } // namespace android 55 | 56 | #endif // ANDROID_ML_NN_CPU_PREPAREDMODEL_H 57 | -------------------------------------------------------------------------------- /gna/GnaPreparedModel.cpp: -------------------------------------------------------------------------------- 1 | #define LOG_TAG "GnaPreparedModel" 2 | 3 | #include "GnaPreparedModel.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "ExecutionBurstServer.h" 10 | #include "ValidateHal.h" 11 | #include "utils.h" 12 | 13 | using namespace android::nn; 14 | 15 | namespace android { 16 | namespace hardware { 17 | namespace neuralnetworks { 18 | namespace nnhal { 19 | 20 | void GnaPreparedModel::deinitialize() { 21 | ALOGV("Entering %s", __func__); 22 | mModelInfo->unmapRuntimeMemPools(); 23 | 24 | ALOGV("Exiting %s", __func__); 25 | } 26 | 27 | bool GnaPreparedModel::initialize() { 28 | ALOGV("Entering %s", __func__); 29 | if (!mModelInfo->initRuntimeInfo()) { 30 | ALOGE("Failed to initialize Model runtime 
parameters!!"); 31 | return false; 32 | } 33 | mNgraphNetCreator = std::make_shared(mModelInfo, mTargetDevice); 34 | 35 | if (!mNgraphNetCreator->validateOperations()) return false; 36 | ALOGI("Generating IR Graph"); 37 | auto ngraph_function = mNgraphNetCreator->generateGraph(); 38 | if (ngraph_function == nullptr) { 39 | ALOGE("%s ngraph generation failed", __func__); 40 | return false; 41 | } 42 | auto ngraph_net = std::make_shared(ngraph_function); 43 | #if __ANDROID__ 44 | ngraph_net->serialize("/data/vendor/neuralnetworks/ngraph_ir.xml", 45 | "/data/vendor/neuralnetworks/ngraph_ir.bin"); 46 | #else 47 | ngraph_net->serialize("/tmp/ngraph_ir.xml", "/tmp/ngraph_ir.bin"); 48 | #endif 49 | mPlugin = std::make_shared(ngraph_net); 50 | mPlugin->loadNetwork(); 51 | 52 | ALOGV("Exiting %s", __func__); 53 | return true; 54 | } 55 | 56 | Return GnaPreparedModel::configureExecutionBurst( 57 | const sp& callback, 58 | const MQDescriptorSync& requestChannel, 59 | const MQDescriptorSync& resultChannel, configureExecutionBurst_cb cb) { 60 | ALOGV("Entering %s", __func__); 61 | const sp burst = 62 | ExecutionBurstServer::create(callback, requestChannel, resultChannel, this); 63 | 64 | if (burst == nullptr) { 65 | cb(ErrorStatus::GENERAL_FAILURE, {}); 66 | ALOGI("%s GENERAL_FAILURE", __func__); 67 | } else { 68 | cb(ErrorStatus::NONE, burst); 69 | ALOGI("%s burst created", __func__); 70 | } 71 | return Void(); 72 | } 73 | 74 | } // namespace nnhal 75 | } // namespace neuralnetworks 76 | } // namespace hardware 77 | } // namespace android 78 | -------------------------------------------------------------------------------- /gna/GnaPreparedModel.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 The Android Open Source Project 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_ML_NN_GNA_PREPAREDMODEL_H
#define ANDROID_ML_NN_GNA_PREPAREDMODEL_H

// NOTE(review): five system-include names were lost in this text dump;
// restore the exact list from upstream.

#include "BasePreparedModel.h"

using namespace InferenceEngine;

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// PreparedModel specialization targeting the Intel GNA accelerator. Mirrors
// CpuPreparedModel; only the IntelDeviceType passed to the base differs.
class GnaPreparedModel : public BasePreparedModel {
public:
    // Binds the NNAPI model to the GNA device type in the shared base.
    GnaPreparedModel(const Model& model) : BasePreparedModel(IntelDeviceType::GNA, model) {}
    // Unmaps runtime memory pools via deinitialize().
    ~GnaPreparedModel() { deinitialize(); }

    bool initialize() override;
    // NOTE(review): angle-bracket template arguments were lost in this dump
    // (Return<void>, sp<...>, MQDescriptorSync<...>); restore from upstream.
    Return configureExecutionBurst(
        const sp& callback,
        const MQDescriptorSync& requestChannel,
        const MQDescriptorSync& resultChannel,
        configureExecutionBurst_cb cb) override;

protected:
    void deinitialize() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

#endif  // ANDROID_ML_NN_GNA_PREPAREDMODEL_H

// --- ngraph_creator/include/NgraphNetworkCreator.hpp ---
#pragma once

// NOTE(review): three system-include names were lost in this dump.

#include "ModelManager.h"
#include "OperationsBase.hpp"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Drives conversion of an NNAPI model into an ngraph function.
class
NgraphNetworkCreator {
private:
    // NOTE(review): angle-bracket template arguments throughout this class
    // were lost in the text dump; restore from upstream before compiling.
    std::shared_ptr mModelInfo;
    std::vector> mOperationNodes;
    std::shared_ptr mNgraphNodes;
    OperationsFactory mOpFactoryInstance;
    // Creates ngraph Parameter nodes for the model's input operands.
    bool createInputParams();
    bool initializeModel();

public:
    NgraphNetworkCreator(std::shared_ptr modelInfo, IntelDeviceType deviceType);
    ~NgraphNetworkCreator();
    // Fills supportedOperations with a per-operation capability flag.
    void getSupportedOperations(std::vector& supportedOperations);
    bool validateOperations();

    const std::string& getNodeName(uint32_t index);

    std::shared_ptr generateGraph();
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/include/NgraphNodes.hpp ---
#pragma once

// NOTE(review): four system-include names were lost in this dump.

#undef LOG_TAG

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Owns the ngraph nodes built while converting an NNAPI model: one output
// per operand, the input Parameter nodes, and the Result nodes.
class NgraphNodes {
private:
    // Output of the node producing each operand, indexed by operand index.
    std::vector> mOutputAtOperandIndex;
    // mForcedNchw flag tracks whether a forced conversion to NCHW has been done at ngraph_creator
    // in the path to current Operand.
    std::vector mForcedNchw;
    std::vector> mInputParams;
    std::vector> mResultNodes;
    // mNodeNames are only populated when requested, as only Inputs and Result NodeNames are
    // required.
25 | std::map mNodeNames; 26 | 27 | public: 28 | NgraphNodes(size_t operandsSize, size_t resultsSize); 29 | ~NgraphNodes(); 30 | 31 | void addInputParam(std::shared_ptr inParam); 32 | void setOutputAtOperandIndex(size_t index, ngraph::Output output); 33 | ngraph::Output getOperationOutput(size_t index); 34 | void setResultNode(size_t outputIndex, std::shared_ptr resultNode); 35 | 36 | const std::string& getNodeName(size_t index); 37 | void removeInputParameter(std::string name, size_t index); 38 | 39 | std::shared_ptr generateGraph(); 40 | // Setting the node name to empty string "". Caller of getNodeName should validate against "". 41 | void setInvalidNode(size_t index); 42 | }; 43 | 44 | } // namespace nnhal 45 | } // namespace neuralnetworks 46 | } // namespace hardware 47 | } // namespace android 48 | -------------------------------------------------------------------------------- /ngraph_creator/include/OperationsFactory.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | #include 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | #include 58 | #include 59 | #include 60 | #include 61 | #include 62 | #include 63 | #include 64 | #include 65 | #include 66 | #include 67 | #include 68 | #include 69 | #include 70 | #include 71 | 
#include 72 | #include 73 | #include 74 | #include 75 | #include 76 | #include 77 | #include 78 | #include 79 | #include 80 | #include 81 | #include 82 | #include 83 | #include 84 | 85 | namespace android { 86 | namespace hardware { 87 | namespace neuralnetworks { 88 | namespace nnhal { 89 | 90 | class OperationsFactory { 91 | private: 92 | std::shared_ptr mNgraphNodes; 93 | 94 | public: 95 | OperationsFactory(IntelDeviceType deviceType, std::shared_ptr modelInfo, 96 | std::shared_ptr nodes); 97 | ~OperationsFactory(); 98 | std::shared_ptr getOperation(int operationIndex, 99 | const OperationType& operationType); 100 | }; 101 | 102 | } // namespace nnhal 103 | } // namespace neuralnetworks 104 | } // namespace hardware 105 | } // namespace android 106 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Abs.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Abs : public OperationsBase { 11 | public: 12 | Abs(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Add.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Add : public OperationsBase { 11 | public: 12 | Add(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | std::shared_ptr createNodeForPlugin() override; 16 | }; 17 | 18 | } // namespace nnhal 19 | } // namespace 
}  // namespace hardware
}  // namespace android

// NOTE(review): in the files below, #include targets and angle-bracket
// template arguments (e.g. std::shared_ptr<ngraph::Node>) were stripped by
// the text dump; the tokens are kept as-is — restore them from upstream.

// --- ngraph_creator/operations/include/Argmax.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI ARGMAX operation.
class Argmax : public OperationsBase {
public:
    Argmax(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Argmin.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI ARGMIN operation.
class Argmin : public OperationsBase {
public:
    Argmin(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/AveragePool2D.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI AVERAGE_POOL_2D operation (with input validation).
class AveragePool2D : public OperationsBase {
public:
    AveragePool2D(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/BatchToSpace.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI BATCH_TO_SPACE_ND operation.
class BatchToSpace : public OperationsBase {
public:
    BatchToSpace(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/BidirectionalSequenceRNN.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for NNAPI BIDIRECTIONAL_SEQUENCE_RNN; wires its multiple outputs
// into the graph itself via connectOperationToGraph.
class BidirectionalSequenceRNN : public OperationsBase {
public:
    BidirectionalSequenceRNN(int operationIndex);
    std::shared_ptr createNode() override;
    void connectOperationToGraph() override;
    bool isValidInputTensor(uint32_t inputIndex);
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Cast.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI CAST operation; handles its own graph wiring.
class Cast : public OperationsBase {
public:
    Cast(int operationIndex);
    std::shared_ptr createNode() override;
    void connectOperationToGraph() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/ChannelShuffle.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI CHANNEL_SHUFFLE operation.
class ChannelShuffle : public OperationsBase {
public:
    ChannelShuffle(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Concat.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI CONCATENATION operation.
class Concat : public OperationsBase {
public:
    Concat(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Conv2d.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI CONV_2D operation.
class Conv2d : public OperationsBase {
public:
    Conv2d(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/DepthToSpace.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI DEPTH_TO_SPACE operation.
class DepthToSpace : public OperationsBase {
public:
    DepthToSpace(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/DepthwiseConv2d.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI DEPTHWISE_CONV_2D operation.
class DepthwiseConv2d : public OperationsBase {
public:
    DepthwiseConv2d(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Dequantize.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI DEQUANTIZE operation.
class Dequantize : public OperationsBase {
public:
    Dequantize(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// NOTE(review): #include targets and angle-bracket template arguments in the
// files below were stripped by the text dump; tokens kept as-is — restore
// them from upstream.

// --- ngraph_creator/operations/include/Div.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI DIV operation.
class Div : public OperationsBase {
public:
    Div(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/EmbeddingLookup.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI EMBEDDING_LOOKUP operation.
class EmbeddingLookup : public OperationsBase {
public:
    EmbeddingLookup(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Equal.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI EQUAL operation.
class Equal : public OperationsBase {
public:
    Equal(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Exp.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI EXP operation.
class Exp : public OperationsBase {
public:
    Exp(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/ExpandDims.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI EXPAND_DIMS operation.
class ExpandDims : public OperationsBase {
public:
    ExpandDims(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Floor.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI FLOOR operation.
class Floor : public OperationsBase {
public:
    Floor(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/FullyConnected.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI FULLY_CONNECTED operation.
class FullyConnected : public OperationsBase {
public:
    FullyConnected(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Gather.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI GATHER operation.
class Gather : public OperationsBase {
public:
    Gather(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Greater.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI GREATER operation.
class Greater : public OperationsBase {
public:
    Greater(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/GreaterEqual.hpp ---
// NOTE(review): #include targets and angle-bracket template arguments in the
// files below were stripped by the text dump; tokens kept as-is — restore
// them from upstream.

#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI GREATER_EQUAL operation.
class GreaterEqual : public OperationsBase {
public:
    GreaterEqual(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/GroupedConv2d.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI GROUPED_CONV_2D operation.
class GroupedConv2d : public OperationsBase {
public:
    GroupedConv2d(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/HardSwish.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI HARD_SWISH operation.
class HardSwish : public OperationsBase {
public:
    HardSwish(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/InstanceNormalization.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI INSTANCE_NORMALIZATION operation.
class InstanceNormalization : public OperationsBase {
public:
    InstanceNormalization(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/L2Normalization.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI L2_NORMALIZATION operation.
class L2Normalization : public OperationsBase {
public:
    L2Normalization(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/L2Pooling2D.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI L2_POOL_2D operation.
class L2Pooling2D : public OperationsBase {
public:
    L2Pooling2D(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/LSTM.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI LSTM operation. Exposes small arithmetic helpers
// (add/sub/mul/matMul/clip) used to assemble the LSTM cell, plus activation
// and layer-normalization helpers; wires its multiple outputs via
// connectOperationToGraph.
class LSTM : public OperationsBase {
public:
    LSTM(int operationIndex);
    bool validate() override;
    std::shared_ptr createNode() override;
    void connectOperationToGraph() override;

    std::shared_ptr add(const ngraph::Output& lhs,
                        const ngraph::Output& rhs);
    std::shared_ptr sub(const ngraph::Output& lhs,
                        const ngraph::Output& rhs);
    std::shared_ptr mul(const ngraph::Output& lhs,
                        const ngraph::Output& rhs);
    std::shared_ptr matMul(const ngraph::Output& lhs,
                           const ngraph::Output& rhs,
                           bool transpose_lhs, bool transpose_rhs);
    std::shared_ptr clip(const ngraph::Output& data,
                         float m_clip) const;
    std::shared_ptr applyActivation(const std::shared_ptr& arg,
                                    int activationFn) const;
    std::shared_ptr LayerNorm(const ngraph::Output& input,
                              const std::shared_ptr& normalizedweights,
                              const std::shared_ptr& bias);

    bool isValidInputTensor(uint32_t inputIndex);
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android

// --- ngraph_creator/operations/include/Less.hpp ---
#pragma once

#include

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {

// Builder for the NNAPI LESS operation.
class Less : public OperationsBase {
public:
    Less(int operationIndex);
    std::shared_ptr createNode() override;
};

}  // namespace nnhal
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
-------------------------------------------------------------------------------- /ngraph_creator/operations/include/LessEqual.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class LessEqual : public OperationsBase { 11 | public: 12 | LessEqual(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Log.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Log : public OperationsBase { 11 | public: 12 | Log(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/LogSoftmax.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class LogSoftmax : public OperationsBase { 11 | public: 12 | LogSoftmax(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/include/LogicalAnd.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class LogicalAnd : public OperationsBase { 11 | public: 12 | LogicalAnd(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/LogicalNot.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class LogicalNot : public OperationsBase { 11 | public: 12 | LogicalNot(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/LogicalOr.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class LogicalOr : public OperationsBase { 11 | public: 12 | LogicalOr(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Logistic.hpp: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Logistic : public OperationsBase { 11 | public: 12 | Logistic(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/MaxPool2d.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class MaxPool2d : public OperationsBase { 11 | public: 12 | MaxPool2d(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Maximum.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Maximum : public OperationsBase { 11 | public: 12 | Maximum(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Mean.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 
| 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Mean : public OperationsBase { 11 | public: 12 | Mean(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Minimum.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Minimum : public OperationsBase { 11 | public: 12 | Minimum(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Mul.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Mul : public OperationsBase { 11 | public: 12 | Mul(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Neg.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | 
class Neg : public OperationsBase { 11 | public: 12 | Neg(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/NgraphHelper.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | #undef LOG_TAG 7 | #define LOG_TAG "NgraphHelper" 8 | 9 | namespace android { 10 | namespace hardware { 11 | namespace neuralnetworks { 12 | namespace nnhal { 13 | using FusedActivationFunc = V1_0::FusedActivationFunc; 14 | 15 | static inline ngraph::Shape toNgraphShape(const std::vector& dimensions) { 16 | ngraph::Shape shapeVec; 17 | for (size_t i = 0; i < dimensions.size(); i++) { 18 | shapeVec.push_back(static_cast(dimensions[i])); 19 | } 20 | return shapeVec; 21 | } 22 | 23 | static inline std::shared_ptr applyActivation(std::shared_ptr inputNode, 24 | int32_t activationFn) { 25 | std::shared_ptr activationNode = nullptr; 26 | switch (activationFn) { 27 | case (int32_t)FusedActivationFunc::NONE: 28 | ALOGV("Adding No Activation"); 29 | return inputNode; 30 | break; 31 | case (int32_t)FusedActivationFunc::RELU: 32 | ALOGV("Adding relu"); 33 | activationNode = std::make_shared(inputNode); 34 | break; 35 | case (int32_t)FusedActivationFunc::RELU6: 36 | ALOGV("Adding relu6"); 37 | activationNode = std::make_shared(inputNode, 0, 6); 38 | break; 39 | case (int32_t)FusedActivationFunc::RELU1: 40 | ALOGV("Adding relu1"); 41 | activationNode = std::make_shared(inputNode, -1, 1); 42 | break; 43 | default: 44 | ALOGI("UNKNOWN ACTIVATION FUNCTION %d !!!!!", activationFn); 45 | return inputNode; 46 | } 47 | return activationNode; 48 | } 49 | 50 | static inline void calculateExplicitPadding(int32_t in_size, int32_t stride, int32_t filter_size, 51 
| int32_t padding_implicit, int32_t* padding_head, 52 | int32_t* padding_tail) { 53 | *padding_head = 0; 54 | *padding_tail = 0; 55 | 56 | if (padding_implicit == 1) { 57 | int32_t out_size = (in_size + stride - 1) / stride; 58 | int32_t tmp = (out_size - 1) * stride + filter_size; 59 | if (tmp > in_size) { 60 | *padding_head = (tmp - in_size) / 2; 61 | *padding_tail = (tmp - in_size) - *padding_head; 62 | } 63 | } 64 | } 65 | 66 | } // namespace nnhal 67 | } // namespace neuralnetworks 68 | } // namespace hardware 69 | } // namespace android -------------------------------------------------------------------------------- /ngraph_creator/operations/include/NotEqual.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class NotEqual : public OperationsBase { 11 | public: 12 | NotEqual(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/PRelu.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class PRelu : public OperationsBase { 11 | public: 12 | PRelu(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Pad.hpp: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Pad : public OperationsBase { 11 | public: 12 | Pad(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/PadV2.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class PadV2 : public OperationsBase { 11 | public: 12 | PadV2(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Pow.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Pow : public OperationsBase { 11 | public: 12 | Pow(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Quantize.hpp: -------------------------------------------------------------------------------- 1 | 
#pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Quantize : public OperationsBase { 11 | public: 12 | Quantize(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | void connectOperationToGraph() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/RNN.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class RNN : public OperationsBase { 11 | public: 12 | RNN(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | void connectOperationToGraph() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ROIAlign.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ROIAlign : public OperationsBase { 11 | public: 12 | ROIAlign(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ROIPooling.hpp: -------------------------------------------------------------------------------- 1 | 
#pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ROIPooling : public OperationsBase { 11 | public: 12 | ROIPooling(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/RSQRT.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class RSQRT : public OperationsBase { 11 | public: 12 | RSQRT(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceAll.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceAll : public OperationsBase { 11 | public: 12 | ReduceAll(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceAny.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 
7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceAny : public OperationsBase { 11 | public: 12 | ReduceAny(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceMax.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceMax : public OperationsBase { 11 | public: 12 | ReduceMax(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceMin.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceMin : public OperationsBase { 11 | public: 12 | ReduceMin(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceProd.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceProd : public OperationsBase 
{ 11 | public: 12 | ReduceProd(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ReduceSum.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ReduceSum : public OperationsBase { 11 | public: 12 | ReduceSum(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Relu.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Relu : public OperationsBase { 11 | public: 12 | Relu(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Relu1.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Relu1 : public OperationsBase { 11 | public: 12 | Relu1(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace 
nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Relu6.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Relu6 : public OperationsBase { 11 | public: 12 | Relu6(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Reshape.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Reshape : public OperationsBase { 11 | public: 12 | Reshape(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ResizeBilinear.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ResizeBilinear : public OperationsBase { 11 | public: 12 | ResizeBilinear(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } 
// namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/ResizeNearestNeighbor.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class ResizeNearestNeighbor : public OperationsBase { 11 | public: 12 | ResizeNearestNeighbor(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/SQRT.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class SQRT : public OperationsBase { 11 | public: 12 | SQRT(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Select.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Select : public OperationsBase { 11 | public: 12 | Select(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | 
-------------------------------------------------------------------------------- /ngraph_creator/operations/include/Sin.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Sin : public OperationsBase { 11 | public: 12 | Sin(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Softmax.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Softmax : public OperationsBase { 11 | public: 12 | Softmax(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/SpaceToBatch.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class SpaceToBatch : public OperationsBase { 11 | public: 12 | SpaceToBatch(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/include/SpaceToDepth.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class SpaceToDepth : public OperationsBase { 11 | public: 12 | SpaceToDepth(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Split.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Split : public OperationsBase { 11 | public: 12 | Split(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | void connectOperationToGraph() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Squeeze.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Squeeze : public OperationsBase { 11 | public: 12 | Squeeze(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/include/StridedSlice.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class StridedSlice : public OperationsBase { 11 | public: 12 | StridedSlice(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | std::vector getMaskBits(int32_t maskValue, size_t vec_size); 16 | }; 17 | 18 | } // namespace nnhal 19 | } // namespace neuralnetworks 20 | } // namespace hardware 21 | } // namespace android 22 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Sub.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Sub : public OperationsBase { 11 | public: 12 | Sub(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Tanh.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Tanh : public OperationsBase { 11 | public: 12 | Tanh(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | }; 15 | 16 | } // namespace nnhal 17 | } // namespace neuralnetworks 18 | } // namespace hardware 19 | } // namespace android 20 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/include/TopkV2.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class TopkV2 : public OperationsBase { 11 | public: 12 | TopkV2(int operationIndex); 13 | std::shared_ptr createNode() override; 14 | void connectOperationToGraph() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/Transpose.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class Transpose : public OperationsBase { 11 | public: 12 | Transpose(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/include/TransposeConv2D.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class TransposeConv2D : public OperationsBase { 11 | public: 12 | TransposeConv2D(int operationIndex); 13 | bool validate() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | 
-------------------------------------------------------------------------------- /ngraph_creator/operations/include/UnidirectionalSequenceRNN.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | class UnidirectionalSequenceRNN : public OperationsBase { 11 | public: 12 | UnidirectionalSequenceRNN(int operationIndex); 13 | void connectOperationToGraph() override; 14 | std::shared_ptr createNode() override; 15 | }; 16 | 17 | } // namespace nnhal 18 | } // namespace neuralnetworks 19 | } // namespace hardware 20 | } // namespace android 21 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Abs.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Abs" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Abs::Abs(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Abs::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Add.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Add" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Add::Add(int operationIndex) : 
OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Add::validate() { 15 | ALOGV("%s PASSED", __func__); 16 | 17 | const auto& activationIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); 18 | if (!sModelInfo->isOperandLifeTimeConst(activationIndex)) { 19 | ALOGE("%s Due to OpenVINO API restrictions, Scalar input values must have CONST lifetime", 20 | __func__); 21 | return false; 22 | } 23 | 24 | return true; 25 | } 26 | 27 | std::shared_ptr Add::createNode() { 28 | // Creating input nodes 29 | std::shared_ptr input1, input2; 30 | 31 | input1 = getInputNode(0); 32 | input2 = getInputNode(1); 33 | 34 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 35 | 36 | auto addNode = 37 | std::make_shared(input1, input2, ngraph::op::AutoBroadcastType::NUMPY); 38 | 39 | auto outputNode = applyActivation(addNode, activationFn); 40 | 41 | return outputNode; 42 | } 43 | 44 | std::shared_ptr Add::createNodeForPlugin() { 45 | if (sPluginType == IntelDeviceType::VPU) { 46 | auto input = mNgraphNodes->getOperationOutput( 47 | sModelInfo->getOperationInput(mNnapiOperationIndex, 0)); 48 | std::shared_ptr constantOp = 49 | std::make_shared(ngraph::element::f32, input.get_shape()); 50 | auto transposedOp = transpose(NHWC_NCHW, constantOp); 51 | return std::make_shared(input, transposedOp, 52 | ngraph::op::AutoBroadcastType::NUMPY); 53 | } else { 54 | return createNode(); 55 | } 56 | } 57 | 58 | } // namespace nnhal 59 | } // namespace neuralnetworks 60 | } // namespace hardware 61 | } // namespace android 62 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Argmax.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Argmax" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | 
namespace nnhal { 9 | 10 | Argmax::Argmax(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Argmax::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | int32_t axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 21 | ALOGD("createNode axis %d", axis); 22 | 23 | auto k_node = createConstNode(ngraph::element::i32, {}, convertToVector(1)); 24 | 25 | const auto topk = std::make_shared( 26 | input, k_node, axis, ngraph::opset3::TopK::Mode::MAX, ngraph::opset3::TopK::SortType::NONE); 27 | 28 | const auto axis_to_remove = 29 | createConstNode(ngraph::element::u32, {}, convertToVector(topk->get_axis())); 30 | auto outputNode = std::make_shared(topk->output(1), axis_to_remove); 31 | 32 | return outputNode; 33 | } 34 | 35 | } // namespace nnhal 36 | } // namespace neuralnetworks 37 | } // namespace hardware 38 | } // namespace android 39 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Argmin.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Argmin" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Argmin::Argmin(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Argmin::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | int32_t axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 21 | ALOGD("createNode axis %d", axis); 22 | 23 | auto k_node = createConstNode(ngraph::element::i32, {}, convertToVector(1)); 24 | 25 | auto topk = std::make_shared( 26 | input, k_node, 
axis, ngraph::opset3::TopK::Mode::MIN, ngraph::opset3::TopK::SortType::NONE); 27 | 28 | const auto axis_to_remove = 29 | createConstNode(ngraph::element::u32, {}, convertToVector(topk->get_axis())); 30 | auto outputNode = std::make_shared(topk->output(1), axis_to_remove); 31 | 32 | return outputNode; 33 | } 34 | 35 | } // namespace nnhal 36 | } // namespace neuralnetworks 37 | } // namespace hardware 38 | } // namespace android 39 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/BatchToSpace.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "BatchToSpace" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | BatchToSpace::BatchToSpace(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool BatchToSpace::validate() { 15 | // Check input rank 16 | const auto inputRank = getInputOperandDimensions(0).size(); 17 | 18 | if (inputRank != 4) return false; 19 | 20 | const auto& block_shape_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 21 | 22 | // TODO: Add Support for all_tensors_as_inputs 23 | if (!sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex)) { 24 | ALOGE("%s Only Constant dimensions supported now", __func__); 25 | return false; 26 | } 27 | 28 | ALOGV("%s PASSED", __func__); 29 | return true; 30 | } 31 | 32 | std::shared_ptr BatchToSpace::createNode() { 33 | int32_t layout = 0; 34 | bool useNchw = false; 35 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 36 | ALOGD("%s inputsSize %lu", __func__, inputsSize); 37 | 38 | auto inputNode = getInputNode(0); 39 | auto& inDims = getInputOperandDimensions(0); 40 | const auto& block_shape_OperandIndex = 
sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 41 | auto block_shape = sModelInfo->GetConstVecOperand(block_shape_OperandIndex); 42 | 43 | // Compensation for the shape to be same as the size of data input shape 44 | block_shape.insert(block_shape.begin(), 1); 45 | block_shape.insert(block_shape.begin(), 1); 46 | 47 | if (inputsSize == 3) layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 48 | if (layout) useNchw = true; 49 | 50 | std::vector shape(inDims.size(), 0); 51 | 52 | const auto block_shape_node = 53 | createConstNode(ngraph::element::i64, {inDims.size()}, block_shape); 54 | const auto crop_begin = createConstNode(ngraph::element::i64, {shape.size()}, shape); 55 | const auto crop_end = createConstNode(ngraph::element::i64, {shape.size()}, shape); 56 | 57 | if (!useNchw) // No conversion needed if useNchw set 58 | inputNode = transpose(NHWC_NCHW, inputNode); 59 | 60 | std::shared_ptr outputNode = std::make_shared( 61 | inputNode, block_shape_node, crop_begin, crop_end); 62 | 63 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 64 | 65 | return outputNode; 66 | } 67 | 68 | } // namespace nnhal 69 | } // namespace neuralnetworks 70 | } // namespace hardware 71 | } // namespace android 72 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Cast.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Cast" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Cast::Cast(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | void Cast::connectOperationToGraph() { createNode(); } 15 | 16 | std::shared_ptr Cast::createNode() { 17 | // Creating input nodes 18 | std::shared_ptr input; 19 | 20 | input = getInputNode(0, false); 
21 | 22 | auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); 23 | auto outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 24 | 25 | const auto& inputType = sModelInfo->getOperationType(inputIndex); 26 | const auto& outputType = sModelInfo->getOperationType(outputIndex); 27 | 28 | ngraph::element::Type elementType; // change to outputbased element type 29 | std::shared_ptr outputNode; 30 | 31 | if (inputType == outputType) { 32 | outputNode = input; 33 | } else { 34 | if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 35 | elementType = ngraph::element::f32; 36 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) { 37 | elementType = ngraph::element::f16; 38 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_INT32)) { 39 | elementType = ngraph::element::i32; 40 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) { 41 | auto convertInput = 42 | std::make_shared(input, ngraph::element::i32); 43 | input = std::make_shared(convertInput, 0, 255); 44 | elementType = ngraph::element::u8; 45 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM_SIGNED) || 46 | checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_SYMM)) { 47 | auto convertInput = 48 | std::make_shared(input, ngraph::element::i32); 49 | input = std::make_shared(convertInput, -128, 127); 50 | elementType = ngraph::element::i8; 51 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT16_ASYMM)) { 52 | auto convertInput = 53 | std::make_shared(input, ngraph::element::i32); 54 | input = std::make_shared(convertInput, 0, 65535); 55 | elementType = ngraph::element::u16; 56 | } else if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT16_SYMM)) { 57 | auto convertInput = 58 | std::make_shared(input, ngraph::element::i32); 59 | input = std::make_shared(convertInput, -32768, 32767); 60 | elementType = 
ngraph::element::i16; 61 | } 62 | outputNode = std::make_shared(input, elementType); 63 | } 64 | 65 | mNgraphNodes->setOutputAtOperandIndex(outputIndex, outputNode); 66 | const auto op = sModelInfo->getOperand(outputIndex); 67 | if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { 68 | addResultNode(mDefaultOutputIndex, outputNode); 69 | } 70 | 71 | return nullptr; 72 | } 73 | 74 | } // namespace nnhal 75 | } // namespace neuralnetworks 76 | } // namespace hardware 77 | } // namespace android 78 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ChannelShuffle.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ChannelShuffle" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ChannelShuffle::ChannelShuffle(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool ChannelShuffle::validate() { 15 | // Check input rank 16 | const int64_t inputRank = getInputOperandDimensions(0).size(); 17 | if (inputRank > 4 || inputRank <= 0) { 18 | ALOGE("%s Invalid input dimensions size!", __func__); 19 | return false; 20 | } 21 | 22 | // Check axis range 23 | int64_t axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 24 | if (!(axis >= -inputRank && axis < inputRank)) { 25 | ALOGE("%s Axis %ld not in the range [-inputRank, inputRank)", __func__, axis); 26 | return false; 27 | } 28 | 29 | return true; 30 | } 31 | 32 | std::shared_ptr ChannelShuffle::createNode() { 33 | // Creating input nodes 34 | auto inputNode = getInputNode(0); 35 | int64_t group = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 36 | int64_t axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 37 | 38 | auto inputRank = getInputOperandDimensions(0).size(); 39 | 
axis = (axis >= 0) ? axis : (axis + inputRank); 40 | 41 | // Convert the inputNode to 4D 42 | std::shared_ptr squeezeAxes; 43 | if (inputRank < 4) { 44 | switch (inputRank) { 45 | case 1: 46 | squeezeAxes = std::make_shared( 47 | ngraph::element::i64, ngraph::Shape{3}, std::vector{1, 2, 3}); 48 | break; 49 | case 2: 50 | squeezeAxes = std::make_shared( 51 | ngraph::element::i64, ngraph::Shape{2}, std::vector{2, 3}); 52 | break; 53 | case 3: 54 | squeezeAxes = std::make_shared( 55 | ngraph::element::i64, ngraph::Shape{1}, std::vector{3}); 56 | break; 57 | default: 58 | break; 59 | } 60 | 61 | inputNode = std::make_shared(inputNode, squeezeAxes); 62 | } 63 | 64 | std::shared_ptr outputNode = 65 | std::make_shared(inputNode, axis, group); 66 | 67 | // Using squeeze to convert the shape of outputNode to shape of inputNode before unsqueeze 68 | if (inputRank < 4) { 69 | outputNode = std::make_shared(outputNode, squeezeAxes); 70 | } 71 | 72 | return outputNode; 73 | } 74 | 75 | } // namespace nnhal 76 | } // namespace neuralnetworks 77 | } // namespace hardware 78 | } // namespace android 79 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Concat.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Concat" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Concat::Concat(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Concat::validate() { 15 | // check concatenation axis 16 | auto n = sModelInfo->getOperationInputsSize(mNnapiOperationIndex) - 17 | 1; // 0 ~ n-1: The list of n input tensors 18 | for (size_t i = 0; i < n; i++) { 19 | if (!isValidInputTensor(i)) { 20 | ALOGE("%s Invalid dimensions for input", __func__); 21 | return false; 22 | } 23 | } 
24 | ALOGV("%s PASSED", __func__); 25 | return true; 26 | } 27 | 28 | std::shared_ptr Concat::createNode() { 29 | auto n = sModelInfo->getOperationInputsSize(mNnapiOperationIndex) - 30 | 1; // 0 ~ n-1: The list of n input tensors 31 | auto axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 32 | n); // n: concatenation axis 33 | std::vector> inputs; 34 | ALOGD("createNode n %lu, axis %d", n, axis); 35 | for (size_t i = 0; i < n; i++) { 36 | auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, i); 37 | auto inputOp = getInputNode(i); 38 | const auto op = sModelInfo->getOperand(inputIndex); 39 | ALOGD("createNode inputIndex %d, lifetime %d", inputIndex, op.lifetime); 40 | inputs.push_back(inputOp); 41 | } 42 | 43 | std::shared_ptr outputNode = 44 | std::make_shared(inputs, axis); 45 | 46 | return outputNode; 47 | } 48 | 49 | } // namespace nnhal 50 | } // namespace neuralnetworks 51 | } // namespace hardware 52 | } // namespace android 53 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/DepthToSpace.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "DepthToSpace" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | DepthToSpace::DepthToSpace(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr DepthToSpace::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | bool useNchw = false; 18 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 19 | 20 | if (inputsSize == 3) { 21 | auto layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 22 | if (layout) useNchw = true; 23 | } 24 | 25 | input = getInputNode(0); 26 | auto block_size = 
sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 27 | 28 | if (!useNchw) // No conversion needed if useNchw set 29 | input = transpose(NHWC_NCHW, input); 30 | 31 | std::shared_ptr outputNode; 32 | 33 | outputNode = std::make_shared( 34 | input, ngraph::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, block_size); 35 | 36 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 37 | 38 | return outputNode; 39 | } 40 | 41 | } // namespace nnhal 42 | } // namespace neuralnetworks 43 | } // namespace hardware 44 | } // namespace android 45 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Dequantize.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Dequantize" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Dequantize::Dequantize(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Dequantize::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input, outputNode; 17 | input = getInputNode(0, false); 18 | const auto& inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); 19 | 20 | if (checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) 21 | outputNode = DequantizeNode(input, inputIndex, ngraph::element::f16); 22 | else 23 | outputNode = DequantizeNode(input, inputIndex, ngraph::element::f32); 24 | 25 | return outputNode; 26 | } 27 | 28 | } // namespace nnhal 29 | } // namespace neuralnetworks 30 | } // namespace hardware 31 | } // namespace android 32 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Div.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef 
LOG_TAG 3 | #define LOG_TAG "Div" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Div::Div(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Div::createNode() { 15 | // Creating input nodes 16 | auto input1 = getInputNode(0); 17 | auto input2 = getInputNode(1); 18 | 19 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 20 | 21 | auto DivNode = std::make_shared(input1, input2, 22 | ngraph::op::AutoBroadcastType::NUMPY); 23 | 24 | auto outputNode = applyActivation(DivNode, activationFn); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/EmbeddingLookup.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "EmbeddingLookup" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | EmbeddingLookup::EmbeddingLookup(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool EmbeddingLookup::validate() { 15 | const auto inputRank = getInputOperandDimensions(1).size(); 16 | if (inputRank < 2) return false; 17 | 18 | ALOGV("%s PASSED", __func__); 19 | return true; 20 | } 21 | 22 | std::shared_ptr EmbeddingLookup::createNode() { 23 | // Creating input nodes 24 | auto indices = getInputNode(0); 25 | auto input = getInputNode(1); 26 | 27 | auto axis = createConstNode(ngraph::element::i32, {}, std::vector{0}); 28 | auto outputNode = std::make_shared(input, indices, axis); 29 | 30 | return outputNode; 31 
| } 32 | 33 | } // namespace nnhal 34 | } // namespace neuralnetworks 35 | } // namespace hardware 36 | } // namespace android 37 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Equal.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Equal" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Equal::Equal(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Equal::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | outputNode = std::make_shared(input1, input2, 23 | ngraph::op::AutoBroadcastType::NUMPY); 24 | 25 | return outputNode; 26 | } 27 | 28 | } // namespace nnhal 29 | } // namespace neuralnetworks 30 | } // namespace hardware 31 | } // namespace android 32 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Exp.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Exp" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Exp::Exp(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Exp::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 
27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ExpandDims.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ExpandDims" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ExpandDims::ExpandDims(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool ExpandDims::validate() { 15 | // Check input rank 16 | const auto inputRank = getInputOperandDimensions(0).size(); 17 | if (inputRank < 1) return false; 18 | 19 | return true; 20 | } 21 | 22 | std::shared_ptr ExpandDims::createNode() { 23 | // Creating input nodes 24 | auto input = getInputNode(0); 25 | auto index = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 26 | 27 | auto axes = createConstNode(ngraph::element::i32, {}, convertToVector(index)); 28 | 29 | auto outputNode = std::make_shared(input, axes); 30 | 31 | return outputNode; 32 | } 33 | 34 | } // namespace nnhal 35 | } // namespace neuralnetworks 36 | } // namespace hardware 37 | } // namespace android 38 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Floor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Floor" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Floor::Floor(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Floor::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 
21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/FullyConnected.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "FullyConnected" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | FullyConnected::FullyConnected(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | // Supports only FP32 input. Will add support for QUANT8 through decompose node 15 | // once the vpu and gna plugin support if confirmed 16 | bool FullyConnected::validate() { 17 | auto input0 = getInputOperand(0); 18 | 19 | if (isZeroSizedInput(0)) { 20 | ALOGE("%s Batch size of 0 is not supported", __func__); 21 | return false; 22 | } 23 | 24 | if (input0.dimensions.size() < 2) { 25 | ALOGE("%s Invalid input parameter dimensions!!!", __func__); 26 | return false; 27 | } 28 | 29 | ALOGD("%s succeeded", __func__); 30 | return true; 31 | } 32 | 33 | std::shared_ptr FullyConnected::createNode() { 34 | std::shared_ptr inputNode = getInputNode(0); 35 | std::shared_ptr weightsNode = getInputNode(1); 36 | std::shared_ptr biasNode, multiplyNode, addNode, activationNode; 37 | 38 | auto inputDims = getInputOperand(0).dimensions; 39 | auto weightDims = getInputOperand(1).dimensions; 40 | auto biasDims = getInputOperand(2).dimensions; 41 | 42 | if ((inputDims.size() > 2) || (inputDims[1] != weightDims[1])) { 43 | std::vector newShape = {getNumberOfElements(inputDims) / weightDims[1], 44 | weightDims[1]}; 45 | auto reshapeConstant = createConstNode(ngraph::element::i32, {2}, newShape); 46 | auto reshapeNode = 47 | std::make_shared(inputNode, reshapeConstant, false); 48 | 
multiplyNode = 49 | std::make_shared(reshapeNode, weightsNode, false, true); 50 | } else { 51 | multiplyNode = 52 | std::make_shared(inputNode, weightsNode, false, true); 53 | } 54 | 55 | if (!sModelInfo->isOmittedInput(mNnapiOperationIndex, 2) && biasDims.size() != 0) { 56 | biasNode = getInputNode(2); 57 | 58 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM) || 59 | checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) 60 | biasNode = 61 | DequantizeNode(biasNode, sModelInfo->getOperationInput(mNnapiOperationIndex, 2), 62 | ngraph::element::f32); 63 | 64 | addNode = std::make_shared(multiplyNode, biasNode, 65 | ngraph::op::AutoBroadcastType::NUMPY); 66 | } else { 67 | ALOGD("FullyConnected: Bias not provided !!!"); 68 | addNode = multiplyNode; 69 | } 70 | 71 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 3); 72 | activationNode = applyActivation(addNode, activationFn); 73 | return activationNode ? activationNode : addNode; 74 | } 75 | 76 | } // namespace nnhal 77 | } // namespace neuralnetworks 78 | } // namespace hardware 79 | } // namespace android 80 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Gather.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Gather" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Gather::Gather(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Gather::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr gatherVals; 17 | 18 | gatherVals = getInputNode(0); 19 | 20 | // axis range [-n, n] 21 | auto axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 22 | auto axisNode = 
createConstNode(ngraph::element::i32, {}, convertToVector(axis)); 23 | 24 | auto indices = getInputNode(2); 25 | 26 | std::shared_ptr outputNode; 27 | outputNode = std::make_shared(gatherVals, indices, axisNode); 28 | 29 | return outputNode; 30 | } 31 | 32 | } // namespace nnhal 33 | } // namespace neuralnetworks 34 | } // namespace hardware 35 | } // namespace android 36 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Greater.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Greater" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Greater::Greater(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Greater::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/GreaterEqual.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "GreaterEqual" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | GreaterEqual::GreaterEqual(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr 
GreaterEqual::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | outputNode = std::make_shared( 23 | input1, input2, ngraph::op::AutoBroadcastType::NUMPY); 24 | 25 | return outputNode; 26 | } 27 | 28 | } // namespace nnhal 29 | } // namespace neuralnetworks 30 | } // namespace hardware 31 | } // namespace android 32 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/HardSwish.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #define LOG_TAG "HardSwish" 3 | 4 | namespace android { 5 | namespace hardware { 6 | namespace neuralnetworks { 7 | namespace nnhal { 8 | 9 | HardSwish::HardSwish(int operationIndex) : OperationsBase(operationIndex) { 10 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 11 | } 12 | 13 | bool HardSwish::validate() { 14 | ALOGV("%s PASSED", __func__); 15 | return true; 16 | } 17 | 18 | std::shared_ptr HardSwish::createNode() { 19 | std::shared_ptr outputNode, inputNode; 20 | inputNode = getInputNode(0); 21 | 22 | outputNode = std::make_shared(inputNode); 23 | 24 | return outputNode; 25 | } 26 | 27 | } // namespace nnhal 28 | } // namespace neuralnetworks 29 | } // namespace hardware 30 | } // namespace android 31 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/InstanceNormalization.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "InstanceNormalization" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | InstanceNormalization::InstanceNormalization(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = 
sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool InstanceNormalization::validate() { 15 | ALOGV("%s Entering", __func__); 16 | // check output type 17 | if (!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 18 | ALOGE("%s Output operand 0 is not of type FP32. Unsupported operation", __func__); 19 | return false; 20 | } 21 | 22 | // Check Input Type 23 | if (!checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 24 | ALOGE("%s Input operand 0 is not of type FP32. Unsupported operation", __func__); 25 | return false; 26 | } 27 | const auto inputRank = getInputOperandDimensions(0).size(); 28 | if ((inputRank > 4) || (!isValidInputTensor(0))) { 29 | ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputRank); 30 | return false; 31 | } 32 | 33 | ALOGV("%s PASSED", __func__); 34 | return true; 35 | } 36 | 37 | std::shared_ptr InstanceNormalization::createNode() { 38 | ALOGV("%s Entering", __func__); 39 | 40 | std::shared_ptr inputNode; 41 | bool useNchw = false; 42 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 43 | ALOGD("%s inputsSize %lu", __func__, inputsSize); 44 | 45 | // Read inputs 46 | inputNode = getInputNode(0); 47 | auto gamma = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 48 | auto beta = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 49 | auto epsilon = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 3); 50 | auto layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 4); 51 | if (layout) useNchw = true; 52 | 53 | if (!useNchw) // No conversion needed if useNchw set 54 | inputNode = transpose(NHWC_NCHW, inputNode); 55 | 56 | // output[b, h, w, c] = (input[b, h, w, c] - mean[b, c]) * gamma / 57 | // sqrt(var[b, c] + epsilon) + beta 58 | // Instance Normalizatiom = MVN * gamma + beta 59 | bool normalize_variance = true; 60 | auto gammaNode = createConstNode(ngraph::element::f32, {1}, convertToVector(gamma)); 61 | 
auto betaNode = createConstNode(ngraph::element::f32, {1}, convertToVector(beta)); 62 | 63 | // Axis along which mean and variance is calculated 64 | std::vector axes{2, 3}; 65 | std::shared_ptr inputAxesNode = createConstNode(ngraph::element::i32, {2}, axes); 66 | std::shared_ptr mvnNode = std::make_shared( 67 | inputNode, inputAxesNode, normalize_variance, epsilon, ngraph::op::MVNEpsMode::INSIDE_SQRT); 68 | 69 | auto mulGamma = std::make_shared( 70 | mvnNode, gammaNode, ngraph::op::AutoBroadcastType::NUMPY); 71 | std::shared_ptr outputNode = 72 | std::make_shared(mulGamma, betaNode); 73 | 74 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 75 | ALOGV("%s PASSED", __func__); 76 | 77 | return outputNode; 78 | } 79 | 80 | } // namespace nnhal 81 | } // namespace neuralnetworks 82 | } // namespace hardware 83 | } // namespace android 84 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/L2Normalization.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "L2Normalization" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | L2Normalization::L2Normalization(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool L2Normalization::validate() { 15 | const auto inputRank = getInputOperandDimensions(0).size(); 16 | if ((inputRank > 4) || (!isValidInputTensor(0))) { 17 | ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputRank); 18 | return false; 19 | } 20 | 21 | ALOGV("%s PASSED", __func__); 22 | return true; 23 | } 24 | 25 | std::shared_ptr L2Normalization::createNode() { 26 | std::shared_ptr inputNode; 27 | 28 | int32_t inputAxes = -1; 29 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 30 | 
ALOGD("%s inputsSize %lu", __func__, inputsSize); 31 | inputNode = getInputNode(0); 32 | // NN-HAL 1.2 specific optional input 33 | if (inputsSize == 2) { 34 | inputAxes = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 35 | } 36 | auto inputAxesNode = createConstNode(ngraph::element::i32, {1}, convertToVector(inputAxes)); 37 | // TODO: Add support for NNAPI feature level 4, if the elements along an axis are all zeros, the 38 | // result is undefined. Since NNAPI feature level 4, if the elements along an axis are all 39 | // zeros, the result is logical zero. 40 | 41 | /* 42 | * output[batch, row, col, channel] = 43 | * input[batch, row, col, channel] / 44 | * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) 45 | */ 46 | auto mul = std::make_shared(inputNode, inputNode); 47 | auto sum = std::make_shared(mul, inputAxesNode, true); 48 | auto sqrt = std::make_shared(sum); 49 | auto outputNode = std::make_shared(inputNode, sqrt); 50 | 51 | return outputNode; 52 | } 53 | 54 | } // namespace nnhal 55 | } // namespace neuralnetworks 56 | } // namespace hardware 57 | } // namespace android 58 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Less.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Less" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Less::Less(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Less::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | 
} 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/LessEqual.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "LessEqual" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | LessEqual::LessEqual(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr LessEqual::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Log.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Log" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Log::Log(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Log::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | 
} // namespace android 27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/LogSoftmax.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "LogSoftmax" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | LogSoftmax::LogSoftmax(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr LogSoftmax::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input, outputNode; 17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr betaNode; 21 | 22 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) { 23 | auto beta = sModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 1); 24 | betaNode = createConstNode(ngraph::element::f16, {}, convertToVector(beta)); 25 | } else { 26 | auto beta = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 27 | betaNode = createConstNode(ngraph::element::f32, {}, convertToVector(beta)); 28 | } 29 | int axis = -1; 30 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 31 | if (inputsSize == 3) axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 32 | 33 | const auto axisNode = createConstNode(ngraph::element::i32, {1}, convertToVector(axis)); 34 | 35 | // logits * beta 36 | auto mul = std::make_shared(input, betaNode); 37 | // exp(logits * beta) 38 | auto exp = std::make_shared(mul); 39 | // reduce_sum(exp(logits * beta), axis) 40 | auto sum = std::make_shared(exp, axisNode, true); 41 | // log(reduce_sum(exp(logits * beta), axis)) 42 | auto log = std::make_shared(sum); 43 | // logits * beta - log(reduce_sum(exp(logits * beta), axis)) 44 | outputNode = std::make_shared(mul, log); 45 | 46 | return outputNode; 47 | } 48 | 49 | } // 
namespace nnhal 50 | } // namespace neuralnetworks 51 | } // namespace hardware 52 | } // namespace android 53 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/LogicalAnd.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "LogicalAnd" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | LogicalAnd::LogicalAnd(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr LogicalAnd::createNode() { 15 | // Creating input nodes 16 | auto input1 = getInputNode(0); 17 | auto input2 = getInputNode(1); 18 | 19 | auto outputNode = std::make_shared( 20 | input1, input2, ngraph::op::AutoBroadcastType::NUMPY); 21 | 22 | return outputNode; 23 | } 24 | 25 | } // namespace nnhal 26 | } // namespace neuralnetworks 27 | } // namespace hardware 28 | } // namespace android 29 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/LogicalNot.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "LogicalNot" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | LogicalNot::LogicalNot(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr LogicalNot::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 27 | 
-------------------------------------------------------------------------------- /ngraph_creator/operations/src/LogicalOr.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "LogicalOr" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | LogicalOr::LogicalOr(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr LogicalOr::createNode() { 15 | // Creating input nodes 16 | auto input1 = getInputNode(0); 17 | auto input2 = getInputNode(1); 18 | 19 | auto outputNode = std::make_shared( 20 | input1, input2, ngraph::op::AutoBroadcastType::NUMPY); 21 | 22 | return outputNode; 23 | } 24 | 25 | } // namespace nnhal 26 | } // namespace neuralnetworks 27 | } // namespace hardware 28 | } // namespace android 29 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Logistic.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Logistic" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Logistic::Logistic(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Logistic::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr outputNode; 21 | outputNode = std::make_shared(input); 22 | 23 | return outputNode; 24 | } 25 | 26 | } // namespace nnhal 27 | } // namespace neuralnetworks 28 | } // namespace hardware 29 | } // namespace android 30 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/src/Maximum.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Maximum" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Maximum::Maximum(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Maximum::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Mean.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Mean" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Mean::Mean(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Mean::validate() { 15 | // TODO: Add Support for all_tensors_as_inputs 16 | const auto& axesOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 17 | 18 | if (!sModelInfo->isOperandLifeTimeConst(axesOperandIndex)) { 19 | ALOGE("%s Only Constant dimensions supported now", __func__); 20 | return false; 21 | } 22 | 23 | return true; 24 | } 25 | 26 | std::shared_ptr Mean::createNode() { 27 | // Creating input nodes 28 | std::shared_ptr input; 29 | 30 | input = getInputNode(0); 31 | 
32 | auto reduction_axes = getInputNode(1); 33 | auto reduce_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 34 | bool keep_dims = (reduce_dims > 0) ? true : false; 35 | 36 | std::shared_ptr outputNode; 37 | outputNode = std::make_shared(input, reduction_axes, keep_dims); 38 | 39 | return outputNode; 40 | } 41 | 42 | } // namespace nnhal 43 | } // namespace neuralnetworks 44 | } // namespace hardware 45 | } // namespace android 46 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Minimum.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Minimum" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Minimum::Minimum(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Minimum::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Mul.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Mul" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Mul::Mul(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = 
sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Mul::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 22 | 23 | auto mulNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | auto outputNode = applyActivation(mulNode, activationFn); 27 | 28 | return outputNode; 29 | } 30 | 31 | } // namespace nnhal 32 | } // namespace neuralnetworks 33 | } // namespace hardware 34 | } // namespace android 35 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Neg.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Neg" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Neg::Neg(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Neg::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | auto outputNode = std::make_shared(input); 21 | 22 | return outputNode; 23 | } 24 | 25 | } // namespace nnhal 26 | } // namespace neuralnetworks 27 | } // namespace hardware 28 | } // namespace android 29 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/NotEqual.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "NotEqual" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | NotEqual::NotEqual(int operationIndex) : 
OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr NotEqual::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | std::shared_ptr outputNode; 22 | 23 | outputNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/PRelu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "PRelu" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | PRelu::PRelu(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool PRelu::validate() { 15 | ALOGV("%s PASSED", __func__); 16 | 17 | const auto& baseDims = getInputOperandDimensions(0); 18 | const auto& alphaDims = getInputOperandDimensions(1); 19 | const auto& baseRank = baseDims.size(); 20 | const auto& alphaRank = alphaDims.size(); 21 | // TODO: openvino only supports broadcasting alpha rank/value to base rank/value. 
If alpha 22 | // rank/value is greater than base rank/value, base rank/value should be broadcasted to alpha 23 | // rank/value (which is not supported in openvino 2021.4) 24 | if (alphaRank > baseRank) return false; 25 | 26 | if (alphaRank == baseRank) { 27 | for (uint32_t i = 0; i < alphaRank; i++) { 28 | if (alphaDims[i] > baseDims[i]) return false; 29 | } 30 | } 31 | 32 | return true; 33 | } 34 | 35 | std::shared_ptr PRelu::createNode() { 36 | // Creating input nodes 37 | auto base = getInputNode(0); 38 | auto alpha = getInputNode(1); 39 | 40 | auto outputNode = std::make_shared(base, alpha); 41 | 42 | return outputNode; 43 | } 44 | 45 | } // namespace nnhal 46 | } // namespace neuralnetworks 47 | } // namespace hardware 48 | } // namespace android 49 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Pad.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Pad" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Pad::Pad(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Pad::validate() { 15 | // Check input rank 16 | const auto inputRank = getInputOperandDimensions(0).size(); 17 | if (inputRank > 4) return false; 18 | 19 | // TODO: Add support for low_rank 20 | if (inputRank < 2) return false; 21 | 22 | // TODO: Add Support for all_tensors_as_inputs 23 | const auto& padOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 24 | 25 | if (!sModelInfo->isOperandLifeTimeConst(padOperandIndex)) { 26 | ALOGE("%s Only Constant dimensions supported now", __func__); 27 | return false; 28 | } 29 | 30 | return true; 31 | } 32 | 33 | std::shared_ptr Pad::createNode() { 34 | // Creating input nodes 35 | auto inputNode = getInputNode(0); 36 | 
37 | const auto& paddingsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 38 | // Fetch the 2D paddings as a 1D vector, and then split it into 2 39 | auto paddings_2d = sModelInfo->GetConstVecOperand(paddingsOperandIndex); 40 | auto half_size = paddings_2d.size() / 2; 41 | std::vector paddings_0(half_size); 42 | std::vector paddings_1(half_size); 43 | for (size_t i = 0; i < half_size; i++) { 44 | paddings_0[i] = paddings_2d[2 * i]; 45 | paddings_1[i] = paddings_2d[2 * i + 1]; 46 | } 47 | const auto pads_begin = createConstNode(ngraph::element::i32, {half_size}, paddings_0); 48 | const auto pads_end = createConstNode(ngraph::element::i32, {half_size}, paddings_1); 49 | 50 | auto outputNode = std::make_shared(inputNode, pads_begin, pads_end, 51 | ngraph::op::PadMode::CONSTANT); 52 | ALOGV("outputNode Shape Size : %lu", outputNode->get_shape().size()); 53 | 54 | return outputNode; 55 | } 56 | 57 | } // namespace nnhal 58 | } // namespace neuralnetworks 59 | } // namespace hardware 60 | } // namespace android 61 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/PadV2.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "PadV2" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | PadV2::PadV2(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool PadV2::validate() { 15 | // Check input rank 16 | const auto inputRank = getInputOperandDimensions(0).size(); 17 | if (inputRank > 4) return false; 18 | 19 | // TODO: Add support for low_rank 20 | if (inputRank < 2) return false; 21 | 22 | // TODO: Add Support for all_tensors_as_inputs 23 | const auto& padOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 24 | 25 | if 
(!sModelInfo->isOperandLifeTimeConst(padOperandIndex)) { 26 | ALOGE("%s Only Constant dimensions supported now", __func__); 27 | return false; 28 | } 29 | 30 | return true; 31 | } 32 | 33 | std::shared_ptr PadV2::createNode() { 34 | // Creating input nodes 35 | auto inputNode = getInputNode(0); 36 | std::shared_ptr pad_value; 37 | auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); 38 | 39 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 40 | auto pad_scalar_value = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 41 | pad_value = createConstNode(ngraph::element::f32, {}, convertToVector(pad_scalar_value)); 42 | } else if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) { 43 | auto pad_scalar_value = sModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 2); 44 | pad_value = createConstNode(ngraph::element::f16, {}, convertToVector(pad_scalar_value)); 45 | } else if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM) || 46 | checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) { 47 | auto pad_scalar_value = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 48 | pad_value = createConstNode(ngraph::element::i32, {}, convertToVector(pad_scalar_value)); 49 | 50 | // scale and zeropoint of pad value has to be same as in inputNode. 
so inputIndex is passed 51 | // as second parameter to DequantizeNode 52 | pad_value = DequantizeNode(pad_value, inputIndex, ngraph::element::f32); 53 | } 54 | 55 | const auto& paddingsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 56 | // Fetch the 2D paddings as a 1D vector, and then split it into 2 57 | auto paddings_2d = sModelInfo->GetConstVecOperand(paddingsOperandIndex); 58 | auto half_size = paddings_2d.size() / 2; 59 | std::vector paddings_0(half_size); 60 | std::vector paddings_1(half_size); 61 | for (unsigned long i = 0; i < half_size; i++) { 62 | paddings_0[i] = paddings_2d[2 * i]; 63 | paddings_1[i] = paddings_2d[2 * i + 1]; 64 | } 65 | const auto pads_begin = createConstNode(ngraph::element::i32, {half_size}, paddings_0); 66 | const auto pads_end = createConstNode(ngraph::element::i32, {half_size}, paddings_1); 67 | 68 | auto outputNode = std::make_shared( 69 | inputNode, pads_begin, pads_end, pad_value, ngraph::op::PadMode::CONSTANT); 70 | 71 | return outputNode; 72 | } 73 | 74 | } // namespace nnhal 75 | } // namespace neuralnetworks 76 | } // namespace hardware 77 | } // namespace android 78 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Pow.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Pow" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Pow::Pow(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Pow::createNode() { 15 | // Creating input nodes 16 | auto base = getInputNode(0); 17 | auto exponent = getInputNode(1); 18 | 19 | auto outputNode = std::make_shared(base, exponent, 20 | ngraph::op::AutoBroadcastType::NUMPY); 21 | 22 | return outputNode; 23 | } 24 | 25 | } // namespace 
nnhal 26 | } // namespace neuralnetworks 27 | } // namespace hardware 28 | } // namespace android 29 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Quantize.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Quantize" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Quantize::Quantize(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | void Quantize::connectOperationToGraph() { createNode(); } 15 | 16 | std::shared_ptr Quantize::createNode() { 17 | // Creating input nodes 18 | auto input = getInputNode(0); 19 | const auto& outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 20 | auto outputNode = QuantizeNode(input, outputIndex, ngraph::element::u8); 21 | 22 | mNgraphNodes->setOutputAtOperandIndex(outputIndex, outputNode); 23 | const auto op = sModelInfo->getOperand(outputIndex); 24 | if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { 25 | addResultNode(mDefaultOutputIndex, outputNode); 26 | } 27 | 28 | return nullptr; 29 | } 30 | 31 | } // namespace nnhal 32 | } // namespace neuralnetworks 33 | } // namespace hardware 34 | } // namespace android 35 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/RNN.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | // Helper funciton 3 | #include 4 | #undef LOG_TAG 5 | #define LOG_TAG "RNN" 6 | 7 | namespace android { 8 | namespace hardware { 9 | namespace neuralnetworks { 10 | namespace nnhal { 11 | 12 | RNN::RNN(int operationIndex) : OperationsBase(operationIndex) { 13 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 14 | } 15 | 16 | void 
RNN::connectOperationToGraph() { createNode(); } 17 | 18 | std::shared_ptr RNN::createNode() { 19 | // Creating input nodes 20 | std::shared_ptr input, W, R, bias, initial_hidden_state; 21 | 22 | input = getInputNode(0); 23 | W = getInputNode(1); 24 | R = getInputNode(2); 25 | bias = getInputNode(3); 26 | initial_hidden_state = getInputNode(4); 27 | 28 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 5); 29 | 30 | // inputs * input_weights 31 | auto input_W = std::make_shared(input, W, false, true); 32 | // state * recurrent_weights 33 | auto Ht_R = std::make_shared(initial_hidden_state, R, false, true); 34 | // (state * recurrent_weights) + bias 35 | auto add = std::make_shared(Ht_R, bias); 36 | // (inputs * input_weights) + (state * recurrent_weights) + bias 37 | auto i_t = std::make_shared(input_W, add); 38 | 39 | auto outputNode = applyActivation(i_t, activationFn); 40 | 41 | for (int i = 0; i < 2; i++) { 42 | auto outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, i); 43 | std::shared_ptr outNode; 44 | if (i == 1) { 45 | outNode = outputNode; 46 | } else { 47 | // TODO: Implement properly 48 | // Creating a dummy node with same size as outputNode, initialized to 0 49 | // and then multiplying with outputNode so that it gets connected to the graph 50 | outNode = createConstNode(outputNode->get_element_type(), outputNode->get_shape(), 51 | convertToVector(0)); 52 | outNode = std::make_shared(outNode, outputNode); 53 | } 54 | 55 | mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); 56 | ALOGD("%s Set Output index %d", __func__, outputIndex); 57 | const auto op = sModelInfo->getOperand(outputIndex); 58 | if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { 59 | addResultNode(outputIndex, outNode); 60 | ALOGD("%s Add result %d", __func__, outputIndex); 61 | } 62 | } 63 | 64 | return nullptr; 65 | } 66 | 67 | } // namespace nnhal 68 | } // namespace neuralnetworks 69 | } // namespace hardware 70 | } // namespace 
android 71 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ROIAlign.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ROIAlign" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ROIAlign::ROIAlign(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool ROIAlign::validate() { 15 | ALOGV("%s Entering", __func__); 16 | 17 | // Check Output type 18 | if (!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 19 | ALOGE("%s Output operand 0 is not of type FP32. Unsupported operation", __func__); 20 | return false; 21 | } 22 | 23 | if (isZeroSizedInput(0) || isZeroSizedInput(1) || isZeroSizedInput(2)) { 24 | ALOGE("%s Not handling zero sized input for dimension 0", __func__); 25 | return false; 26 | } 27 | 28 | if (!checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 29 | ALOGE("%s Input operand 0 is not of type FP32. Unsupported operation", __func__); 30 | return false; 31 | } 32 | if (!checkInputOperandType(1, (int32_t)OperandType::TENSOR_FLOAT32)) { 33 | ALOGE("%s Input operand 1 is not of type FP32. Unsupported operation", __func__); 34 | return false; 35 | } 36 | 37 | // TODO: support for different height_ratio and width_ratio 38 | // values 39 | auto height_ratio = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 5); 40 | auto width_ratio = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 6); 41 | if (height_ratio != width_ratio) { 42 | ALOGE( 43 | "%s: Ratio of Height and Ratio of Width from orginal image to feature map must be same " 44 | "for ROI Align. 
Got %f and %f", 45 | __func__, height_ratio, width_ratio); 46 | return false; 47 | } 48 | 49 | ALOGV("%s PASSED", __func__); 50 | return true; 51 | } 52 | 53 | std::shared_ptr ROIAlign::createNode() { 54 | ALOGV("%s Entering", __func__); 55 | 56 | bool useNchw = false; 57 | 58 | // Read inputs 59 | auto feat_maps = getInputNode(0); // 4D tensor 60 | auto rois = getInputNode(1); // 2D tensor 61 | auto batch_indices = getInputNode(2); // 1D tensor 62 | auto output_height = sModelInfo->ParseOperationInput( 63 | mNnapiOperationIndex, 3); // height of the output tensor 64 | auto output_width = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 65 | 4); // width of the output tensor 66 | auto height_ratio = sModelInfo->ParseOperationInput( 67 | mNnapiOperationIndex, 68 | 5); // ratio from the height of original image to the height of feature map. 69 | // auto width_ratio = sModelInfo->ParseOperationInput( 70 | // mNnapiOperationIndex, 71 | // 6); // ratio from the width of original image to the height of feature map. 
72 | auto sampling_pts_h = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 7); 73 | // auto sampling_pts_w = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 8); 74 | auto layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 9); 75 | 76 | if (layout) useNchw = true; 77 | 78 | if (!useNchw) // No conversion needed if useNchw set 79 | feat_maps = transpose(NHWC_NCHW, feat_maps); 80 | 81 | float spatial_scale = 1.0 / (height_ratio); 82 | int sampling_ratio = sampling_pts_h; 83 | 84 | std::shared_ptr outputNode = std::make_shared( 85 | feat_maps, rois, batch_indices, output_height, output_width, sampling_ratio, spatial_scale, 86 | "avg"); 87 | 88 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 89 | 90 | ALOGV("%s PASSED", __func__); 91 | 92 | return outputNode; 93 | } 94 | 95 | } // namespace nnhal 96 | } // namespace neuralnetworks 97 | } // namespace hardware 98 | } // namespace android 99 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ROIPooling.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ROIPooling" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ROIPooling::ROIPooling(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool ROIPooling::validate() { 15 | ALOGV("%s Entering", __func__); 16 | 17 | // Check Output type 18 | if (!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 19 | ALOGE("%s Output operand 0 is not of type FP32. Unsupported operation", __func__); 20 | return false; 21 | } 22 | 23 | // Check Input Type 24 | if (!checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 25 | ALOGE("%s Input operand 0 is not of type FP32. 
Unsupported operation", __func__); 26 | return false; 27 | } 28 | if (!checkInputOperandType(1, (int32_t)OperandType::TENSOR_FLOAT32)) { 29 | ALOGE("%s Input operand 1 is not of type FP32. Unsupported operation", __func__); 30 | return false; 31 | } 32 | 33 | // TODO: support for different height_ratio and width_ratio 34 | // values 35 | auto height_ratio = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 5); 36 | auto width_ratio = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 6); 37 | if (height_ratio != width_ratio) { 38 | ALOGE( 39 | "%s: Ratio of Height and Ratio of Width from orginal image to feature map must be same " 40 | "for ROI Pooling. Got %f and %f", 41 | __func__, height_ratio, width_ratio); 42 | return false; 43 | } 44 | 45 | ALOGV("%s PASSED", __func__); 46 | return true; 47 | } 48 | 49 | std::shared_ptr ROIPooling::createNode() { 50 | ALOGV("%s Entering", __func__); 51 | 52 | bool useNchw = false; 53 | 54 | // Read inputs 55 | auto feat_maps = getInputNode(0); // 4D tensor 56 | auto output_height = sModelInfo->ParseOperationInput( 57 | mNnapiOperationIndex, 3); // height of the output tensor 58 | auto output_width = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 59 | 4); // width of the output tensor 60 | auto height_ratio = sModelInfo->ParseOperationInput( 61 | mNnapiOperationIndex, 62 | 5); // ratio from the height of original image to the height of feature map. 
63 | auto layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 7); 64 | 65 | if (layout) useNchw = true; 66 | 67 | if (!useNchw) // No conversion needed if useNchw set 68 | feat_maps = transpose(NHWC_NCHW, feat_maps); 69 | 70 | auto output_size = ngraph::Shape{(size_t)output_height, (size_t)output_width}; 71 | float spatial_scale = 1.0 / (height_ratio); 72 | 73 | // Concat batch index of shape[num_rois] and rois shape[num_rois, 4] 74 | // to create 2-D Tensor of shape[num_rois, 5] => bi,x1,y1,x2,y2 75 | std::vector> inputs; 76 | auto axis = 1; 77 | // add bi node to inputs for concat 78 | const auto& biOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); 79 | auto bi_vec = sModelInfo->GetConstVecOperand(biOperandIndex); 80 | const auto bi_node = 81 | createConstNode(ngraph::element::f32, ngraph::Shape{bi_vec.size(), 1}, bi_vec); 82 | inputs.push_back(bi_node); 83 | // add rois node to inputs for concat 84 | auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 85 | auto inputOp = mNgraphNodes->getOperationOutput(inputIndex); 86 | inputs.push_back(inputOp); 87 | 88 | std::shared_ptr roiNode = std::make_shared(inputs, axis); 89 | ALOGI("%s Concatinated roi_node created", __func__); 90 | 91 | std::shared_ptr outputNode = std::make_shared( 92 | feat_maps, roiNode, output_size, spatial_scale); 93 | 94 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 95 | 96 | ALOGV("%s PASSED", __func__); 97 | 98 | return outputNode; 99 | } 100 | 101 | } // namespace nnhal 102 | } // namespace neuralnetworks 103 | } // namespace hardware 104 | } // namespace android 105 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/RSQRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "RSQRT" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | 
namespace nnhal { 9 | 10 | RSQRT::RSQRT(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr RSQRT::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto sqrtNode = std::make_shared(input); 18 | 19 | std::shared_ptr constNode; 20 | 21 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) 22 | constNode = createConstNode(ngraph::element::f16, {1}, convertToVector(1.0)); 23 | else 24 | constNode = createConstNode(ngraph::element::f32, {1}, convertToVector(1.0)); 25 | 26 | auto outputNode = std::make_shared(constNode, sqrtNode); 27 | 28 | return outputNode; 29 | } 30 | 31 | } // namespace nnhal 32 | } // namespace neuralnetworks 33 | } // namespace hardware 34 | } // namespace android 35 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ReduceAll.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceAll" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceAll::ReduceAll(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceAll::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto reduction_axes = getInputNode(1); 18 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 19 | 20 | auto outputNode = 21 | std::make_shared(input, reduction_axes, keep_dims); 22 | 23 | return outputNode; 24 | } 25 | 26 | } // namespace nnhal 27 | } // namespace neuralnetworks 28 | } // namespace hardware 29 | } // namespace android 30 | -------------------------------------------------------------------------------- 
/ngraph_creator/operations/src/ReduceAny.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceAny" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceAny::ReduceAny(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceAny::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto reduction_axes = getInputNode(1); 18 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 19 | 20 | auto outputNode = 21 | std::make_shared(input, reduction_axes, keep_dims); 22 | 23 | return outputNode; 24 | } 25 | 26 | } // namespace nnhal 27 | } // namespace neuralnetworks 28 | } // namespace hardware 29 | } // namespace android 30 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ReduceMax.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceMax" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceMax::ReduceMax(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceMax::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto reduction_axes = getInputNode(1); 18 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 19 | 20 | std::shared_ptr outputNode; 21 | outputNode = std::make_shared(input, reduction_axes, keep_dims); 22 | 23 | return outputNode; 24 | } 25 | 26 | } // namespace nnhal 27 | } // namespace neuralnetworks 28 | } // namespace hardware 29 | 
} // namespace android 30 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ReduceMin.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceMin" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceMin::ReduceMin(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceMin::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | auto reduction_axes = getInputNode(1); 21 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 22 | 23 | std::shared_ptr outputNode; 24 | outputNode = std::make_shared(input, reduction_axes, keep_dims); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ReduceProd.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceProd" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceProd::ReduceProd(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceProd::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto reduction_axes = getInputNode(1); 18 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 19 | 20 | auto outputNode = 21 | std::make_shared(input, reduction_axes, 
keep_dims); 22 | 23 | return outputNode; 24 | } 25 | 26 | } // namespace nnhal 27 | } // namespace neuralnetworks 28 | } // namespace hardware 29 | } // namespace android 30 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/ReduceSum.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "ReduceSum" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | ReduceSum::ReduceSum(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr ReduceSum::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | auto reduction_axes = getInputNode(1); 18 | auto keep_dims = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 19 | 20 | auto outputNode = std::make_shared(input, reduction_axes, keep_dims); 21 | 22 | return outputNode; 23 | } 24 | 25 | } // namespace nnhal 26 | } // namespace neuralnetworks 27 | } // namespace hardware 28 | } // namespace android 29 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Relu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Relu" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Relu::Relu(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Relu::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr outputNode; 21 | 22 | outputNode = std::make_shared(input); 23 | 24 | return 
outputNode; 25 | } 26 | 27 | } // namespace nnhal 28 | } // namespace neuralnetworks 29 | } // namespace hardware 30 | } // namespace android 31 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Relu1.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Relu1" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Relu1::Relu1(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Relu1::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr outputNode; 21 | 22 | outputNode = std::make_shared(input, -1, 1); 23 | 24 | return outputNode; 25 | } 26 | 27 | } // namespace nnhal 28 | } // namespace neuralnetworks 29 | } // namespace hardware 30 | } // namespace android 31 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Relu6.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Relu6" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Relu6::Relu6(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Relu6::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr outputNode; 21 | 22 | outputNode = std::make_shared(input, 0, 6); 23 | 24 | return outputNode; 25 | } 26 | 27 | } // namespace nnhal 28 | } // namespace neuralnetworks 29 | } // namespace hardware 30 | } // 
namespace android 31 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Reshape.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Reshape" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Reshape::Reshape(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Reshape::validate() { 15 | const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 16 | if (!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex) || !isValidInputTensor(1)) { 17 | // TODO: Support CPU_reshape_all_tensors_as_inputs 18 | ALOGE("%s Only Constant non-zero dimensions supported now", __func__); 19 | return false; 20 | } 21 | ALOGV("%s PASSED", __func__); 22 | return true; 23 | } 24 | 25 | std::shared_ptr Reshape::createNode() { 26 | const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 27 | auto outDims = sModelInfo->GetConstVecOperand(dimsOperandIndex); 28 | VLOGDIMS(L3, outDims, "Reshape::createNode dims"); 29 | std::shared_ptr inputOp; 30 | inputOp = getInputNode(0); 31 | 32 | const auto& inDims = getInputOperandDimensions(0); 33 | auto numInputElements = 1; 34 | int strechDim = -1; 35 | auto numOutputElements = 1; 36 | 37 | for (size_t i = 0; i < inDims.size(); i++) numInputElements *= inDims[i]; 38 | 39 | for (size_t i = 0; i < outDims.size(); i++) { 40 | if ((int)outDims[i] < 0) { 41 | strechDim = i; 42 | continue; 43 | } 44 | numOutputElements *= outDims[i]; 45 | } 46 | if (strechDim >= 0) { 47 | auto strechValue = numInputElements / numOutputElements; 48 | outDims[strechDim] = (uint32_t)strechValue; 49 | numOutputElements *= strechValue; 50 | 51 | VLOGDIMS(L3, outDims, "Reshape::outDims with stretch dimension 
introduced"); 52 | } 53 | 54 | if (numInputElements != numOutputElements) { 55 | ALOGE("numInputElements = %d is not equal to numOutputElements = %d", numInputElements, 56 | numOutputElements); 57 | } 58 | 59 | auto shapeNode = std::make_shared( 60 | ngraph::element::i32, ngraph::Shape{outDims.size()}, outDims.data()); 61 | 62 | std::shared_ptr outputNode = 63 | std::make_shared(inputOp, shapeNode, true); 64 | 65 | return outputNode; 66 | } 67 | 68 | } // namespace nnhal 69 | } // namespace neuralnetworks 70 | } // namespace hardware 71 | } // namespace android 72 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/SQRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "SQRT" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | SQRT::SQRT(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr SQRT::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Select.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Select" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Select::Select(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | 
std::shared_ptr Select::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2, input3; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | input3 = getInputNode(2); 21 | 22 | std::shared_ptr outputNode; 23 | 24 | outputNode = std::make_shared(input1, input2, input3); 25 | 26 | return outputNode; 27 | } 28 | 29 | } // namespace nnhal 30 | } // namespace neuralnetworks 31 | } // namespace hardware 32 | } // namespace android 33 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Sin.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Sin" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Sin::Sin(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Sin::createNode() { 15 | // Creating input nodes 16 | auto input = getInputNode(0); 17 | 18 | auto outputNode = std::make_shared(input); 19 | 20 | return outputNode; 21 | } 22 | 23 | } // namespace nnhal 24 | } // namespace neuralnetworks 25 | } // namespace hardware 26 | } // namespace android 27 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Softmax.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Softmax" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Softmax::Softmax(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Softmax::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input, outputNode; 
17 | 18 | input = getInputNode(0); 19 | 20 | std::shared_ptr betaNode; 21 | 22 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) { 23 | auto beta = sModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 1); 24 | betaNode = createConstNode(ngraph::element::f16, {1}, convertToVector(beta)); 25 | } else { 26 | auto beta = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 27 | betaNode = createConstNode(ngraph::element::f32, {1}, convertToVector(beta)); 28 | } 29 | int axis = -1; 30 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 31 | if (inputsSize == 3) axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 32 | 33 | const auto axisNode = createConstNode(ngraph::element::i32, {1}, convertToVector(axis)); 34 | 35 | // max(input[batch, :] 36 | auto max = std::make_shared(input, axisNode, true); 37 | // input[batch, i] - max(input[batch, :]) 38 | auto sub = std::make_shared(input, max); 39 | // (input[batch, i] - max(input[batch, :])) * beta 40 | auto mul = std::make_shared(sub, betaNode); 41 | // exp((input[batch, i] - max(input[batch, :])) * beta) 42 | auto exp = std::make_shared(mul); 43 | // sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} 44 | auto sum = std::make_shared(exp, axisNode, true); 45 | // exp((input[batch, i] - max(input[batch, :])) * beta) / sum_{k}{exp((input[batch, k] - 46 | // max(input[batch, :])) * beta)} 47 | outputNode = std::make_shared(exp, sum); 48 | 49 | return outputNode; 50 | } 51 | 52 | } // namespace nnhal 53 | } // namespace neuralnetworks 54 | } // namespace hardware 55 | } // namespace android 56 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/SpaceToBatch.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "SpaceToBatch" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace 
neuralnetworks { 8 | namespace nnhal { 9 | 10 | SpaceToBatch::SpaceToBatch(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool SpaceToBatch::validate() { 15 | // Check input rank 16 | const auto inputRank = getInputOperandDimensions(0).size(); 17 | 18 | if (inputRank != 4) return false; 19 | 20 | auto& block_shape_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 21 | // TODO: Add Support for all_tensors_as_inputs 22 | if (!sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex)) { 23 | ALOGE("%s Only Constant dimensions supported now", __func__); 24 | return false; 25 | } 26 | 27 | auto pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); 28 | // TODO: Add Support for all_tensors_as_inputs 29 | if (!sModelInfo->isOperandLifeTimeConst(pad_OperandIndex)) { 30 | ALOGE("%s Only Constant dimensions supported now", __func__); 31 | return false; 32 | } 33 | 34 | ALOGV("%s PASSED", __func__); 35 | return true; 36 | } 37 | 38 | std::shared_ptr SpaceToBatch::createNode() { 39 | int32_t layout = 0; 40 | bool useNchw = false; 41 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 42 | 43 | auto inputNode = getInputNode(0); 44 | 45 | auto& inDims = getInputOperandDimensions(0); 46 | 47 | const auto& block_shape_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 48 | auto block_shape = sModelInfo->GetConstVecOperand(block_shape_OperandIndex); 49 | 50 | // Compensation for the shape to be same as the size of data input shape 51 | block_shape.insert(block_shape.begin(), 1); 52 | block_shape.insert(block_shape.begin(), 1); 53 | 54 | const auto& pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); 55 | // Fetch the 2D pad as a 1D vector, and then split it into 2 56 | auto pad_2d = sModelInfo->GetConstVecOperand(pad_OperandIndex); 57 | auto half_size = 
pad_2d.size() / 2; 58 | std::vector pad_0(half_size); 59 | std::vector pad_1(half_size); 60 | for (size_t i = 0; i < half_size; i++) { 61 | pad_0[i] = pad_2d[2 * i]; 62 | pad_1[i] = pad_2d[2 * i + 1]; 63 | } 64 | 65 | // Compensation for the shape to be same as the size of data input shape 66 | pad_0.insert(pad_0.begin(), 0); 67 | pad_0.insert(pad_0.begin(), 0); 68 | 69 | // Compensation for the shape to be same as the size of data input shape 70 | pad_1.insert(pad_1.begin(), 0); 71 | pad_1.insert(pad_1.begin(), 0); 72 | 73 | const auto block_shape_node = 74 | createConstNode(ngraph::element::i64, {inDims.size()}, block_shape); 75 | const auto pad_begin = createConstNode(ngraph::element::i64, {inDims.size()}, pad_0); 76 | const auto pad_end = createConstNode(ngraph::element::i64, {inDims.size()}, pad_1); 77 | 78 | if (inputsSize == 4) layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 3); 79 | if (layout) useNchw = true; 80 | 81 | if (!useNchw) // No conversion needed if useNchw set 82 | inputNode = transpose(NHWC_NCHW, inputNode); 83 | 84 | std::shared_ptr outputNode = std::make_shared( 85 | inputNode, block_shape_node, pad_begin, pad_end); 86 | 87 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 88 | 89 | return outputNode; 90 | } 91 | 92 | } // namespace nnhal 93 | } // namespace neuralnetworks 94 | } // namespace hardware 95 | } // namespace android 96 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/SpaceToDepth.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "SpaceToDepth" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | SpaceToDepth::SpaceToDepth(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr 
SpaceToDepth::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | bool useNchw = false; 18 | 19 | const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); 20 | 21 | if (inputsSize == 3) { 22 | auto layout = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 23 | if (layout) useNchw = true; 24 | } 25 | 26 | input = getInputNode(0); 27 | auto block_size = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 28 | 29 | if (!useNchw) // No conversion needed if useNchw set 30 | input = transpose(NHWC_NCHW, input); 31 | 32 | std::shared_ptr outputNode; 33 | 34 | outputNode = std::make_shared( 35 | input, ngraph::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, block_size); 36 | 37 | if (!useNchw) outputNode = transpose(NCHW_NHWC, outputNode); 38 | 39 | return outputNode; 40 | } 41 | 42 | } // namespace nnhal 43 | } // namespace neuralnetworks 44 | } // namespace hardware 45 | } // namespace android 46 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Split.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Split" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Split::Split(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | void Split::connectOperationToGraph() { createNode(); } 15 | 16 | std::shared_ptr Split::createNode() { 17 | // Creating input nodes 18 | std::shared_ptr splitNode; 19 | 20 | splitNode = getInputNode(0, false); 21 | 22 | auto axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 23 | auto axisNode = createConstNode(ngraph::element::i32, {}, convertToVector(axis)); 24 | auto numSplits = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 25 | 26 | auto 
outputNode = 27 | std::make_shared(splitNode, axisNode, numSplits)->outputs(); 28 | 29 | for (size_t i = 0; i < numSplits; i++) { 30 | auto outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, i); 31 | // TODO: remove this dummy convert 32 | std::shared_ptr outNode; 33 | if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32)) { 34 | outNode = 35 | std::make_shared(outputNode[i], ngraph::element::f32); 36 | } else if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT16)) { 37 | outNode = 38 | std::make_shared(outputNode[i], ngraph::element::f16); 39 | } else if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_INT32)) { 40 | outNode = 41 | std::make_shared(outputNode[i], ngraph::element::i32); 42 | } else if (checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM) || 43 | checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) { 44 | outNode = std::make_shared(outputNode[i], ngraph::element::u8); 45 | } 46 | 47 | mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); 48 | const auto op = sModelInfo->getOperand(outputIndex); 49 | if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { 50 | addResultNode(outputIndex, outNode); 51 | } 52 | } 53 | 54 | return nullptr; 55 | } 56 | 57 | } // namespace nnhal 58 | } // namespace neuralnetworks 59 | } // namespace hardware 60 | } // namespace android 61 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Squeeze.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Squeeze" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Squeeze::Squeeze(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Squeeze::validate() { 15 | // TODO: Add 
Support for all_tensors_as_inputs 16 | const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 17 | 18 | // TODO: Support OmittedInput. 19 | // The empty 2nd argument in Squeeze op causes dynamic output 20 | // To add support, the dims will have to be calculated statically 21 | if (sModelInfo->isOmittedInput(mNnapiOperationIndex, 1) || 22 | !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { 23 | ALOGE("%s Only Constant dimensions supported now", __func__); 24 | return false; 25 | } 26 | 27 | return true; 28 | } 29 | 30 | std::shared_ptr Squeeze::createNode() { 31 | // Creating input nodes 32 | std::shared_ptr input; 33 | 34 | input = getInputNode(0); 35 | 36 | std::shared_ptr dims; 37 | 38 | if (!sModelInfo->isOmittedInput(mNnapiOperationIndex, 1)) 39 | dims = getInputNode(1); 40 | else 41 | dims = createConstNode(ngraph::element::i32, {0}, std::vector{}); 42 | 43 | std::shared_ptr outputNode; 44 | 45 | outputNode = std::make_shared(input, dims); 46 | 47 | return outputNode; 48 | } 49 | 50 | } // namespace nnhal 51 | } // namespace neuralnetworks 52 | } // namespace hardware 53 | } // namespace android 54 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/StridedSlice.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "StridedSlice" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | StridedSlice::StridedSlice(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool StridedSlice::validate() { 15 | // Check input rank 16 | const int64_t inputRank = getInputOperandDimensions(0).size(); 17 | if (inputRank > 4) { 18 | ALOGE("%s Invalid input dimensions size!", __func__); 19 | return false; 20 | } 21 | 22 | // TODO: Add 
Support for all_tensors_as_inputs 23 | auto& begins_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 24 | if (!sModelInfo->isOperandLifeTimeConst(begins_OperandIndex)) { 25 | ALOGE("%s Only Constant dimensions supported now", __func__); 26 | return false; 27 | } 28 | 29 | auto ends_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); 30 | if (!sModelInfo->isOperandLifeTimeConst(ends_OperandIndex)) { 31 | ALOGE("%s Only Constant dimensions supported now", __func__); 32 | return false; 33 | } 34 | 35 | auto& strides_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 3); 36 | if (!sModelInfo->isOperandLifeTimeConst(strides_OperandIndex)) { 37 | ALOGE("%s Only Constant dimensions supported now", __func__); 38 | return false; 39 | } 40 | 41 | auto shrink_axis_mask = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 6); 42 | std::vector shrink_axis_mask_bits = getMaskBits(shrink_axis_mask, inputRank); 43 | 44 | for (int i = 0; i < inputRank; i++) { 45 | if (shrink_axis_mask_bits[i]) { 46 | // Check for negative stride when shrink axis bit is set 47 | auto stridesVector = sModelInfo->GetConstVecOperand(strides_OperandIndex); 48 | if (stridesVector[i] < 0) { 49 | ALOGE("%s Negative stride value when shrink axis bit set is not supported", 50 | __func__); 51 | return false; 52 | } 53 | 54 | // check for slice size larger than expected output 55 | auto beginVector = sModelInfo->GetConstVecOperand(begins_OperandIndex); 56 | auto endVector = sModelInfo->GetConstVecOperand(ends_OperandIndex); 57 | if (((beginVector[i] - endVector[i]) > 1) || ((beginVector[i] - endVector[i]) < -1)) { 58 | ALOGE("%s Trying to access invalid slice size when shrink axis bit is set", 59 | __func__); 60 | return false; 61 | } 62 | } 63 | } 64 | 65 | return true; 66 | } 67 | 68 | std::shared_ptr StridedSlice::createNode() { 69 | // Creating input nodes 70 | std::shared_ptr data = getInputNode(0); 71 | std::shared_ptr begin = getInputNode(1); 
72 | std::shared_ptr end = getInputNode(2); 73 | std::shared_ptr strides = getInputNode(3); 74 | 75 | auto begin_mask = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 4); 76 | auto end_mask = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 5); 77 | auto shrink_axis_mask = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 6); 78 | 79 | const auto data_dim_size = getInputOperandDimensions(0).size(); 80 | std::vector begin_mask_bits, end_mask_bits, shrink_axis_mask_bits; 81 | 82 | begin_mask_bits = getMaskBits(begin_mask, data_dim_size); 83 | end_mask_bits = getMaskBits(end_mask, data_dim_size); 84 | shrink_axis_mask_bits = getMaskBits(shrink_axis_mask, data_dim_size); 85 | const std::vector new_axis_mask = std::vector{}; 86 | const std::vector ellipsis_mask = std::vector{}; 87 | 88 | std::shared_ptr outputNode = std::make_shared( 89 | data, begin, end, strides, begin_mask_bits, end_mask_bits, new_axis_mask, 90 | shrink_axis_mask_bits, ellipsis_mask); 91 | 92 | return outputNode; 93 | } 94 | 95 | std::vector StridedSlice::getMaskBits(int32_t maskValue, size_t vec_size) { 96 | std::vector mask_bits(vec_size); 97 | int i = 0; 98 | while (maskValue != 0) { 99 | mask_bits[i] = (maskValue % 2) == 0 ? 
0 : 1; 100 | maskValue /= 2; 101 | i++; 102 | } 103 | return mask_bits; 104 | } 105 | 106 | } // namespace nnhal 107 | } // namespace neuralnetworks 108 | } // namespace hardware 109 | } // namespace android 110 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Sub.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Sub" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Sub::Sub(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Sub::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input1, input2; 17 | 18 | input1 = getInputNode(0); 19 | input2 = getInputNode(1); 20 | 21 | auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); 22 | 23 | auto subNode = std::make_shared(input1, input2, 24 | ngraph::op::AutoBroadcastType::NUMPY); 25 | 26 | auto outputNode = applyActivation(subNode, activationFn); 27 | 28 | return outputNode; 29 | } 30 | 31 | } // namespace nnhal 32 | } // namespace neuralnetworks 33 | } // namespace hardware 34 | } // namespace android 35 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/Tanh.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Tanh" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Tanh::Tanh(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | std::shared_ptr Tanh::createNode() { 15 | // Creating input nodes 16 | std::shared_ptr input; 17 | 18 | input 
= getInputNode(0); 19 | 20 | std::shared_ptr outputNode; 21 | 22 | outputNode = std::make_shared(input); 23 | 24 | return outputNode; 25 | } 26 | 27 | } // namespace nnhal 28 | } // namespace neuralnetworks 29 | } // namespace hardware 30 | } // namespace android 31 | -------------------------------------------------------------------------------- /ngraph_creator/operations/src/TopkV2.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "TopkV2" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | TopkV2::TopkV2(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | void TopkV2::connectOperationToGraph() { createNode(); } 15 | 16 | std::shared_ptr TopkV2::createNode() { 17 | // Creating input nodes 18 | std::shared_ptr input; 19 | 20 | input = getInputNode(0); 21 | 22 | auto k = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 1); 23 | int axis = -1; // to find largest entries for the last dimension. 
24 | 25 | auto k_node = createConstNode(ngraph::element::i32, {}, convertToVector(k)); 26 | const auto topk = 27 | std::make_shared(input, k_node, axis, ngraph::opset3::TopK::Mode::MAX, 28 | ngraph::opset3::TopK::SortType::SORT_VALUES); 29 | 30 | auto outputNode = topk->outputs(); 31 | 32 | for (int i = 0; i < 2; i++) { 33 | auto outputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, i); 34 | // TODO: remove this dummy convert 35 | std::shared_ptr outNode; 36 | if (checkOutputOperandType(i, (int32_t)OperandType::TENSOR_FLOAT32)) { 37 | outNode = 38 | std::make_shared(outputNode[i], ngraph::element::f32); 39 | } else if (checkOutputOperandType(i, (int32_t)OperandType::TENSOR_FLOAT16)) { 40 | outNode = 41 | std::make_shared(outputNode[i], ngraph::element::f16); 42 | } else if (checkOutputOperandType(i, (int32_t)OperandType::TENSOR_INT32)) { 43 | outNode = 44 | std::make_shared(outputNode[i], ngraph::element::i32); 45 | } else if (checkOutputOperandType(i, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) { 46 | outNode = 47 | std::make_shared(outputNode[i], ngraph::element::f32); 48 | outNode = QuantizeNode(outNode, outputIndex, ngraph::element::u8); 49 | } else if (checkOutputOperandType(i, (int32_t)OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) { 50 | outNode = 51 | std::make_shared(outputNode[i], ngraph::element::f32); 52 | outNode = QuantizeNode(outNode, outputIndex, ngraph::element::i8); 53 | } 54 | 55 | mNgraphNodes->setOutputAtOperandIndex(outputIndex, outNode); 56 | ALOGD("%s Set Output index %d", __func__, outputIndex); 57 | const auto op = sModelInfo->getOperand(outputIndex); 58 | if (op.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { 59 | addResultNode(outputIndex, outNode); 60 | ALOGD("%s Add result %d", __func__, outputIndex); 61 | } 62 | } 63 | return nullptr; 64 | } 65 | 66 | } // namespace nnhal 67 | } // namespace neuralnetworks 68 | } // namespace hardware 69 | } // namespace android 70 | 
-------------------------------------------------------------------------------- /ngraph_creator/operations/src/Transpose.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "Transpose" 4 | 5 | namespace android { 6 | namespace hardware { 7 | namespace neuralnetworks { 8 | namespace nnhal { 9 | 10 | Transpose::Transpose(int operationIndex) : OperationsBase(operationIndex) { 11 | mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); 12 | } 13 | 14 | bool Transpose::validate() { 15 | // TODO: Add Support for all_tensors_as_inputs 16 | const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); 17 | const auto& dims = getInputOperandDimensions(1); 18 | if (!dims.empty() && dims[0] != 0 && !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { 19 | ALOGE("%s Only Constant dimensions supported now", __func__); 20 | return false; 21 | } 22 | 23 | return true; 24 | } 25 | 26 | std::shared_ptr Transpose::createNode() { 27 | // Creating input nodes 28 | std::shared_ptr input; 29 | 30 | input = getInputNode(0); 31 | 32 | std::shared_ptr order; 33 | 34 | const auto& dims = getInputOperandDimensions(1); 35 | if (!dims.empty() && dims[0] != 0) { 36 | order = getInputNode(1); 37 | } else { 38 | order = createConstNode(ngraph::element::i32, {0}, convertToVector(0)); 39 | } 40 | 41 | std::shared_ptr outputNode; 42 | 43 | outputNode = std::make_shared(input, order); 44 | 45 | return outputNode; 46 | } 47 | 48 | } // namespace nnhal 49 | } // namespace neuralnetworks 50 | } // namespace hardware 51 | } // namespace android 52 | -------------------------------------------------------------------------------- /ngraph_creator/src/NgraphNodes.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #undef LOG_TAG 3 | #define LOG_TAG "NgraphNodes" 4 | 5 | namespace android { 6 | namespace hardware { 7 | 
namespace neuralnetworks { 8 | namespace nnhal { 9 | NgraphNodes::NgraphNodes(size_t operandsSize, size_t resultsSize) { 10 | mOutputAtOperandIndex.resize(operandsSize); 11 | mForcedNchw.assign(operandsSize, false); 12 | mResultNodes.reserve(resultsSize); 13 | ALOGV("%s Constructed operandsSize %zu, resultsSize %zu", __func__, operandsSize, resultsSize); 14 | } 15 | 16 | NgraphNodes::~NgraphNodes() { ALOGV("%s Destructed", __func__); } 17 | 18 | void NgraphNodes::addInputParam(std::shared_ptr inParam) { 19 | mInputParams.push_back(inParam); 20 | } 21 | void NgraphNodes::setOutputAtOperandIndex(size_t index, ngraph::Output output) { 22 | ALOGV("%s index %zu", __func__, index); 23 | mOutputAtOperandIndex[index] = output; 24 | } 25 | ngraph::Output NgraphNodes::getOperationOutput(size_t index) { 26 | return mOutputAtOperandIndex[index]; 27 | } 28 | 29 | void NgraphNodes::setResultNode(size_t outputIndex, std::shared_ptr resultNode) { 30 | ALOGD("setResultNode %zu", outputIndex); 31 | mResultNodes.push_back(resultNode); 32 | } 33 | 34 | const std::string& NgraphNodes::getNodeName(size_t index) { 35 | if (mNodeNames.find(index) == mNodeNames.end()) { 36 | mNodeNames[index] = mOutputAtOperandIndex[index].get_node_shared_ptr()->get_name(); 37 | ALOGD("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); 38 | } 39 | ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); 40 | return mNodeNames[index]; 41 | } 42 | // remove null input node parameter 43 | void NgraphNodes::removeInputParameter(std::string name, size_t index) { 44 | for (size_t i = 0; i < mInputParams.size(); i++) { 45 | if (name.compare(mInputParams[i]->get_name()) == 0) { 46 | mInputParams.erase(mInputParams.begin() + i); 47 | setInvalidNode(index); 48 | } 49 | } 50 | } 51 | 52 | std::shared_ptr NgraphNodes::generateGraph() { 53 | return std::make_shared(mResultNodes, mInputParams); 54 | } 55 | 56 | void NgraphNodes::setInvalidNode(size_t index) { mNodeNames[index] = ""; 
} 57 | 58 | } // namespace nnhal 59 | } // namespace neuralnetworks 60 | } // namespace hardware 61 | } // namespace android 62 | -------------------------------------------------------------------------------- /service.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 The Android Open Source Project 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #include 18 | #include "Driver.h" 19 | #define MAX_LENGTH (255) 20 | 21 | #if __ANDROID__ 22 | #include 23 | #include 24 | 25 | #undef LOG_TAG 26 | #define LOG_TAG "neuralnetworks-hal-service" 27 | 28 | using android::hardware::configureRpcThreadpool; 29 | using android::hardware::joinRpcThreadpool; 30 | using android::hardware::neuralnetworks::nnhal::Driver; 31 | 32 | int main(int argc, char* argv[]) { 33 | if (argc > 2 && argv[2] != NULL && strnlen(argv[2], MAX_LENGTH) > 0) { 34 | if (strcmp(argv[1], "-D") != 0) return 0; 35 | const char* deviceType = argv[2]; 36 | android::sp device; 37 | 38 | if (strncmp(deviceType, "GNA", 3) == 0) 39 | device = new Driver(android::hardware::neuralnetworks::nnhal::IntelDeviceType::GNA); 40 | else if (strncmp(deviceType, "VPU", 3) == 0) 41 | device = new Driver(android::hardware::neuralnetworks::nnhal::IntelDeviceType::VPU); 42 | else if (strncmp(deviceType, "GPU", 3) == 0) 43 | device = new Driver(android::hardware::neuralnetworks::nnhal::IntelDeviceType::GPU); 44 
| else 45 | device = new Driver(android::hardware::neuralnetworks::nnhal::IntelDeviceType::CPU); 46 | 47 | ALOGD("NN-HAL-1.3(%s) is ready.", deviceType); 48 | configureRpcThreadpool(4, true); 49 | android::status_t status = device->registerAsService(deviceType); 50 | LOG_ALWAYS_FATAL_IF(status != android::OK, "Error while registering as service for %s: %d", 51 | deviceType, status); 52 | joinRpcThreadpool(); 53 | } 54 | 55 | return 0; 56 | } 57 | #else 58 | // This registers the SampleDriverFull into the DeviceManager. 59 | namespace android { 60 | namespace hardware { 61 | namespace neuralnetworks { 62 | 63 | ::android::sp V1_0::IDevice::getService(const std::string& serviceName, bool dummy) { 64 | ALOGD("Initializaing the Intel NNHAL driver. Service name: %s", serviceName.c_str()); 65 | return new nnhal::Driver(nnhal::IntelDeviceType::CPU); 66 | } 67 | 68 | } // namespace neuralnetworks 69 | } // namespace hardware 70 | } // namespace android 71 | #endif 72 | --------------------------------------------------------------------------------