├── README.md
├── dlinfer.cpp
├── dlinfer.h
├── dlinfer.swigcxx
├── examples
│   └── basic_classification
│       └── main.go
└── model.go

/README.md:
--------------------------------------------------------------------------------
# A "deep learning" inference engine for Go

This project makes trained models, such as AlexNet and GoogleNet, accessible from Go. It uses [Intel's inference engine](https://software.intel.com/en-us/deep-learning-sdk) (part of the Deep Learning SDK) to interact with trained and optimized neural networks.

_Note_: This is a work in progress.

# Dependencies

- [Go 1.8+](https://golang.org/)
- Ubuntu 14.04
- [Intel's Deep Learning SDK Deployment Tools](https://software.intel.com/en-us/deep-learning-sdk)
- [swig 3.0.6+](http://www.swig.org/)

# Use

- Get the dlinfer package:

```
go get github.com/gopherds/dlinfer
```

- Set the following environment variable:

```
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/bin/intel64/lib:/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/lib/intel64
```

- Build/install your Go programs as you normally would with `go build` and `go install`. See [here](examples/basic_classification/main.go) for an example.

--------------------------------------------------------------------------------
/dlinfer.cpp:
--------------------------------------------------------------------------------
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

/**
 * \brief Implementations of methods for working with the Inference Engine API
 * \file InferenceEngineConfigurator.cpp
 * \example inference_engine_classification_sample/core/InferenceEngineConfigurator.cpp
 */
#include "dlinfer.h"

#include <format_reader_ptr.h>

#include <algorithm>
#include <cctype>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

using namespace InferenceEngine;

static std::string fileNameNoExt(const std::string &filepath) {
    auto pos = filepath.rfind('.');
    if (pos == std::string::npos) return filepath;
    return filepath.substr(0, pos);
}

// trim from both ends (in place)
inline std::string &trim(std::string &s) {
    s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
    s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
    return s;
}

static inline std::string stringFormat(const char *msg, ...) {
    va_list va;
    va_start(va, msg);
    char buffer[65536];

    vsnprintf(buffer, sizeof(buffer), msg, va);
    va_end(va);
    return buffer;
}


InferenceEngineConfigurator::InferenceEngineConfigurator(const std::string &modelFile,
                                                         const std::vector<std::string> &pluginPath,
                                                         const std::string &pluginName, const std::string &labelFile)
        : _plugin(selectPlugin(pluginPath, pluginName)) /* connect to plugin */, imageLoaded(false) {
    // Create network reader and load it from file
    network.ReadNetwork(modelFile);
    if (!network.isParseSuccess()) THROW_IE_EXCEPTION << "cannot load a failed Model";
    _plugin->Unload();

    // Get file names for files with weights and labels
    std::string binFileName = fileNameNoExt(modelFile) + ".bin";
    network.ReadWeights(binFileName.c_str());

    std::string labelFileName = fileNameNoExt(modelFile) + ".labels";

    // Change path to labels file if necessary
    if (!labelFile.empty()) {
        labelFileName = labelFile;
    }

    // Try to read labels file
    readLabels(labelFileName);
}

/*
 * Method reads the labels file
 * @param fileName - the file path
 * @return true on success, false otherwise
 */
bool InferenceEngineConfigurator::readLabels(const std::string &fileName) {
    _classes.clear();

    std::ifstream inputFile;
    inputFile.open(fileName, std::ios::in);
    if (!inputFile.is_open())
        return false;

    std::string strLine;
    while (std::getline(inputFile, strLine)) {
        trim(strLine);
        _classes.push_back(strLine);
    }

    return true;
}

void InferenceEngineConfigurator::loadImages(const std::string &image) {
    std::vector<std::string> imageVector;
    imageVector.push_back(image);
    loadImages(imageVector);
}

void InferenceEngineConfigurator::loadImages(const std::vector<std::string> &images) {
    InferenceEngine::SizeVector inputDims;
    network.getInputDimentions(inputDims);
    size_t batchSize = inputDims.at(inputDims.size() - 1);
    inputDims.at(inputDims.size() - 1) = 1;

    int inputNetworkSize = std::accumulate(inputDims.begin(), inputDims.end(), 1, std::multiplies<int>());

    if (!inputDims.size()) {
        THROW_IE_EXCEPTION << "Error: Incorrect network input dimensions!";
    }

    std::vector<std::shared_ptr<unsigned char>> readImages;

    for (auto i = 0; i < images.size(); i++) {
        FormatReader::ReaderPtr reader(images.at(i).c_str());
        if (reader.get() == nullptr) {
            std::cerr << "[WARNING]: Image " << images.at(i) << " cannot be read!" << std::endl;
            continue;
        }
        if (reader->size() != inputNetworkSize) {
            std::cerr << "[WARNING]: Input sizes mismatch, got " << reader->size() << " bytes, expecting "
                      << inputNetworkSize << std::endl;
            continue;
        }
        readImages.push_back(reader->getData());
        imageNames.push_back(images.at(i));
    }

    if (readImages.size() == 0) {
        THROW_IE_EXCEPTION << "Valid input images were not found!";
    }

    if (batchSize == 1) {
        network.getNetwork().setBatchSize(readImages.size());
    } else {
        if (batchSize > readImages.size()) {
            auto readImagesSize = readImages.size();
            size_t diff = batchSize / readImagesSize;

            for (auto i = 1; i < diff; i++) {
                for (auto j = 0; j < readImagesSize; j++) {
                    imageNames.push_back(imageNames.at(j));
                    readImages.push_back(readImages.at(j));
                }
            }
            if (readImagesSize * diff != batchSize) {
                for (auto j = 0; j < batchSize - readImagesSize * diff; j++) {
                    imageNames.push_back(imageNames.at(j));
                    readImages.push_back(readImages.at(j));
                }
            }
        } else if (batchSize < readImages.size()) {
            while (readImages.size() != batchSize) {
                auto name = imageNames.at(imageNames.size() - 1);
                std::cerr << "[WARNING]: Image " << name << " skipped!" << std::endl;
                imageNames.pop_back();
                readImages.pop_back();
            }
        }
    }

    inputDims = network.getNetwork().getInput()->dims;
    InferenceEngine::SizeVector outputDims = network.getNetwork().getOutput()->dims;

    switch (network.getNetwork().getPrecision()) {
        case Precision::FP32 :
            _input = InferenceEngine::make_shared_blob<float, InferenceEngine::SizeVector>(inputDims);
            break;
        case Precision::Q78 :
        case Precision::I16 :
            _input = InferenceEngine::make_shared_blob<short, InferenceEngine::SizeVector>(inputDims);
            break;
        case Precision::U8 :
            _input = InferenceEngine::make_shared_blob<uint8_t, InferenceEngine::SizeVector>(inputDims);
            break;
        default:
            THROW_IE_EXCEPTION << "Unsupported network precision: " << network.getNetwork().getPrecision();
    }
    _input->allocate();

    _output = InferenceEngine::make_shared_blob<float, InferenceEngine::SizeVector>(outputDims);
    _output->allocate();

    std::shared_ptr<unsigned char> imagesData;
    size_t imagesSize = readImages.size() * inputNetworkSize;
    imagesData.reset(new unsigned char[imagesSize], std::default_delete<unsigned char[]>());

    for (auto i = 0, k = 0; i < readImages.size(); i++) {
        for (auto j = 0; j < inputNetworkSize; j++, k++) {
            imagesData.get()[k] = readImages.at(i).get()[j];
        }
    }

    readImages.clear();

    InferenceEngine::ConvertImageToInput(imagesData.get(), imagesSize, *_input);

    imageLoaded = true;
}

void InferenceEngineConfigurator::infer() {
    if (!imageLoaded) {
        THROW_IE_EXCEPTION << "Scoring failed! Input data is not loaded!";
    }
    InferenceEngine::ResponseDesc dsc;
    InferenceEngine::StatusCode sts = _plugin->Infer(*_input, *_output, &dsc);

    // Check errors
    if (sts == InferenceEngine::GENERAL_ERROR) {
        THROW_IE_EXCEPTION << "Scoring failed! Critical error: " << dsc.msg;
    } else if (sts == InferenceEngine::NOT_IMPLEMENTED) {
        THROW_IE_EXCEPTION << "Scoring failed! Input data is incorrect and not supported!";
    } else if (sts == InferenceEngine::NETWORK_NOT_LOADED) {
        THROW_IE_EXCEPTION << "Scoring failed! " << dsc.msg;
    }
    wasInfered = true;
}

std::vector<InferenceResults> InferenceEngineConfigurator::getTopResult(unsigned int topCount) {
    if (!wasInfered) {
        THROW_IE_EXCEPTION << "Cannot get top results!";
    }
    std::vector<unsigned> results;
    // Get top N results
    InferenceEngine::TopResults(topCount, *_output, results);

    // Save top N results to a vector of InferenceResults objects
    std::vector<InferenceResults> outputResults;
    size_t batchSize = _output->dims()[1];

    topCount = std::min<unsigned int>(_output->dims()[0], topCount);

    if (batchSize != imageNames.size()) {
        THROW_IE_EXCEPTION << "Batch size is not equal to the number of images!";
    }
    for (size_t i = 0; i < batchSize; i++) {
        InferenceResults imageResult(imageNames.at(i));
        for (size_t j = 0; j < topCount; j++) {
            unsigned result = results[i * topCount + j];
            std::string label =
                    result < _classes.size() ? _classes[result] : stringFormat("label #%d", result);
            imageResult.addResult(
                    {static_cast<int>(result), _output->data()[result + i * (_output->size() / batchSize)], label});
        }
        outputResults.push_back(imageResult);
    }

    if (outputResults.size()) {
        std::cout << std::endl << "Top " << topCount << " results:" << std::endl << std::endl;
        for (size_t i = 0; i < outputResults.size(); i++) {
            std::cout << "Image " << outputResults.at(i).getName() << std::endl << std::endl;
            const std::vector<LabelProbability> imageResults = outputResults.at(i).getResults();
            for (size_t j = 0; j < imageResults.size(); j++) {
                std::cout << imageResults.at(j).getLabelIndex() << " " << imageResults.at(j).getProbability() << " "
                          << imageResults.at(j).getLabel() << std::endl;
            }
            std::cout << std::endl;
        }
    }

    return outputResults;
}

void InferenceEngineConfigurator::printGetPerformanceCounts(std::ostream &stream) {
    long long totalTime = 0;
    std::map<std::string, InferenceEngine::InferenceEngineProileInfo> perfomanceMap;
    // Get performance counts
    _plugin->GetPerformanceCounts(perfomanceMap, nullptr);
    // Print performance counts
    stream << std::endl << "Performance counts:" << std::endl << std::endl;
    for (std::map<std::string, InferenceEngine::InferenceEngineProileInfo>::const_iterator it = perfomanceMap.begin();
         it != perfomanceMap.end(); ++it) {
        stream << std::setw(30) << std::left << it->first + ":";
        switch (it->second.status) {
            case InferenceEngine::InferenceEngineProileInfo::EXECUTED:
                stream << std::setw(15) << std::left << "EXECUTED";
                break;
            case InferenceEngine::InferenceEngineProileInfo::NOT_RUN:
                stream << std::setw(15) << std::left << "NOT_RUN";
                break;
            case InferenceEngine::InferenceEngineProileInfo::OPTIMIZED_OUT:
                stream << std::setw(15) << std::left << "OPTIMIZED_OUT";
                break;
        }
        stream << std::setw(20) << std::left << "realTime: " + std::to_string(it->second.realTime_uSec);
        stream << " cpu: " << it->second.cpu_uSec << std::endl;
        if (it->second.realTime_uSec > 0) {
            totalTime += it->second.realTime_uSec;
        }
    }
    stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl;
}

/*
 * Build the full path to a plugin library
 * @param path - the plugin directory
 * @param input - the plugin name
 * @return the plugin path
 */
std::string InferenceEngineConfigurator::make_plugin_name(const std::string &path, const std::string &input) {
    std::string separator = "/";
    if (path.empty())
        separator = "";
    return path + separator + "lib" + input + ".so";
}


void InferenceEngineConfigurator::setISLVC2012MeanScalars() {
    // TODO: take the mean image from the user
    network.getNetwork().setMeanScalars({104.00698793f, 116.66876762f, 122.67891434f});
}

void InferenceEngineConfigurator::loadModel() {
    wasInfered = false;
    InferenceEngine::ResponseDesc dsc;
    // TODO: this needs to be handled in a smart wrapper over the inference engine plugin
    InferenceEngine::StatusCode sts = _plugin->LoadNetwork(network.getNetwork(), &dsc);
    if (sts == InferenceEngine::GENERAL_ERROR) {
        THROW_IE_EXCEPTION << dsc.msg;
    } else if (sts == InferenceEngine::NOT_IMPLEMENTED) {
        THROW_IE_EXCEPTION << "Model cannot be loaded! The plugin does not support this model!";
    }
}

static std::ostream &operator<<(std::ostream &os, const Version *version) {
    os << "\tPlugin version ......... ";
    if (nullptr == version) {
        os << "UNKNOWN";
    } else {
        os << version->apiVersion.major << "." << version->apiVersion.minor;
    }

    os << "\n\tPlugin name ............ ";
    if (nullptr == version || version->description == nullptr) {
        os << "UNKNOWN";
    } else {
        os << version->description;
    }

    os << "\n\tPlugin build ........... ";
    if (nullptr == version || version->buildNumber == nullptr) {
        os << "UNKNOWN";
    } else {
        os << version->buildNumber;
    }

    return os;
}

InferenceEnginePluginPtr InferenceEngineConfigurator::selectPlugin(const std::vector<std::string> &pluginDirs,
                                                                   const std::string &name) {
    std::stringstream errs;
    for (auto &pluginPath : pluginDirs) {
        try {
            InferenceEnginePluginPtr plugin(make_plugin_name(pluginPath, name));
            const Version *version;
            plugin->GetVersion(version);
            std::cout << version << std::endl;
            return plugin;
        }
        catch (const std::exception &ex) {
            errs << "cannot load plugin: " << name << " from " << pluginPath << ": " << ex.what() << ", skipping\n";
        }
    }
    std::cerr << errs.str();
    THROW_IE_EXCEPTION << "cannot load plugin: " << name;
}

--------------------------------------------------------------------------------
/dlinfer.h:
--------------------------------------------------------------------------------
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

/**
 * \brief Declaration of methods and classes for working with the Inference Engine API
 * \file InferenceEngineConfigurator.h
 * \example inference_engine_classification_sample/core/InferenceEngineConfigurator.h
 */
#pragma once

#include <inference_engine.hpp>

#include <memory>
#include <ostream>
#include <string>
#include <vector>

class LabelProbability {
private:
    /// Index of the current label
    int labelIdx = 0;
    /// Name of the class from the labels file
    std::string className;
    /// The probability of the prediction
    float probability = 0.0f;

public:
    /**
     * Constructor of the LabelProbability class
     * @param labelIdx - index of the current label
     * @param probability - the probability of the prediction
     * @param className - name of the class from the labels file
     * @return LabelProbability object
     */
    LabelProbability(int labelIdx, float probability, std::string className) : labelIdx(labelIdx),
                                                                               className(className),
                                                                               probability(probability) {}

    /**
     * Get label index
     * @return index of the current label
     */
    const int &getLabelIndex() const {
        return labelIdx;
    }

    /**
     * Get label name
     * @return label
     */
    const std::string &getLabel() const {
        return className;
    }

    /**
     * Get probability
     * @return probability
     */
    const float &getProbability() const {
        return probability;
    }
};

/**
 * \class InferenceResults
 * \brief Represents predicted data in an easy to use format
 */
class InferenceResults {
private:
    std::string image;
    std::vector<LabelProbability> results;

public:
    explicit InferenceResults(std::string &name) {
        image = name;
    }

    void addResult(LabelProbability result) {
        results.push_back(result);
    }

    const std::string &getName() const {
        return image;
    }

    const std::vector<LabelProbability> &getResults() const {
        return results;
    }
};

/**
 * \class InferenceEngineConfigurator
 * \brief This class communicates with the Inference Engine
 */
class InferenceEngineConfigurator {
public:
    /**
     * Constructor of the InferenceEngineConfigurator class
     * @param modelFile - the path to the model in .xml format
     * @param pluginPath - the directories to search for the plugin
     * @param pluginName - the name of the plugin used for prediction
     * @param labelFile - the path to a custom labels file (default is empty)
     * @return InferenceEngineConfigurator object
     */
    InferenceEngineConfigurator(const std::string &modelFile, const std::vector<std::string> &pluginPath,
                                const std::string &pluginName, const std::string &labelFile = "");

    /**
     * This method loads images for prediction into a blob
     * @param images - the image paths for prediction
     */
    void loadImages(const std::vector<std::string> &images);

    /**
     * This method loads a single image for prediction into a blob
     * @param image - the image path for prediction
     */
    void loadImages(const std::string &image);

    /**
     * Method that runs the prediction
     */
    void infer();

    std::vector<InferenceResults> getTopResult(unsigned int topCount);

    /**
     * Function that prints performance counts
     * @param stream - output stream
     */
    void printGetPerformanceCounts(std::ostream &stream);

    /**
     * Externally specify mean image values
     */
    void setISLVC2012MeanScalars();

    /**
     * Method to be called prior to infer
     */
    void loadModel();

private:
    InferenceEngine::CNNNetReader network;
    InferenceEngine::Blob::Ptr _input;
    InferenceEngine::TBlob<float>::Ptr _output;
    InferenceEngine::InferenceEnginePluginPtr _plugin;
    std::vector<std::string> _classes;
    bool imageLoaded = false;
    bool wasInfered = false;

    std::vector<std::string> imageNames;

    static std::string make_plugin_name(const std::string &path, const std::string &input);

    bool readLabels(const std::string &fileName);

    InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector<std::string> &pluginDirs,
                                                           const std::string &pluginName);
};

--------------------------------------------------------------------------------
/dlinfer.swigcxx:
--------------------------------------------------------------------------------
%module dlinfer
%{
#include "dlinfer.h"
%}

%include
%include "std_string.i"
%include "std_vector.i"

// This will create 2 wrapped types in Go called
// "StringVector" and "ByteVector" for their respective
// types.
namespace std {
    %template(StringVector) vector<string>;
    %template(ByteVector) vector<char>;
}

%include "dlinfer.h"

--------------------------------------------------------------------------------
/examples/basic_classification/main.go:
--------------------------------------------------------------------------------
package main

import (
	"log"
	"os"

	"github.com/gopherds/dlinfer"
)

const (
	model      = "/CaffeNet.xml"
	pluginPath = "/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/lib/intel64/"
	plugin     = "MKLDNNPlugin"
	labelsFile = "/CaffeNet.labels"
)

func main() {

	// Read in the image file name.
	args := os.Args
	if len(args) != 2 {
		log.Fatal("Please provide an image as input")
	}
	imageFile := args[1]

	// Create an inference configurator value.
	pluginPaths := dlinfer.NewStringVector()
	pluginPaths.Add(pluginPath)
	configurator := dlinfer.NewInferenceEngineConfigurator(model, pluginPaths, plugin, labelsFile)

	// Load our image.
	images := dlinfer.NewStringVector()
	images.Add(imageFile)
	configurator.LoadImages(images)

	// Load the model.
	configurator.LoadModel()

	// Infer the content of the image.
	configurator.Infer()

	// Get the top results for our image.
	configurator.GetTopResult(5)
}

--------------------------------------------------------------------------------
/model.go:
--------------------------------------------------------------------------------
// All material is licensed under the Apache License Version 2.0, January 2004
// http://www.apache.org/licenses/LICENSE-2.0

package dlinfer

import "os"

// #cgo CXXFLAGS: -std=c++11 -I/usr/include -I/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/include -I/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/samples/format_reader
// #cgo LDFLAGS: -L/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/bin/intel64/lib -L/opt/intel/deep_learning_sdk_2016.1.0.861/deployment_tools/inference_engine/lib/intel64 -ldl -linference_engine -lformat_reader
import "C"

// Configurator includes the necessary pieces of an
// Inference Engine Configurator, as used in the Intel
// Deep Learning SDK.
type Configurator struct {
	modelFile  string
	pluginPath string
	pluginName string
	labelFile  string
}

// NewConfigurator creates a new configurator for a particular
// trained model.
func NewConfigurator(modelFile, pluginPath, pluginName, labelFile string) (*Configurator, error) {

	// Validate the model file.
	if _, err := os.Stat(modelFile); os.IsNotExist(err) {
		return nil, err
	}

	// Return the configurator.
	return &Configurator{
		modelFile:  modelFile,
		pluginPath: pluginPath,
		pluginName: pluginName,
		labelFile:  labelFile,
	}, nil
}

// LoadImage loads an image as input to an inference.
func LoadImage() {

}
--------------------------------------------------------------------------------
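
The `Configurator` in `model.go` does not yet call into the SWIG-generated bindings, and `LoadImage` is an empty stub. Below is a minimal sketch of how the Go side might delegate to the same SWIG-generated calls already used in `examples/basic_classification/main.go` (`NewStringVector`, `NewInferenceEngineConfigurator`, `LoadImages`, `LoadModel`, `Infer`, `GetTopResult`). The `cfg` field, the `engine` helper, the `LoadImage` method signature, and `Classify` are hypothetical additions, and `InferenceEngineConfigurator` is assumed to be the Go type SWIG generates for the wrapped C++ class, as the constructor call in main.go suggests.

```go
package dlinfer

// Hypothetical field assumed to be added to Configurator for this sketch:
//
//	cfg InferenceEngineConfigurator // SWIG-generated wrapper, nil until first use

// engine lazily constructs the underlying SWIG-wrapped configurator
// from the fields validated in NewConfigurator.
func (c *Configurator) engine() InferenceEngineConfigurator {
	if c.cfg == nil {
		paths := NewStringVector()
		paths.Add(c.pluginPath)
		c.cfg = NewInferenceEngineConfigurator(c.modelFile, paths, c.pluginName, c.labelFile)
	}
	return c.cfg
}

// LoadImage queues an image file as input to the next inference.
func (c *Configurator) LoadImage(imageFile string) {
	images := NewStringVector()
	images.Add(imageFile)
	c.engine().LoadImages(images)
}

// Classify loads the model, runs the inference, and prints the top five
// results (the underlying C++ code writes them to stdout), mirroring
// examples/basic_classification/main.go.
func (c *Configurator) Classify() {
	engine := c.engine()
	engine.LoadModel()
	engine.Infer()
	engine.GetTopResult(5)
}
```

Keeping the SWIG object behind an unexported helper would let `NewConfigurator` keep returning a plain Go value while the C++ engine is only constructed on first use.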