├── .gitignore
├── CMakeLists.txt
├── README.md
├── example
│   ├── Makefile
│   ├── example.c
│   ├── example2.c
│   └── example3.c
├── get_deps.sh
├── tinydnnc.cc
└── tinydnnc.h

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | build/
3 | deps/
4 | *.o
5 | *.swp
6 | example/example
7 | example/example?
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1)
2 | project(tinydnnc CXX)
3 |
4 | set(CMAKE_CXX_STANDARD 11)
5 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
6 |
7 | include_directories(deps/tiny-dnn)
8 |
9 | add_library(tinydnnc SHARED tinydnnc.cc)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # tinydnnc
2 |
3 | Minimal C bindings for [tiny-dnn](https://github.com/tiny-dnn/tiny-dnn).
4 |
5 | **WARNING**: IN FLUX.
6 |
7 | ## Build
8 |
9 | To build the library:
10 | ```
11 | sh get_deps.sh
12 | mkdir build
13 | cd build
14 | cmake -DCMAKE_BUILD_TYPE:STRING=Release ../ && make
15 | ```
16 |
17 | To build the examples (MNIST classification, XOR regression, and a small 2-D classification demo):
18 | ```
19 | cd example
20 | make
21 | ```
22 |
23 | ## License
24 |
25 | BSD 3-Clause license: https://opensource.org/licenses/BSD-3-Clause
26 |
27 | Copyright 2016, Luca Antiga, Orobix Srl (www.orobix.com).
28 |
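29 | ## Usage
30 |
31 | A minimal sketch of the API, condensed from `example/example2.c` (XOR regression
32 | with two fully connected layers); see the `example` directory for complete programs:
33 |
34 | ```c
35 | #include "tinydnnc.h"
36 |
37 | float X[8] = {0,0, 0,1, 1,0, 1,1}; /* 4 samples x 2 inputs */
38 | float Y[4] = {0, 1, 1, 0};         /* 4 samples x 1 output */
39 |
40 | int main(void)
41 | {
42 |   DNN_Network *net = DNN_SequentialNetwork();
43 |   DNN_Layer *fc1 = DNN_FullyConnectedLayer(DNN_ACTIVATION_SIGMOID, 2, 2, 1,
44 |                                            DNN_BACKEND_TINYDNN);
45 |   DNN_Layer *fc2 = DNN_FullyConnectedLayer(DNN_ACTIVATION_SIGMOID, 2, 1, 1,
46 |                                            DNN_BACKEND_TINYDNN);
47 |   DNN_SequentialAdd(net, fc1);
48 |   DNN_SequentialAdd(net, fc2);
49 |
50 |   DNN_Optimizer *opt = DNN_AdamOptimizer(0.001, 0.9, 0.999, 0.9, 0.999);
51 |
52 |   /* 4 samples, 2 inputs, 1 output, minibatch 2, 10000 epochs,
53 |      no callbacks, no weight reset, 2 threads, no per-sample cost */
54 |   DNN_Fit(net, opt, DNN_LOSS_MSE, X, Y, 4, 2, 1, 2, 10000,
55 |           NULL, NULL, NULL, 0, 2, NULL);
56 |
57 |   float out[1];
58 |   DNN_Predict(net, X, out, 2, 1); /* prediction for the first sample */
59 |
60 |   DNN_NetworkDelete(net);
61 |   DNN_LayerDelete(fc1);
62 |   DNN_LayerDelete(fc2);
63 |   DNN_OptimizerDelete(opt);
64 |   return 0;
65 | }
66 | ```
67 |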
--------------------------------------------------------------------------------
/example/Makefile:
--------------------------------------------------------------------------------
1 |
2 | uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
3 |
4 | FINAL_CFLAGS=$(CFLAGS)
5 | FINAL_LDFLAGS=$(LDFLAGS)
6 | FINAL_LDPATH=
7 | TDNN_CC=gcc
8 |
9 | DEPS_PATH=../deps
10 |
11 | EXAMPLE_OBJ = example.o
12 | EXAMPLE2_OBJ = example2.o
13 | EXAMPLE3_OBJ = example3.o
14 |
15 | INCLUDE_FLAGS=-I ../ -I $(DEPS_PATH)/tiny-dnn -I $(DEPS_PATH)/mnist
16 |
17 | ifeq ($(uname_S),Linux)
18 | 	FINAL_CFLAGS += -fPIC -std=gnu99
19 | 	FINAL_LDFLAGS += -lc
20 | 	FINAL_LDPATH += LD_LIBRARY_PATH=../build
21 | else
22 | 	FINAL_CFLAGS += -fno-common -std=c99
23 | #	FINAL_LDFLAGS += -bundle -undefined dynamic_lookup -lc
24 | 	FINAL_LDFLAGS += -undefined dynamic_lookup -lc
25 | 	FINAL_LDPATH += DYLD_LIBRARY_PATH=../build
26 | endif
27 |
28 | all: example example2 example3
29 |
30 | example.o: example.c
31 | 	$(TDNN_CC) $(INCLUDE_FLAGS) -c $(FINAL_CFLAGS) example.c
32 |
33 | example: example.o
34 | 	$(TDNN_CC) $(FINAL_LDFLAGS) -o example $(EXAMPLE_OBJ) -L ../build -ltinydnnc
35 |
36 | example2.o: example2.c
37 | 	$(TDNN_CC) $(INCLUDE_FLAGS) -c $(FINAL_CFLAGS) example2.c
38 |
39 | example2: example2.o
40 | 	$(TDNN_CC) $(FINAL_LDFLAGS) -o example2 $(EXAMPLE2_OBJ) -L ../build -ltinydnnc
41 |
42 | example3.o: example3.c
43 | 	$(TDNN_CC) $(INCLUDE_FLAGS) -c $(FINAL_CFLAGS) example3.c
44 |
45 | example3: example3.o
46 | 	$(TDNN_CC) $(FINAL_LDFLAGS) -o example3 $(EXAMPLE3_OBJ) -L ../build -ltinydnnc
47 |
48 | clean:
49 | 	rm -f example example2 example3 *.o
50 |
--------------------------------------------------------------------------------
/example/example.c:
--------------------------------------------------------------------------------
1 |
2 | #include <stdio.h>
3 | #include "tinydnnc.h"
4 |
5 | #define USE_MNIST_LOADER
6 | #define MNIST_DOUBLE
7 | #include "mnist.h"
8 |
9 | #include <stdlib.h>
10 | #include <time.h>
11 |
12 | clock_t tick;
13 |
14 | typedef struct CallbackData {
15 |   float *test_images;
16 |   long *test_labels;
17 |   long nsamples;
18 |   long sample_size;
19 | } CallbackData;
20 |
21 | void batchCb(DNN_Network *net, void *data)
22 | {
23 | }
24 |
25 | long epoch = 0;
26 |
27 | void epochCb(DNN_Network *net, void *data)
28 | {
29 |   CallbackData *cbdata = (CallbackData *)data;
30 |   // Clone just for sake of cloning
31 |   DNN_Network *net2 = DNN_NetworkClone(net);
32 |   float error = DNN_GetError(net2,
33 |                              cbdata->test_images,
34 |                              cbdata->test_labels,
35 |                              cbdata->nsamples,
36 |                              cbdata->sample_size);
37 |   clock_t diff = clock() - tick;
38 |   float elapsed = (float)diff / CLOCKS_PER_SEC;
39 |   printf("Epoch: %03ld; Error: %f; Elapsed(s): %f\n",epoch,error,elapsed);
40 |   epoch += 1;
41 |   tick = clock();
42 |   DNN_NetworkDelete(net2);
43 | }
44 |
45 | int main(int argc, char* argv[])
46 | {
47 |   int imgsize = 28;
48 |   int nclasses = 10;
49 |
50 |   int npixels = imgsize * imgsize;
51 |
52 |   mnist_data *mnist_train_data;
53 |   mnist_data *mnist_test_data;
54 |   unsigned int nimages;
55 |   unsigned int n_test_images;
56 |   int ret;
57 |
58 |   // MEMO
59 |   //typedef struct mnist_data {
60 |   //  MNIST_DATA_TYPE data[28][28]; /* 28x28 data for the image */
61 |   //  unsigned int label;           /* label : 0 to 9 */
62 |   //} mnist_data;
63 |
64 |   ret = mnist_load("../deps/tiny-dnn/data/train-images.idx3-ubyte",
65 |                    "../deps/tiny-dnn/data/train-labels.idx1-ubyte",
66 |                    &mnist_train_data, &nimages);
67 |
68 |   if (ret) {
69 |     printf("Error reading mnist data: %d\n", ret);
70 |     exit(1);
71 |   }
72 |
73 |   float *train_images = (float *)malloc(nimages*npixels*sizeof(float));
74 |   long *train_labels = (long *)malloc(nimages*sizeof(long));
75 |
76 |   for (int n=0; n<nimages; n++) {
[... remainder of file not recovered ...]
--------------------------------------------------------------------------------
/example/example2.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <string.h>
3 | #include <time.h>
4 |
5 | #include "tinydnnc.h"
6 |
7 | clock_t tick;
8 |
9 | float inputDataset[8] = {0, 0, 0, 1, 1, 0, 1, 1};
10 | float outputDataset[4] = {0, 1, 1, 0};
11 | #define NUM_INPUTS 2
12 | #define NUM_OUTPUTS 1
13 | #define SET_SIZE 4
14 |
15 | long epoch = 0;
16 |
17 | void epochCb(DNN_Network *net, void *data)
18 | {
19 |   float error = DNN_GetLoss(net,
20 |                             DNN_LOSS_MSE,
21 |                             inputDataset,
22 |                             outputDataset,
23 |                             SET_SIZE,
24 |                             NUM_INPUTS,
25 |                             NUM_OUTPUTS);
26 |   clock_t diff = clock() - tick;
27 |   float elapsed = (float)diff / CLOCKS_PER_SEC;
28 |   if (!(epoch % 100))
29 |     printf("Epoch: %03ld; Error: %f; Elapsed(s): %f\n",epoch,error,elapsed);
30 |   epoch += 1;
31 |   tick = clock();
32 | }
33 |
34 | void useNetwork(DNN_Network *net) {
35 |   float inputs[2];
36 |   float outputs[1];
37 |   for (int j = 0; j < SET_SIZE; j++) {
38 |     memcpy(inputs,inputDataset+j*2,sizeof(float)*2);
39 |     DNN_Predict(net,inputs,outputs,NUM_INPUTS,NUM_OUTPUTS);
40 |     printf("%f XOR %f -> %f\n", inputs[0], inputs[1], outputs[0]);
41 |   }
42 | }
43 |
44 | int main(int argc, char* argv[])
45 | {
46 |   DNN_Network *net = DNN_SequentialNetwork();
47 |
48 |   DNN_Layer *fc = DNN_FullyConnectedLayer(DNN_ACTIVATION_SIGMOID,
49 |                                           NUM_INPUTS,
50 |                                           NUM_INPUTS,1,
51 |                                           DNN_BACKEND_TINYDNN);
52 |   DNN_Layer *fc2 = DNN_FullyConnectedLayer(DNN_ACTIVATION_SIGMOID,
53 |                                            NUM_INPUTS,
54 |                                            NUM_OUTPUTS,1,
55 |                                            DNN_BACKEND_TINYDNN);
56 |
57 |   DNN_SequentialAdd(net,fc);
58 |   DNN_SequentialAdd(net,fc2);
59 |
60 |   DNN_Optimizer *optimizer = DNN_AdamOptimizer(0.001,0.9,0.999,0.9,0.999);
61 |   //DNN_Optimizer *optimizer = DNN_SGDOptimizer(0.01,0.0);
62 |
63 |   tick = clock();
64 |   epoch = 0;
65 |
66 |   DNN_Fit(net, optimizer, DNN_LOSS_MSE,
67 |           inputDataset, outputDataset,
68 |           SET_SIZE, NUM_INPUTS, NUM_OUTPUTS, 2 /* minibatch */, 10000,
69 |           NULL, epochCb, NULL,
70 |           0, 2, NULL);
71 |
72 |   useNetwork(net);
73 |
74 |   DNN_NetworkDelete(net);
75 |   DNN_LayerDelete(fc);
76 |   DNN_LayerDelete(fc2);
77 |   DNN_OptimizerDelete(optimizer);
78 |   printf("DONE\n");
79 |   return 0;
80 | }
81 |
82 |
--------------------------------------------------------------------------------
/example/example3.c:
--------------------------------------------------------------------------------
1 |
2 | #include <stdio.h>
3 | #include "tinydnnc.h"
4 |
5 | #include <stdlib.h>
6 | #include <string.h>
7 | #include <math.h>
8 | #include <time.h>
9 |
10 | clock_t tick;
11 | long epoch = 0;
12 |
13 | void epochCb(DNN_Network *net, void *data)
14 | {
15 |   clock_t diff = clock() - tick;
16 |   float elapsed = (float)diff / CLOCKS_PER_SEC;
17 |   printf("Epoch: %03ld; Elapsed(s): %f\n", epoch, elapsed);
18 |   epoch += 1;
19 |   tick = clock();
20 | }
21 |
22 | int main(int argc, char* argv[])
23 | {
24 |   int inputdim = 2;
25 |   int nclasses = 3;
26 |
27 |   int ntrain = 1000;
28 |   int ntest = 100;
29 |
30 |   int ret;
31 |
32 |   float *train_data = (float *)malloc(inputdim*ntrain*sizeof(float));
33 |   long *train_labels = (long *)malloc(ntrain*sizeof(long));
34 |
35 |   float k = 0.0;
36 |   long errors = 0;
37 |
38 |   const char *charset = ".0/ ";
39 |   long c;
40 |   float x, y;
41 |   char cmap[80][80];
42 |   for (int y=0; y<80; y++) {
43 |     for (int x=0; x<80; x++) {
44 |       cmap[x][y] = 3;
45 |     }
46 |   }
47 |   for (int n=0; n<ntrain; n++) {
[... remainder of file not recovered ...]
--------------------------------------------------------------------------------
/tinydnnc.cc:
--------------------------------------------------------------------------------
1 |
2 | #include <cassert>
3 |
4 | #include "tiny_dnn/tiny_dnn.h"
5 | #include "tinydnnc.h"
6 |
7 | using namespace tiny_dnn;
8 | using namespace tiny_dnn::activation;
9 |
10 | // Extension to graph is planned
11 |
12 | #define DNN_NewLayerMacro(LAYER,LAYERNAME,LAYERCLASS,...) \
13 |   LAYER->ptr = new LAYERCLASS(__VA_ARGS__); \
14 |   LAYER->type = LAYERNAME; \
15 |   LAYER->acttype = DNN_ACTIVATION_NONE; \
16 |   LAYER->addfn = DNN_Add_##LAYERNAME; \
17 |   LAYER->deletefn = DNN_Delete_##LAYERNAME;
18 |
19 | #define DNN_NewFFLayerMacro(LAYER,LAYERNAME,LAYERCLASS,ACTTYPE,...) \
20 |   switch (ACTTYPE) { \
21 |   case DNN_ACTIVATION_NONE: \
22 |     assert(0); \
23 |     break; \
24 |   case DNN_ACTIVATION_IDENTITY: \
25 |     LAYER->ptr = new LAYERCLASS<identity>(__VA_ARGS__); \
26 |     break; \
27 |   case DNN_ACTIVATION_SIGMOID: \
28 |     LAYER->ptr = new LAYERCLASS<sigmoid>(__VA_ARGS__); \
29 |     break; \
30 |   case DNN_ACTIVATION_RELU: \
31 |     LAYER->ptr = new LAYERCLASS<relu>(__VA_ARGS__); \
32 |     break; \
33 |   case DNN_ACTIVATION_LEAKYRELU: \
34 |     LAYER->ptr = new LAYERCLASS<leaky_relu>(__VA_ARGS__); \
35 |     break; \
36 |   case DNN_ACTIVATION_ELU: \
37 |     LAYER->ptr = new LAYERCLASS<elu>(__VA_ARGS__); \
38 |     break; \
39 |   case DNN_ACTIVATION_SOFTMAX: \
40 |     LAYER->ptr = new LAYERCLASS<softmax>(__VA_ARGS__); \
41 |     break; \
42 |   case DNN_ACTIVATION_TANH: \
43 |     LAYER->ptr = new LAYERCLASS<tan_h>(__VA_ARGS__); \
44 |     break; \
45 |   case DNN_ACTIVATION_TANHP1M2: \
46 |     LAYER->ptr = new LAYERCLASS<tan_hp1m2>(__VA_ARGS__); \
47 |     break; \
48 |   } \
49 |   LAYER->type = LAYERNAME; \
50 |   LAYER->acttype = ACTTYPE; \
51 |   LAYER->addfn = DNN_Add_##LAYERNAME; \
52 |   LAYER->deletefn = DNN_Delete_##LAYERNAME;
53 |
54 | #define DNN_AddLayerMacro(NETWORK,LAYER,LAYERCLASS) \
55 |   *((network<sequential> *)NETWORK->ptr) << *((LAYERCLASS *)LAYER->ptr);
56 |
57 | #define DNN_AddFFLayerMacro(NETWORK,LAYER,LAYERCLASS) \
58 |   switch (LAYER->acttype) { \
59 |   case DNN_ACTIVATION_NONE: \
60 |     assert(0); \
61 |     break; \
62 |   case DNN_ACTIVATION_IDENTITY: \
63 |     *((network<sequential> *)NETWORK->ptr) << \
64 |       *((LAYERCLASS<identity> *)LAYER->ptr); \
65 |     break; \
66 |   case DNN_ACTIVATION_SIGMOID: \
67 |     *((network<sequential> *)NETWORK->ptr) << \
68 |       *((LAYERCLASS<sigmoid> *)LAYER->ptr); \
69 |     break; \
70 |   case DNN_ACTIVATION_RELU: \
71 |     *((network<sequential> *)NETWORK->ptr) << \
72 |       *((LAYERCLASS<relu> *)LAYER->ptr); \
73 |     break; \
74 |   case DNN_ACTIVATION_LEAKYRELU: \
75 |     *((network<sequential> *)NETWORK->ptr) << \
76 |       *((LAYERCLASS<leaky_relu> *)LAYER->ptr); \
77 |     break; \
78 |   case DNN_ACTIVATION_ELU: \
79 |     *((network<sequential> *)NETWORK->ptr) << \
80 |       *((LAYERCLASS<elu> *)LAYER->ptr); \
81 |     break; \
82 |   case DNN_ACTIVATION_SOFTMAX: \
83 |     *((network<sequential> *)NETWORK->ptr) << \
84 |       *((LAYERCLASS<softmax> *)LAYER->ptr); \
85 |     break; \
86 |   case DNN_ACTIVATION_TANH: \
87 |     *((network<sequential> *)NETWORK->ptr) << \
88 |       *((LAYERCLASS<tan_h> *)LAYER->ptr); \
89 |     break; \
90 |   case DNN_ACTIVATION_TANHP1M2: \
91 |     *((network<sequential> *)NETWORK->ptr) << \
92 |       *((LAYERCLASS<tan_hp1m2> *)LAYER->ptr); \
93 |     break; \
94 |   }
95 |
96 | #define DNN_DeleteLayerMacro(LAYER,LAYERCLASS) \
97 |   delete (LAYERCLASS *)LAYER->ptr;
98 |
99 | #define DNN_DeleteFFLayerMacro(LAYER,LAYERCLASS) \
100 |   switch (LAYER->acttype) { \
101 |   case DNN_ACTIVATION_NONE: \
102 |     assert(0); \
103 |     break; \
104 |   case DNN_ACTIVATION_IDENTITY: \
105 |     delete (LAYERCLASS<identity> *)LAYER->ptr; \
106 |     break; \
107 |   case DNN_ACTIVATION_SIGMOID: \
108 |     delete (LAYERCLASS<sigmoid> *)LAYER->ptr; \
109 |     break; \
110 |   case DNN_ACTIVATION_RELU: \
111 |     delete (LAYERCLASS<relu> *)LAYER->ptr; \
112 |     break; \
113 |   case DNN_ACTIVATION_LEAKYRELU: \
114 |     delete (LAYERCLASS<leaky_relu> *)LAYER->ptr; \
115 |     break; \
116 |   case DNN_ACTIVATION_ELU: \
117 |     delete (LAYERCLASS<elu> *)LAYER->ptr; \
118 |     break; \
119 |   case DNN_ACTIVATION_SOFTMAX: \
120 |     delete (LAYERCLASS<softmax> *)LAYER->ptr; \
121 |     break; \
122 |   case DNN_ACTIVATION_TANH: \
123 |     delete (LAYERCLASS<tan_h> *)LAYER->ptr; \
124 |     break; \
125 |   case DNN_ACTIVATION_TANHP1M2: \
126 |     delete (LAYERCLASS<tan_hp1m2> *)LAYER->ptr; \
127 |     break; \
128 |   }
129 |
130 | #define DNN_DefineLayerMacro(LAYERNAME,LAYERCLASS) \
131 |   void DNN_Add_##LAYERNAME(DNN_Network *net, DNN_Layer *layer) \
132 |   { \
133 |     DNN_AddLayerMacro(net,layer,LAYERCLASS); \
134 |   } \
135 |   void DNN_Delete_##LAYERNAME(DNN_Layer *layer) \
136 |   { \
137 |     DNN_DeleteLayerMacro(layer,LAYERCLASS); \
138 |   }
139 |
140 | #define DNN_DefineFFLayerMacro(LAYERNAME,LAYERCLASS) \
141 |   void DNN_Add_##LAYERNAME(DNN_Network *net, DNN_Layer *layer) \
142 |   { \
143 |     DNN_AddFFLayerMacro(net,layer,LAYERCLASS); \
144 |   } \
145 |   void DNN_Delete_##LAYERNAME(DNN_Layer *layer) \
146 |   { \
147 |     DNN_DeleteFFLayerMacro(layer,LAYERCLASS); \
148 |   }
149 |
150 | #define DNN_DefineOptimizerMacro(OPTNAME,OPTCLASS) \
151 |   void DNN_Delete_##OPTNAME(DNN_Optimizer *opt) \
152 |   { \
153 |     delete (OPTCLASS *)opt->ptr; \
154 |   }
155 |
156 | #define DNN_SetupOptimizerMacro(OPT,OPTNAME,OPTPTR) \
157 |   OPT->type = OPTNAME; \
158 |   OPT->ptr = OPTPTR; \
159 |   OPT->deletefn = DNN_Delete_##OPTNAME;
160 |
166 | #define DNN_SetBackendMacro(BACKEND,BACKENDTYPE) \
167 |   switch (BACKENDTYPE) { \
168 |   case DNN_BACKEND_TINYDNN: \
169 |     BACKEND = backend_t::tiny_dnn; \
170 |     break; \
171 |   case DNN_BACKEND_NNPACK: \
172 |     BACKEND = backend_t::nnpack; \
173 |     break; \
174 |   case DNN_BACKEND_LIBDNN: \
175 |     BACKEND = backend_t::libdnn; \
176 |     break; \
177 |   case DNN_BACKEND_AVX: \
178 |     BACKEND = backend_t::avx; \
179 |     break; \
180 |   case DNN_BACKEND_OPENCL: \
181 |     BACKEND = backend_t::opencl; \
182 |     break; \
183 |   }
184 |
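// For reference, a sketch of what the machinery above boils down to for one
// concrete case (illustration only, not generated code): DNN_FullyConnectedLayer
// further down calls DNN_NewFFLayerMacro, which for DNN_ACTIVATION_SIGMOID
// expands to roughly
//
//   layer->ptr      = new fully_connected_layer<sigmoid>(in_dim, out_dim, has_bias, backend);
//   layer->type     = DNN_LAYER_FC;
//   layer->acttype  = DNN_ACTIVATION_SIGMOID;
//   layer->addfn    = DNN_Add_DNN_LAYER_FC;    // streams the layer into the network<sequential>
//   layer->deletefn = DNN_Delete_DNN_LAYER_FC; // deletes through the correct template type
//
// The switches are needed because the activation is a compile-time template
// parameter in tiny-dnn, while the C API selects it with a runtime enum.
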
185 | void DNN_LayerDelete(DNN_Layer *layer)
186 | {
187 |   (*layer->deletefn)(layer);
188 |   delete layer;
189 | }
190 |
191 | DNN_Network *DNN_SequentialNetwork()
192 | {
193 |   DNN_Network *net = new DNN_Network;
194 |   net->type = DNN_NETWORK_SEQUENTIAL;
195 |   net->ptr = new network<sequential>;
196 |   return net;
197 | }
198 |
199 | void DNN_SequentialAdd(DNN_Network *net, DNN_Layer *layer)
200 | {
201 |   layer->addfn(net,layer);
202 | }
203 |
204 | DNN_Network *DNN_NetworkClone(DNN_Network *fromnet)
205 | {
206 |   DNN_Network *net = new DNN_Network;
207 |   net->type = DNN_NETWORK_SEQUENTIAL;
208 |   net->ptr = new network<sequential>;
209 |   *(network<sequential> *)net->ptr = *(network<sequential> *)fromnet->ptr;
210 |   return net;
211 | }
212 |
213 | void DNN_NetworkDelete(DNN_Network *net)
214 | {
215 |   delete (network<sequential> *)net->ptr;
216 |   delete net;
217 | }
218 |
219 | void DNN_OptimizerDelete(DNN_Optimizer *opt)
220 | {
221 |   opt->deletefn(opt);
222 |   delete opt;
223 | }
224 |
225 | //////////////////////////////////////////////
226 | // Layers
227 | //////////////////////////////////////////////
228 |
229 | DNN_DefineLayerMacro(DNN_LAYER_EADD,elementwise_add_layer);
230 | DNN_Layer *DNN_ElwiseAddLayer(long num_args,
231 |                               long dim)
232 | {
233 |   DNN_Layer *layer = new DNN_Layer;
234 |
235 |   DNN_NewLayerMacro(layer, DNN_LAYER_EADD, elementwise_add_layer,
236 |                     num_args, dim);
237 |
238 |   return layer;
239 | }
240 |
241 | DNN_DefineFFLayerMacro(DNN_LAYER_AVGPOOL,average_pooling_layer);
242 | DNN_Layer *DNN_AveragePoolLayer(enum DNN_ActivationType acttype,
243 |                                 long in_width, long in_height,
244 |                                 long in_channels, long pool_size,
245 |                                 long stride)
246 | {
247 |   DNN_Layer *layer = new DNN_Layer;
248 |
249 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_AVGPOOL, average_pooling_layer,
250 |                       acttype,
251 |                       in_width, in_height, in_channels, pool_size, stride);
252 |
253 |   return layer;
254 | }
255 |
256 | DNN_DefineFFLayerMacro(DNN_LAYER_AVGUNPOOL,average_unpooling_layer);
257 | DNN_Layer *DNN_AverageUnpoolLayer(enum DNN_ActivationType acttype,
258 |                                   long in_width, long in_height,
259 |                                   long in_channels, long pool_size,
260 |                                   long stride)
261 | {
262 |   DNN_Layer *layer = new DNN_Layer;
263 |
264 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_AVGUNPOOL, average_unpooling_layer,
265 |                       acttype,
266 |                       in_width, in_height, in_channels, pool_size, stride);
267 |
268 |   return layer;
269 | }
270 |
271 | DNN_DefineLayerMacro(DNN_LAYER_BATCHNORM,batch_normalization_layer);
272 | DNN_Layer *DNN_BatchNormalizationLayer(long in_spatial_size,
273 |                                        long in_channels,
274 |                                        float epsilon,  // 1e-5
275 |                                        float momentum, // 0.999
276 |                                        enum DNN_Phase phase) // DNN_PHASE_TRAIN
277 | {
278 |   DNN_Layer *layer = new DNN_Layer;
279 |
280 |   net_phase nphase;
281 |   switch (phase) {
282 |   case DNN_PHASE_TRAIN:
283 |     nphase = net_phase::train;
284 |     break;
285 |   case DNN_PHASE_TEST:
286 |     nphase = net_phase::test;
287 |     break;
288 |   }
289 |
290 |   DNN_NewLayerMacro(layer, DNN_LAYER_BATCHNORM, batch_normalization_layer,
291 |                     in_spatial_size, in_channels, epsilon, momentum,
292 |                     nphase);
293 |
294 |   return layer;
295 | }
296 |
297 | DNN_DefineLayerMacro(DNN_LAYER_CONCAT,concat_layer);
298 | DNN_Layer *DNN_ConcatLayer(long num_args,
299 |                            long ndim)
300 | {
301 |   DNN_Layer *layer = new DNN_Layer;
302 |
303 |   DNN_NewLayerMacro(layer, DNN_LAYER_CONCAT, concat_layer,
304 |                     num_args, ndim);
305 |
306 |   return layer;
307 | }
308 |
309 | DNN_DefineFFLayerMacro(DNN_LAYER_CONV,convolutional_layer);
310 | DNN_Layer *DNN_ConvolutionalLayer(enum DNN_ActivationType acttype,
311 |                                   long in_width, long in_height,
312 |                                   long window_width, long window_height,
313 |                                   long in_channels, long out_channels,
314 |                                   enum DNN_Padding padtype,
315 |                                   long has_bias,
316 |                                   long w_stride, long h_stride,
317 |                                   enum DNN_BackendType backend_type)
318 | {
319 |   enum padding pad;
320 |   switch (padtype) {
321 |   case DNN_PADDING_SAME:
322 |     pad = padding::same;
323 |     break;
324 |   case DNN_PADDING_VALID:
325 |     pad = padding::valid;
326 |     break;
327 |   }
328 |
329 |   enum backend_t backend;
330 |   DNN_SetBackendMacro(backend,backend_type);
331 |
332 |   DNN_Layer *layer = new DNN_Layer;
333 |
334 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_CONV, convolutional_layer,
335 |                       acttype,
336 |                       in_width, in_height,
337 |                       window_width, window_height,
338 |                       in_channels, out_channels,
339 |                       pad, has_bias,
340 |                       w_stride, h_stride,
341 |                       backend);
342 |
343 |   return layer;
344 | }
345 |
346 | DNN_DefineFFLayerMacro(DNN_LAYER_DECONV,deconvolutional_layer);
347 | DNN_Layer *DNN_DeconvolutionalLayer(enum DNN_ActivationType acttype,
348 |                                     long in_width, long in_height,
349 |                                     long window_width, long window_height,
350 |                                     long in_channels, long out_channels,
351 |                                     enum DNN_Padding padtype,
352 |                                     long has_bias,
353 |                                     long w_stride, long h_stride,
354 |                                     enum DNN_BackendType backend_type)
355 | {
356 |   enum padding pad;
357 |   switch (padtype) {
358 |   case DNN_PADDING_SAME:
359 |     pad = padding::same;
360 |     break;
361 |   case DNN_PADDING_VALID:
362 |     pad = padding::valid;
363 |     break;
364 |   }
365 |
366 |   enum backend_t backend;
367 |   DNN_SetBackendMacro(backend,backend_type);
368 |
369 |   DNN_Layer *layer = new DNN_Layer;
370 |
371 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_DECONV, deconvolutional_layer,
372 |                       acttype,
373 |                       in_width, in_height,
374 |                       window_width, window_height,
375 |                       in_channels, out_channels,
376 |                       pad, has_bias,
377 |                       w_stride, h_stride,
378 |                       backend);
379 |
380 |   return layer;
381 | }
382 |
383 | DNN_DefineLayerMacro(DNN_LAYER_DROPOUT,dropout_layer);
384 | DNN_Layer *DNN_DropoutLayer(long in_dim,
385 |                             float dropout_rate,
386 |                             enum DNN_Phase phase)
387 | {
388 |   DNN_Layer *layer = new DNN_Layer;
389 |
390 |   net_phase nphase;
391 |   switch (phase) {
392 |   case DNN_PHASE_TRAIN:
393 |     nphase = net_phase::train;
394 |     break;
395 |   case DNN_PHASE_TEST:
396 |     nphase = net_phase::test;
397 |     break;
398 |   }
399 |
400 |   DNN_NewLayerMacro(layer, DNN_LAYER_DROPOUT, dropout_layer,
401 |                     in_dim, dropout_rate, nphase);
402 |
403 |   return layer;
404 | }
405 |
406 | DNN_DefineFFLayerMacro(DNN_LAYER_FC,fully_connected_layer);
407 | DNN_Layer *DNN_FullyConnectedLayer(enum DNN_ActivationType acttype,
408 |                                    long in_dim, long out_dim,
409 |                                    long has_bias,
410 |                                    enum DNN_BackendType backend_type)
411 | {
412 |   enum backend_t backend;
413 |   DNN_SetBackendMacro(backend,backend_type);
414 |
415 |   DNN_Layer *layer = new DNN_Layer;
416 |
417 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_FC, fully_connected_layer,
418 |                       acttype,
419 |                       in_dim, out_dim, has_bias,
420 |                       backend);
421 |
422 |   return layer;
423 | }
424 |
425 | DNN_DefineLayerMacro(DNN_LAYER_INPUT,input_layer);
426 | DNN_Layer *DNN_InputLayer(long dim0, long dim1, long dim2)
427 | {
428 |   DNN_Layer *layer = new DNN_Layer;
429 |
430 |   DNN_NewLayerMacro(layer, DNN_LAYER_INPUT, input_layer,
431 |                     shape3d(dim0,dim1,dim2));
432 |
433 |   return layer;
434 | }
435 |
436 | DNN_DefineFFLayerMacro(DNN_LAYER_LINEAR,linear_layer);
437 | DNN_Layer *DNN_LinearLayer(enum DNN_ActivationType acttype,
438 |                            long dim, float scale, float bias)
439 | {
440 |   DNN_Layer *layer = new DNN_Layer;
441 |
442 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_LINEAR, linear_layer,
443 |                       acttype,
444 |                       dim, scale, bias);
445 |
446 |   return layer;
447 | }
448 |
449 | DNN_DefineFFLayerMacro(DNN_LAYER_LRN,lrn_layer);
450 | DNN_Layer *DNN_LRNLayer(enum DNN_ActivationType acttype,
451 |                         long in_width, long in_height,
452 |                         long local_size, long in_channels,
453 |                         float alpha, float beta,
454 |                         enum DNN_Region region)
455 | {
456 |   DNN_Layer *layer = new DNN_Layer;
457 |
458 |   norm_region nregion;
459 |   switch (region) {
460 |   case DNN_REGION_ACROSS:
461 |     nregion = norm_region::across_channels;
462 |     break;
463 |   case DNN_REGION_WITHIN:
464 |     nregion = norm_region::within_channels;
465 |     break;
466 |   }
467 |
468 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_LRN, lrn_layer,
469 |                       acttype,
470 |                       in_width, in_height,
471 |                       local_size, in_channels,
472 |                       alpha, beta, nregion);
473 |
474 |   return layer;
475 | }
476 |
477 | DNN_DefineFFLayerMacro(DNN_LAYER_MAXPOOL,max_pooling_layer);
478 | DNN_Layer *DNN_MaxPoolLayer(enum DNN_ActivationType acttype,
479 |                             long in_width, long in_height,
480 |                             long in_channels, long pool_size,
481 |                             long stride,
482 |                             enum DNN_BackendType backend_type)
483 | {
484 |   enum backend_t backend;
485 |   DNN_SetBackendMacro(backend,backend_type);
486 |
487 |   DNN_Layer *layer = new DNN_Layer;
488 |
489 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_MAXPOOL, max_pooling_layer,
490 |                       acttype,
491 |                       in_width, in_height, in_channels,
492 |                       pool_size, stride,
493 |                       backend);
494 |
495 |   return layer;
496 | }
497 |
498 | // FIXME COMPILER ERROR
499 | //DNN_DefineFFLayerMacro(DNN_LAYER_MAXUNPOOL,max_unpooling_layer);
500 | //DNN_Layer *DNN_MaxUnpoolLayer(enum DNN_ActivationType acttype,
501 | //                              long in_width, long in_height,
502 | //                              long in_channels, long pool_size,
503 | //                              long stride)
504 | //{
505 | //  DNN_Layer *layer = new DNN_Layer;
506 | //
507 | //  DNN_NewFFLayerMacro(layer, DNN_LAYER_MAXUNPOOL, max_unpooling_layer,
508 | //                      acttype,
509 | //                      in_width, in_height, in_channels,
510 | //                      pool_size, stride);
511 | //
512 | //  return layer;
513 | //}
514 |
515 | // TODO partial_connected_layer (requires connecting manually)
516 |
517 | DNN_DefineLayerMacro(DNN_LAYER_POWER,power_layer);
518 | DNN_Layer *DNN_PowerLayer(long dim0, long dim1, long dim2, float factor)
519 | {
520 |   DNN_Layer *layer = new DNN_Layer;
521 |
522 |   DNN_NewLayerMacro(layer, DNN_LAYER_POWER, power_layer,
523 |                     shape3d(dim0,dim1,dim2), factor);
524 |
525 |   return layer;
526 | }
527 |
528 | DNN_DefineFFLayerMacro(DNN_LAYER_QCONV,quantized_convolutional_layer);
529 | DNN_Layer *DNN_QuantizedConvolutionalLayer(enum DNN_ActivationType acttype,
530 |                                            long in_width, long in_height,
531 |                                            long window_width, long window_height,
532 |                                            long in_channels, long out_channels,
533 |                                            enum DNN_Padding padtype,
534 |                                            long has_bias,
535 |                                            long w_stride, long h_stride,
536 |                                            enum DNN_BackendType backend_type)
537 | {
538 |   enum padding pad;
539 |   switch (padtype) {
540 |   case DNN_PADDING_SAME:
541 |     pad = padding::same;
542 |     break;
543 |   case DNN_PADDING_VALID:
544 |     pad = padding::valid;
545 |     break;
546 |   }
547 |
548 |   enum backend_t backend;
549 |   DNN_SetBackendMacro(backend,backend_type);
550 |
551 |   DNN_Layer *layer = new DNN_Layer;
552 |
553 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_QCONV, quantized_convolutional_layer,
554 |                       acttype,
555 |                       in_width, in_height,
556 |                       window_width, window_height,
557 |                       in_channels, out_channels,
558 |                       pad, has_bias,
559 |                       w_stride, h_stride,
560 |                       backend);
561 |
562 |   return layer;
563 | }
564 |
565 | DNN_DefineFFLayerMacro(DNN_LAYER_QDECONV,quantized_deconvolutional_layer);
566 | DNN_Layer *DNN_QuantizedDeconvolutionalLayer(enum DNN_ActivationType acttype,
567 |                                              long in_width, long in_height,
568 |                                              long window_width, long window_height,
569 |                                              long in_channels, long out_channels,
570 |                                              enum DNN_Padding padtype,
571 |                                              long has_bias,
572 |                                              long w_stride, long h_stride,
573 |                                              enum DNN_BackendType backend_type)
574 | {
575 |   enum padding pad;
576 |   switch (padtype) {
577 |   case DNN_PADDING_SAME:
578 |     pad = padding::same;
579 |     break;
580 |   case DNN_PADDING_VALID:
581 |     pad = padding::valid;
582 |     break;
583 |   }
584 |
585 |   enum backend_t backend;
586 |   DNN_SetBackendMacro(backend,backend_type);
587 |
588 |   DNN_Layer *layer = new DNN_Layer;
589 |
590 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_QDECONV, quantized_deconvolutional_layer,
591 |                       acttype,
592 |                       in_width, in_height,
593 |                       window_width, window_height,
594 |                       in_channels, out_channels,
595 |                       pad, has_bias,
596 |                       w_stride, h_stride,
597 |                       backend);
598 |
599 |   return layer;
600 | }
601 |
602 | DNN_DefineFFLayerMacro(DNN_LAYER_QFC,quantized_fully_connected_layer);
603 | DNN_Layer *DNN_QuantizedFullyConnectedLayer(enum DNN_ActivationType acttype,
604 |                                             long in_dim, long out_dim,
605 |                                             long has_bias,
606 |                                             enum DNN_BackendType backend_type)
607 | {
608 |   enum backend_t backend;
609 |   DNN_SetBackendMacro(backend,backend_type);
610 |
611 |   DNN_Layer *layer = new DNN_Layer;
612 |
613 |   DNN_NewFFLayerMacro(layer, DNN_LAYER_QFC, quantized_fully_connected_layer,
614 |                       acttype,
615 |                       in_dim, out_dim, has_bias,
616 |                       backend);
617 |
618 |   return layer;
619 | }
620 |
621 | DNN_DefineLayerMacro(DNN_LAYER_SLICE,slice_layer);
622 | DNN_Layer *DNN_SliceLayer(long dim0, long dim1, long dim2,
623 |                           enum DNN_Slice slicetype, // named so it does not shadow tiny-dnn's slice_type enum
624 |                           long num_outputs)
625 | {
626 |   DNN_Layer *layer = new DNN_Layer;
627 |
628 |   enum slice_type stype;
629 |   switch (slicetype) {
630 |   case DNN_SLICE_SAMPLES:
631 |     stype = slice_type::slice_samples;
632 |     break;
633 |   case DNN_SLICE_CHANNELS:
634 |     stype = slice_type::slice_channels;
635 |     break;
636 |   }
637 |
638 |   DNN_NewLayerMacro(layer, DNN_LAYER_SLICE, slice_layer,
639 |                     shape3d(dim0,dim1,dim2), stype,
640 |                     num_outputs);
641 |
642 |   return layer;
643 | }
644 |
645 |
646 |
647 | //////////////////////////////////////////////
648 | // Optimizers
649 | //////////////////////////////////////////////
650 |
651 | DNN_DefineOptimizerMacro(DNN_OPTIMIZER_ADAGRAD,adagrad);
652 | DNN_Optimizer *DNN_AdaGradOptimizer(float alpha) // 0.01
653 | {
654 |   adagrad *optptr = new adagrad;
655 |   optptr->alpha = alpha;
656 |
657 |   DNN_Optimizer *opt = new DNN_Optimizer;
658 |   DNN_SetupOptimizerMacro(opt,DNN_OPTIMIZER_ADAGRAD,optptr);
659 |
660 |   return opt;
661 | }
662 |
663 | DNN_DefineOptimizerMacro(DNN_OPTIMIZER_RMSPROP,RMSprop);
664 | DNN_Optimizer *DNN_RMSPropOptimizer(float alpha, // 0.0001
665 |                                     float mu)    // 0.99
666 | {
667 |   RMSprop *optptr = new RMSprop;
668 |   optptr->alpha = alpha;
669 |   optptr->mu = mu;
670 |
671 |   DNN_Optimizer *opt = new DNN_Optimizer;
672 |   DNN_SetupOptimizerMacro(opt,DNN_OPTIMIZER_RMSPROP,optptr);
673 |
674 |   return opt;
675 | }
676 |
677 | DNN_DefineOptimizerMacro(DNN_OPTIMIZER_ADAM,adam);
678 | DNN_Optimizer *DNN_AdamOptimizer(float alpha, // 0.001
679 |                                  float b1,    // 0.9
680 |                                  float b2,    // 0.999
681 |                                  float b1_t,  // 0.9
682 |                                  float b2_t)  // 0.999
683 | {
684 |   adam *optptr = new adam;
685 |   optptr->alpha = alpha;
686 |   optptr->b1 = b1;
687 |   optptr->b2 = b2;
688 |   optptr->b1_t = b1_t;
689 |   optptr->b2_t = b2_t;
690 |
691 |   DNN_Optimizer *opt = new DNN_Optimizer;
692 |   DNN_SetupOptimizerMacro(opt,DNN_OPTIMIZER_ADAM,optptr);
693 |
694 |   return opt;
695 | }
696 |
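// Note on the Adam parameters above (as understood from tiny-dnn's adam
// optimizer; treat as a reading, not a spec): b1/b2 are the exponential decay
// rates for the first/second moment estimates, while b1_t/b2_t hold the running
// powers b1^t/b2^t used for bias correction and are updated by tiny-dnn during
// training. Passing the same initial values as b1/b2, as the examples do,
// corresponds to the first-step bias correction.
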
697 | DNN_DefineOptimizerMacro(DNN_OPTIMIZER_SGD,gradient_descent);
698 | DNN_Optimizer *DNN_SGDOptimizer(float alpha,  // 0.01
699 |                                 float lambda) // 0.0
700 | {
701 |   gradient_descent *optptr = new gradient_descent;
702 |   optptr->alpha = alpha;
703 |   optptr->lambda = lambda;
704 |
705 |   DNN_Optimizer *opt = new DNN_Optimizer;
706 |   DNN_SetupOptimizerMacro(opt,DNN_OPTIMIZER_SGD,optptr);
707 |
708 |   return opt;
709 | }
710 |
711 | DNN_DefineOptimizerMacro(DNN_OPTIMIZER_MOMENTUM,momentum);
712 | DNN_Optimizer *DNN_MomentumOptimizer(float alpha,  // 0.01
713 |                                      float lambda, // 0.0
714 |                                      float mu)     // 0.9
715 | {
716 |   momentum *optptr = new momentum;
717 |   optptr->alpha = alpha;
718 |   optptr->lambda = lambda;
719 |   optptr->mu = mu;
720 |
721 |   DNN_Optimizer *opt = new DNN_Optimizer;
722 |   DNN_SetupOptimizerMacro(opt,DNN_OPTIMIZER_MOMENTUM,optptr);
723 |
724 |   return opt;
725 | }
726 |
727 |
728 | //////////////////////////////////////////////
729 | // Training
730 | //////////////////////////////////////////////
731 |
732 | #define DNN_TrainCallMacro(NET,METHOD,LOSSCLASS,OPT,OPTCLASS,...) \
733 |   (*(network<sequential> *)NET->ptr).METHOD<LOSSCLASS>(*(OPTCLASS *)OPT->ptr, __VA_ARGS__);
734 |
735 | #define DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,OPTCLASS,...) \
736 |   switch (LOSSTYPE) { \
737 |   case DNN_LOSS_MSE: \
738 |     DNN_TrainCallMacro(NET,METHOD,mse,OPT,OPTCLASS,__VA_ARGS__); \
739 |     break; \
740 |   case DNN_LOSS_ABSOLUTE: \
741 |     DNN_TrainCallMacro(NET,METHOD,absolute,OPT,OPTCLASS,__VA_ARGS__); \
742 |     break; \
743 |   case DNN_LOSS_CROSSENTROPY: \
744 |     DNN_TrainCallMacro(NET,METHOD,cross_entropy,OPT,OPTCLASS,__VA_ARGS__); \
745 |     break; \
746 |   case DNN_LOSS_CROSSENTROPY_MULTICLASS: \
747 |     DNN_TrainCallMacro(NET,METHOD,cross_entropy_multiclass,OPT,OPTCLASS,__VA_ARGS__); \
748 |     break; \
749 |   }
750 |
751 | #define DNN_DispatchTrain(NET,METHOD,LOSSTYPE,OPT,...) \
752 |   switch(OPT->type) { \
753 |   case DNN_OPTIMIZER_ADAGRAD: \
754 |     DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,adagrad,__VA_ARGS__); \
755 |     break; \
756 |   case DNN_OPTIMIZER_RMSPROP: \
757 |     DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,RMSprop,__VA_ARGS__); \
758 |     break; \
759 |   case DNN_OPTIMIZER_ADAM: \
760 |     DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,adam,__VA_ARGS__); \
761 |     break; \
762 |   case DNN_OPTIMIZER_SGD: \
763 |     DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,gradient_descent,__VA_ARGS__); \
764 |     break; \
765 |   case DNN_OPTIMIZER_MOMENTUM: \
766 |     DNN_DispatchTrainOnLoss(NET,METHOD,LOSSTYPE,OPT,momentum,__VA_ARGS__); \
767 |     break; \
768 |   }
769 |
770 | void DNN_Train(DNN_Network *net,
771 |                DNN_Optimizer *opt,
772 |                enum DNN_LossType losstype,
773 |                float *inputs,
774 |                long *outputs,
775 |                long n_samples,
776 |                long sample_size,
777 |                long batch_size,
778 |                long epochs,
779 |                void (*batch_cb)(DNN_Network *net, void *data), // on_batch_callback
780 |                void (*epoch_cb)(DNN_Network *net, void *data), // on_epoch_callback
781 |                void *cb_data,
782 |                long reset_weights, // false
783 |                long n_threads,
784 |                float *cost) // relative target costs, can be NULL
785 | {
786 |
787 |   std::vector<vec_t> dnn_inputs(n_samples);
788 |   std::vector<label_t> dnn_outputs(n_samples);
789 |   std::vector<vec_t> dnn_cost;
790 |
791 |   if (cost) {
792 |     dnn_cost.resize(n_samples);
793 |   }
794 |
795 |   for (int n=0; n<n_samples; n++) {
796 |     dnn_inputs[n].assign(inputs+n*sample_size, inputs+(n+1)*sample_size);
797 |     dnn_outputs[n] = (label_t)outputs[n];
798 |     if (cost) {
799 |       dnn_cost[n].assign(cost+n, cost+n+1); // one relative cost per sample
800 |     }
801 |   }
802 |
803 |   DNN_DispatchTrain(net, train, losstype, opt,
804 |                     dnn_inputs, dnn_outputs, batch_size, epochs,
805 |                     [&](){ if (batch_cb == NULL) return;
806 |                            batch_cb(net, cb_data);
807 |                            network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
808 |                            dnn_net.set_netphase(net_phase::train);
809 |                     },
810 |                     [&](){ if (epoch_cb == NULL) return;
811 |                            epoch_cb(net, cb_data);
812 |                            network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
813 |                            dnn_net.set_netphase(net_phase::train);
814 |                     },
815 |                     reset_weights, n_threads,
816 |                     dnn_cost);
817 |
818 | }
819 |
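// A typical classification call, sketched after example/example.c (that file's
// training call was lost from this dump, so the exact arguments below are
// illustrative): labels go in as one long per sample, images as flattened
// floats of sample_size each.
//
//   DNN_Train(net, optimizer, DNN_LOSS_CROSSENTROPY,
//             train_images, train_labels, nimages, npixels,
//             32 /* batch */, 30 /* epochs */,
//             batchCb, epochCb, &cbdata, 0, 4, NULL);
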
820 | void DNN_Fit(DNN_Network *net,
821 |              DNN_Optimizer *opt,
822 |              enum DNN_LossType losstype,
823 |              float *inputs,
824 |              float *outputs,
825 |              long n_samples,
826 |              long sample_size,
827 |              long output_size,
828 |              long batch_size,
829 |              long epochs,
830 |              void (*batch_cb)(DNN_Network *net, void *data), // on_batch_callback
831 |              void (*epoch_cb)(DNN_Network *net, void *data), // on_epoch_callback
832 |              void *cb_data,
833 |              long reset_weights, // false
834 |              long n_threads,
835 |              float *cost) // relative target costs, can be NULL
836 | {
837 |   std::vector<vec_t> dnn_inputs(n_samples);
838 |   std::vector<vec_t> dnn_outputs(n_samples);
839 |   std::vector<vec_t> dnn_cost;
840 |
841 |   if (cost) {
842 |     dnn_cost.resize(n_samples);
843 |   }
844 |
845 |   for (int n=0; n<n_samples; n++) {
846 |     dnn_inputs[n].assign(inputs+n*sample_size, inputs+(n+1)*sample_size);
847 |     dnn_outputs[n].assign(outputs+n*output_size, outputs+(n+1)*output_size);
848 |     if (cost) {
849 |       dnn_cost[n].assign(cost+n*output_size, cost+(n+1)*output_size);
850 |     }
851 |   }
852 |
853 |   DNN_DispatchTrain(net, fit, losstype, opt,
854 |                     dnn_inputs, dnn_outputs, batch_size, epochs,
855 |                     [&](){ if (batch_cb == NULL) return;
856 |                            network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
857 |                            batch_cb(net, cb_data);
858 |                            dnn_net.set_netphase(net_phase::train);
859 |                     },
860 |                     [&](){ if (epoch_cb == NULL) return;
861 |                            network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
862 |                            epoch_cb(net, cb_data);
863 |                            dnn_net.set_netphase(net_phase::train);
864 |                     },
865 |                     reset_weights, n_threads,
866 |                     dnn_cost);
867 | }
868 |
869 | void DNN_Predict(DNN_Network *net,
870 |                  float *input,
871 |                  float *output,
872 |                  long input_size,
873 |                  long output_size)
874 | {
875 |   vec_t dnn_input;
876 |   dnn_input.assign(input, input+input_size);
877 |
878 |   network<sequential>& dnn_net = (*(network<sequential> *)net->ptr);
879 |   dnn_net.set_netphase(net_phase::test);
880 |   vec_t dnn_output = dnn_net.predict(dnn_input);
881 |
882 |   for (int i=0; i<output_size; i++) {
883 |     output[i] = dnn_output[i];
884 |   }
885 | }
886 |
887 | long DNN_PredictLabel(DNN_Network *net,
888 |                       float *input,
889 |                       long input_size)
890 | {
891 |   vec_t dnn_input;
892 |   dnn_input.assign(input, input+input_size);
893 |
894 |   network<sequential>& dnn_net = (*(network<sequential> *)net->ptr);
895 |   dnn_net.set_netphase(net_phase::test);
896 |   label_t label = dnn_net.predict_label(dnn_input);
897 |
898 |   return label;
899 | }
900 |
901 | float DNN_GetError(DNN_Network *net,
902 |                    float *inputs,
903 |                    long *outputs,
904 |                    long n_samples,
905 |                    long sample_size)
906 | {
907 |   std::vector<vec_t> dnn_inputs(n_samples);
908 |   std::vector<label_t> dnn_outputs(n_samples);
909 |
910 |   // FIXME: copying every time is a waste.
911 |   // This issue applies to Train, Fit, GetError and GetLoss.
912 |   // The issue is that apparently the only safe way to
913 |   // initialize a std::vector from a C array is through
914 |   // a copy, because the std::vector can't know what the
915 |   // allocator was.
916 |   // The alternative here is to introduce a DNN_Data struct
917 |   // that holds an opaque pointer to a std::vector allocated
918 |   // on the C++ side. Passing this struct to GetLoss, GetError,
919 |   // Train and Fit would avoid the copy at every call.
920 |   for (int n=0; n<n_samples; n++) {
921 |     dnn_inputs[n].assign(inputs+n*sample_size, inputs+(n+1)*sample_size);
922 |     dnn_outputs[n] = (label_t)outputs[n];
923 |   }
924 |
925 |   network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
926 |
927 |   result res = dnn_net.test(dnn_inputs,dnn_outputs);
928 |
929 |   float error = 1.0 - 0.01 * res.accuracy();
930 |
931 |   return error;
932 | }
933 |
934 | float DNN_GetLoss(DNN_Network *net,
935 |                   enum DNN_LossType losstype,
936 |                   float *inputs,
937 |                   float *outputs,
938 |                   long n_samples,
939 |                   long sample_size,
940 |                   long output_size)
941 | {
942 |   std::vector<vec_t> dnn_inputs(n_samples);
943 |   std::vector<vec_t> dnn_outputs(n_samples);
944 |
945 |   // FIXME: copying every time is a waste. See above.
946 |   for (int n=0; n<n_samples; n++) {
947 |     dnn_inputs[n].assign(inputs+n*sample_size, inputs+(n+1)*sample_size);
948 |     dnn_outputs[n].assign(outputs+n*output_size, outputs+(n+1)*output_size);
949 |   }
950 |
951 |   network<sequential> &dnn_net = *(network<sequential> *)net->ptr;
952 |
953 |   float loss_value;
954 |
955 |   switch (losstype) {
956 |   case DNN_LOSS_MSE:
957 |     loss_value = dnn_net.get_loss<mse>(dnn_inputs,dnn_outputs);
958 |     break;
959 |   case DNN_LOSS_ABSOLUTE:
960 |     loss_value = dnn_net.get_loss<absolute>(dnn_inputs,dnn_outputs);
961 |     break;
962 |   case DNN_LOSS_CROSSENTROPY:
963 |     loss_value = dnn_net.get_loss<cross_entropy>(dnn_inputs,dnn_outputs);
964 |     break;
965 |   case DNN_LOSS_CROSSENTROPY_MULTICLASS:
966 |     loss_value = dnn_net.get_loss<cross_entropy_multiclass>(dnn_inputs,dnn_outputs);
967 |     break;
968 |   }
969 |
970 |   return loss_value;
971 | }
972 |
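// A possible shape for the DNN_Data struct suggested in the FIXME above — a
// sketch only, not part of the current API (all names here are hypothetical):
//
//   typedef struct DNN_Data { void *ptr; } DNN_Data; // wraps a std::vector<vec_t>
//
//   DNN_Data *DNN_DataNew(float *values, long n_samples, long sample_size)
//   {
//     std::vector<vec_t> *v = new std::vector<vec_t>(n_samples);
//     for (long n=0; n<n_samples; n++)
//       (*v)[n].assign(values+n*sample_size, values+(n+1)*sample_size);
//     DNN_Data *data = new DNN_Data;
//     data->ptr = v;
//     return data;
//   }
//
//   void DNN_DataDelete(DNN_Data *data)
//   {
//     delete (std::vector<vec_t> *)data->ptr;
//     delete data;
//   }
//
// Train/Fit/GetError/GetLoss variants taking a DNN_Data* could then reuse the
// converted tensors across calls instead of copying on every call.
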
--------------------------------------------------------------------------------
/tinydnnc.h:
--------------------------------------------------------------------------------
1 |
2 | enum DNN_NetworkType {
3 |   DNN_NETWORK_SEQUENTIAL
4 | };
5 |
6 | typedef struct DNN_Network {
7 |   enum DNN_NetworkType type;
8 |   void *ptr;
9 | } DNN_Network;
10 |
11 | enum DNN_LayerType {
12 |   DNN_LAYER_EADD,
13 |   DNN_LAYER_AVGPOOL,
14 |   DNN_LAYER_AVGUNPOOL,
15 |   DNN_LAYER_BATCHNORM,
16 |   DNN_LAYER_CONCAT,
17 |   DNN_LAYER_CONV,
18 |   DNN_LAYER_DECONV,
19 |   DNN_LAYER_DROPOUT,
20 |   DNN_LAYER_FC,
21 |   DNN_LAYER_INPUT,
22 |   DNN_LAYER_LINEAR,
23 |   DNN_LAYER_LRN,
24 |   DNN_LAYER_MAXPOOL,
25 |   DNN_LAYER_MAXUNPOOL,
26 |   DNN_LAYER_PARTIALCONN,
27 |   DNN_LAYER_POWER,
28 |   DNN_LAYER_QCONV,
29 |   DNN_LAYER_QDECONV,
30 |   DNN_LAYER_QFC,
31 |   DNN_LAYER_SLICE
32 | };
33 |
34 | enum DNN_ActivationType {
35 |   DNN_ACTIVATION_NONE,
36 |   DNN_ACTIVATION_IDENTITY,
37 |   DNN_ACTIVATION_SIGMOID,
38 |   DNN_ACTIVATION_RELU,
39 |   DNN_ACTIVATION_LEAKYRELU,
40 |   DNN_ACTIVATION_ELU,
41 |   DNN_ACTIVATION_SOFTMAX,
42 |   DNN_ACTIVATION_TANH,
43 |   DNN_ACTIVATION_TANHP1M2
44 | };
45 |
46 | enum DNN_BackendType {
47 |   DNN_BACKEND_TINYDNN,
48 |   DNN_BACKEND_NNPACK,
49 |   DNN_BACKEND_LIBDNN,
50 |   DNN_BACKEND_AVX,
51 |   DNN_BACKEND_OPENCL
52 | };
53 |
54 | typedef struct DNN_Layer {
55 |   enum DNN_LayerType type;
56 |   enum DNN_ActivationType acttype;
57 |   void *ptr;
58 |   void (*addfn)(DNN_Network *net, struct DNN_Layer *layer);
59 |   void (*deletefn)(struct DNN_Layer *layer);
60 | } DNN_Layer;
61 |
62 | enum DNN_OptimizerType {
63 |   DNN_OPTIMIZER_ADAGRAD,
64 |   DNN_OPTIMIZER_RMSPROP,
65 |   DNN_OPTIMIZER_ADAM,
66 |   DNN_OPTIMIZER_SGD,
67 |   DNN_OPTIMIZER_MOMENTUM
68 | };
69 |
70 | typedef struct DNN_Optimizer {
71 |   enum DNN_OptimizerType type;
72 |   void *ptr;
73 |   void (*deletefn)(struct DNN_Optimizer *opt);
74 | } DNN_Optimizer;
75 |
76 | enum DNN_LossType {
77 |   DNN_LOSS_MSE,
78 |   DNN_LOSS_ABSOLUTE,
79 |   DNN_LOSS_CROSSENTROPY,
80 |   DNN_LOSS_CROSSENTROPY_MULTICLASS
81 | };
82 |
83 | enum DNN_Padding {
84 |   DNN_PADDING_VALID,
85 |   DNN_PADDING_SAME
86 | };
87 |
88 | enum DNN_Phase {
89 |   DNN_PHASE_TRAIN,
90 |   DNN_PHASE_TEST
91 | };
92 |
93 | enum DNN_Region {
94 |   DNN_REGION_ACROSS,
95 |   DNN_REGION_WITHIN
96 | };
97 |
98 | enum DNN_Slice {
99 |   DNN_SLICE_SAMPLES,
100 |   DNN_SLICE_CHANNELS
101 | };
102 |
103 |
104 |
105 | #ifdef __cplusplus
106 | extern "C" {
107 | #endif
108 |
109 | DNN_Network *DNN_SequentialNetwork();
110 |
111 | void DNN_SequentialAdd(DNN_Network *net, DNN_Layer *layer);
112 |
113 | DNN_Network *DNN_NetworkClone(DNN_Network *net);
114 |
115 | void DNN_NetworkDelete(DNN_Network *net);
116 |
117 | void DNN_LayerDelete(DNN_Layer *layer);
118 |
119 | void DNN_OptimizerDelete(DNN_Optimizer *opt);
120 |
121 | DNN_Layer *DNN_ElwiseAddLayer(long num_args,
122 |                               long dim);
123 |
124 | DNN_Layer *DNN_AveragePoolLayer(enum DNN_ActivationType acttype,
125 |                                 long in_width, long in_height,
126 |                                 long in_channels, long pool_size,
127 |                                 long stride);
128 |
129 | DNN_Layer *DNN_AverageUnpoolLayer(enum DNN_ActivationType acttype,
130 |                                   long in_width, long in_height,
131 |                                   long in_channels, long pool_size,
132 |                                   long stride);
133 |
134 | DNN_Layer *DNN_BatchNormalizationLayer(long in_spatial_size,
135 |                                        long in_channels,
136 |                                        float epsilon,  // 1e-5
137 |                                        float momentum, // 0.999
138 |                                        enum DNN_Phase phase); // DNN_PHASE_TRAIN
139 |
140 | DNN_Layer *DNN_ConcatLayer(long num_args,
141 |                            long ndim);
142 |
143 | DNN_Layer *DNN_ConvolutionalLayer(enum DNN_ActivationType acttype,
144 |                                   long in_width, long in_height,
145 |                                   long window_width, long window_height,
146 |                                   long in_channels, long out_channels,
147 |                                   enum DNN_Padding padtype,
148 |                                   long has_bias,
149 |                                   long w_stride, long h_stride,
150 |                                   enum DNN_BackendType backend_type);
151 |
152 | DNN_Layer *DNN_DeconvolutionalLayer(enum DNN_ActivationType acttype,
153 |                                     long in_width, long in_height,
154 |                                     long window_width, long window_height,
155 |                                     long in_channels, long out_channels,
156 |                                     enum DNN_Padding padtype,
157 |                                     long has_bias,
158 |                                     long w_stride, long h_stride,
159 |                                     enum DNN_BackendType backend_type);
160 |
161 | DNN_Layer *DNN_DropoutLayer(long in_dim,
162 |                             float dropout_rate,
163 |                             enum DNN_Phase phase);
164 |
165 | DNN_Layer *DNN_FullyConnectedLayer(enum DNN_ActivationType acttype,
166 |                                    long in_dim, long out_dim,
167 |                                    long has_bias,
168 |                                    enum DNN_BackendType backend_type);
169 |
170 | DNN_Layer *DNN_InputLayer(long dim0, long dim1, long dim2);
171 |
172 | DNN_Layer *DNN_LinearLayer(enum DNN_ActivationType acttype,
173 |                            long dim, float scale, float bias);
174 |
175 | DNN_Layer *DNN_LRNLayer(enum DNN_ActivationType acttype,
176 |                         long in_width, long in_height,
177 |                         long local_size, long in_channels,
178 |                         float alpha, float beta,
179 |                         enum DNN_Region region);
180 |
181 | DNN_Layer *DNN_MaxPoolLayer(enum DNN_ActivationType acttype,
182 |                             long in_width, long in_height,
183 |                             long in_channels, long pool_size,
184 |                             long stride,
185 |                             enum DNN_BackendType backend_type);
186 |
187 | DNN_Layer *DNN_PowerLayer(long dim0, long dim1, long dim2, float factor);
188 |
189 | DNN_Layer *DNN_QuantizedConvolutionalLayer(enum DNN_ActivationType acttype,
190 |                                            long in_width, long in_height,
191 |                                            long window_width, long window_height,
192 |                                            long in_channels, long out_channels,
193 |                                            enum DNN_Padding padtype,
194 |                                            long has_bias,
195 |                                            long w_stride, long h_stride,
196 |                                            enum DNN_BackendType backend_type);
197 |
198 | DNN_Layer *DNN_QuantizedDeconvolutionalLayer(enum DNN_ActivationType acttype,
199 |                                              long in_width, long in_height,
200 |                                              long window_width, long window_height,
201 |                                              long in_channels, long out_channels,
202 |                                              enum DNN_Padding padtype,
203 |                                              long has_bias,
204 |                                              long w_stride, long h_stride,
205 |                                              enum DNN_BackendType backend_type);
206 |
207 | DNN_Layer *DNN_QuantizedFullyConnectedLayer(enum DNN_ActivationType acttype,
208 |                                             long in_dim, long out_dim,
209 |                                             long has_bias,
210 |                                             enum DNN_BackendType backend_type);
211 |
212 | DNN_Layer *DNN_SliceLayer(long dim0, long dim1, long dim2,
213 |                           enum DNN_Slice slice_type,
214 |                           long num_outputs);
215 |
216 | DNN_Optimizer *DNN_AdaGradOptimizer(float alpha); // 0.01
217 |
218 | DNN_Optimizer *DNN_RMSPropOptimizer(float alpha, // 0.0001
219 |                                     float mu);   // 0.99
220 |
221 | DNN_Optimizer *DNN_AdamOptimizer(float alpha, // 0.001
222 |                                  float b1,    // 0.9
223 |                                  float b2,    // 0.999
224 |                                  float b1_t,  // 0.9
225 |                                  float b2_t); // 0.999
226 |
227 | DNN_Optimizer *DNN_SGDOptimizer(float alpha,   // 0.01
228 |                                 float lambda); // 0.0
229 |
230 | DNN_Optimizer *DNN_MomentumOptimizer(float alpha,  // 0.01
231 |                                      float lambda, // 0.0
232 |                                      float mu);    // 0.9
233 |
234 | void DNN_Train(DNN_Network *net,
235 |                DNN_Optimizer *opt,
236 |                enum DNN_LossType losstype,
237 |                float *inputs,
238 |                long *outputs,
239 |                long n_samples,
240 |                long sample_size,
241 |                long batch_size,
242 |                long epochs,
243 |                void (*batch_cb)(DNN_Network *net, void *data), // on_batch_callback
244 |                void (*epoch_cb)(DNN_Network *net, void *data), // on_epoch_callback
245 |                void *cb_data,
246 |                long reset_weights, // false
247 |                long n_threads,
248 |                float *cost); // relative target costs, can be NULL
249 |
250 | void DNN_Fit(DNN_Network *net,
251 |              DNN_Optimizer *opt,
252 |              enum DNN_LossType losstype,
253 |              float *inputs,
254 |              float *outputs,
255 |              long n_samples,
256 |              long sample_size,
257 |              long output_size,
258 |              long batch_size,
259 |              long epochs,
260 |              void (*batch_cb)(DNN_Network *net, void *data), // on_batch_callback
261 |              void (*epoch_cb)(DNN_Network *net, void *data), // on_epoch_callback
262 |              void *cb_data,
263 |              long reset_weights, // false
264 |              long n_threads,
265 |              float *cost); // relative target costs, can be NULL
266 |
267 | void DNN_Predict(DNN_Network *net,
268 |                  float *input,
269 |                  float *output,
270 |                  long input_size,
271 |                  long output_size);
272 |
273 | long DNN_PredictLabel(DNN_Network *net,
274 |                       float *input,
275 |                       long input_size);
276 |
277 | float DNN_GetError(DNN_Network *net,
278 |                    float *inputs,  // flattened n_samples x sample_size (std::vector<vec_t>, aka tensor_t, internally)
279 |                    long *outputs,  // one label per sample (std::vector<label_t> internally)
280 |                    long n_samples,
281 |                    long sample_size);
282 |
283 | float DNN_GetLoss(DNN_Network *net,
284 |                   enum DNN_LossType losstype,
285 |                   float *inputs,  // flattened n_samples x sample_size (std::vector<vec_t>, aka tensor_t, internally)
286 |                   float *outputs, // flattened n_samples x output_size
287 |                   long n_samples,
288 |                   long sample_size,
289 |                   long output_size);
290 |
291 | #ifdef __cplusplus
292 | }
293 | #endif
294 |
--------------------------------------------------------------------------------