├── visual ├── iris │ ├── YOLOWrapper │ │ ├── darknet │ │ │ ├── src │ │ │ │ ├── classifier.h │ │ │ │ ├── layer.h │ │ │ │ ├── demo.h │ │ │ │ ├── parser.h │ │ │ │ ├── list.h │ │ │ │ ├── tree.h │ │ │ │ ├── box.h │ │ │ │ ├── matrix.h │ │ │ │ ├── col2im.h │ │ │ │ ├── im2col.h │ │ │ │ ├── l2norm_layer.h │ │ │ │ ├── cuda.h │ │ │ │ ├── logistic_layer.h │ │ │ │ ├── option_list.h │ │ │ │ ├── upsample_layer.h │ │ │ │ ├── activation_layer.h │ │ │ │ ├── region_layer.h │ │ │ │ ├── batchnorm_layer.h │ │ │ │ ├── shortcut_layer.h │ │ │ │ ├── lstm_layer.h │ │ │ │ ├── crop_layer.h │ │ │ │ ├── reorg_layer.h │ │ │ │ ├── route_layer.h │ │ │ │ ├── dropout_layer.h │ │ │ │ ├── yolo_layer.h │ │ │ │ ├── detection_layer.h │ │ │ │ ├── cost_layer.h │ │ │ │ ├── softmax_layer.h │ │ │ │ ├── gru_layer.h │ │ │ │ ├── avgpool_layer.h │ │ │ │ ├── network.h │ │ │ │ ├── rnn_layer.h │ │ │ │ ├── maxpool_layer.h │ │ │ │ ├── normalization_layer.h │ │ │ │ ├── crnn_layer.h │ │ │ │ ├── connected_layer.h │ │ │ │ ├── deconvolutional_layer.h │ │ │ │ ├── gemm.h │ │ │ │ ├── local_layer.h │ │ │ │ ├── dropout_layer_kernels.cu │ │ │ │ ├── col2im.c │ │ │ │ ├── im2col.c │ │ │ │ ├── list.c │ │ │ │ ├── dropout_layer.c │ │ │ │ ├── utils.h │ │ │ │ ├── avgpool_layer_kernels.cu │ │ │ │ ├── activation_layer.c │ │ │ │ ├── l2norm_layer.c │ │ │ │ ├── avgpool_layer.c │ │ │ │ ├── data.h │ │ │ │ ├── logistic_layer.c │ │ │ │ ├── convolutional_layer.h │ │ │ │ ├── im2col_kernels.cu │ │ │ │ ├── image.h │ │ │ │ ├── col2im_kernels.cu │ │ │ │ ├── activations.h │ │ │ │ ├── crop_layer.c │ │ │ │ ├── shortcut_layer.c │ │ │ │ ├── maxpool_layer_kernels.cu │ │ │ │ ├── upsample_layer.c │ │ │ │ ├── option_list.c │ │ │ │ ├── softmax_layer.c │ │ │ │ ├── activations.c │ │ │ │ ├── tree.c │ │ │ │ ├── maxpool_layer.c │ │ │ │ ├── route_layer.c │ │ │ │ ├── layer.c │ │ │ │ ├── cuda.c │ │ │ │ ├── matrix.c │ │ │ │ ├── deconvolutional_kernels.cu │ │ │ │ ├── reorg_layer.c │ │ │ │ └── cost_layer.c │ │ │ ├── scripts │ │ │ │ ├── gen_tactic.sh │ │ │ │ ├── imagenet_label.sh │ │ │ │ ├── dice_label.sh │ │ │ │ ├── get_coco_dataset.sh │ │ │ │ └── voc_label.py │ │ │ ├── .gitignore │ │ │ ├── LICENSE.meta │ │ │ ├── README.md │ │ │ ├── LICENSE.v1 │ │ │ ├── LICENSE.fuck │ │ │ ├── LICENSE │ │ │ ├── examples │ │ │ │ ├── detector.py │ │ │ │ ├── art.c │ │ │ │ ├── detector-scipy-opencv.py │ │ │ │ ├── swag.c │ │ │ │ ├── dice.c │ │ │ │ ├── super.c │ │ │ │ ├── tag.c │ │ │ │ ├── writing.c │ │ │ │ └── voxel.c │ │ │ ├── python │ │ │ │ ├── proverbot.py │ │ │ │ └── darknet.py │ │ │ ├── LICENSE.mit │ │ │ └── Makefile │ │ ├── test │ │ │ ├── 1.jpg │ │ │ ├── 2.jpg │ │ │ └── 3.jpg │ │ ├── README.md │ │ ├── YOLOWrapper.h │ │ ├── Makefile │ │ ├── main.cpp │ │ └── YOLOWrapper.cpp │ └── README.md └── facial │ ├── Makefile │ └── src │ └── train_facial_classifier.cpp ├── extractedDNA └── AllelesClassifier │ ├── Makefile │ ├── classifier.h │ ├── classifier.cpp │ ├── test.cpp │ └── train.cpp ├── rawDNA └── LSTM │ ├── Makefile │ ├── models │ └── lstm_train_save.pbtxt │ └── src │ └── train_LSTM.cpp └── README.md /visual/iris/YOLOWrapper/darknet/src/classifier.h: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/layer.h: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/demo.h: 
-------------------------------------------------------------------------------- 1 | #ifndef DEMO_H 2 | #define DEMO_H 3 | 4 | #include "image.h" 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/test/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeneSourceCodeChain/AI_Components/HEAD/visual/iris/YOLOWrapper/test/1.jpg -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/test/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeneSourceCodeChain/AI_Components/HEAD/visual/iris/YOLOWrapper/test/2.jpg -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/test/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeneSourceCodeChain/AI_Components/HEAD/visual/iris/YOLOWrapper/test/3.jpg -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/scripts/gen_tactic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: 3 | # wget http://pjreddie.com/media/files/peek.weights 4 | # scripts/gen_tactic.sh < data/goal.txt 5 | ./darknet rnn generatetactic cfg/gru.cfg peek.weights 2>/dev/null 6 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/parser.h: -------------------------------------------------------------------------------- 1 | #ifndef PARSER_H 2 | #define PARSER_H 3 | #include "darknet.h" 4 | #include "network.h" 5 | 6 | void save_network(network net, char *filename); 7 | void save_weights_double(network net, char *filename); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/list.h: -------------------------------------------------------------------------------- 1 | #ifndef LIST_H 2 | #define LIST_H 3 | #include "darknet.h" 4 | 5 | list *make_list(); 6 | int list_find(list *l, void *val); 7 | 8 | void list_insert(list *, void *); 9 | 10 | 11 | void free_list_contents(list *l); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/tree.h: -------------------------------------------------------------------------------- 1 | #ifndef TREE_H 2 | #define TREE_H 3 | #include "darknet.h" 4 | 5 | int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride); 6 | float get_hierarchy_probability(float *x, tree *hier, int c, int stride); 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/box.h: -------------------------------------------------------------------------------- 1 | #ifndef BOX_H 2 | #define BOX_H 3 | #include "darknet.h" 4 | 5 | typedef struct{ 6 | float dx, dy, dw, dh; 7 | } dbox; 8 | 9 | float box_rmse(box a, box b); 10 | dbox diou(box a, box b); 11 | box decode_box(box b, box anchor); 12 | box encode_box(box b, box anchor); 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/matrix.h: 
-------------------------------------------------------------------------------- 1 | #ifndef MATRIX_H 2 | #define MATRIX_H 3 | #include "darknet.h" 4 | 5 | matrix copy_matrix(matrix m); 6 | void print_matrix(matrix m); 7 | 8 | matrix hold_out_matrix(matrix *m, int n); 9 | matrix resize_matrix(matrix m, int size); 10 | 11 | float *pop_column(matrix *m, int c); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /extractedDNA/AllelesClassifier/Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS=-I. -std=c++14 2 | LIBS=-lboost_serialization -lboost_program_options 3 | OBJS=$(patsubst %.cpp,%.o,$(wildcard *.cpp)) 4 | 5 | all: train test 6 | 7 | train: train.o 8 | $(CXX) $^ $(LIBS) -o ${@} 9 | 10 | test: test.o classifier.o 11 | $(CXX) $^ $(LIBS) -o ${@} 12 | 13 | clean: 14 | $(RM) train test $(OBJS) 15 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.dSYM 3 | *.csv 4 | *.out 5 | *.png 6 | *.jpg 7 | *.pyc 8 | old/ 9 | mnist/ 10 | data/ 11 | caffe/ 12 | grasp/ 13 | images/ 14 | opencv/ 15 | convnet/ 16 | decaf/ 17 | submission/ 18 | cfg/ 19 | darknet 20 | .fuse* 21 | 22 | # OS Generated # 23 | .DS_Store* 24 | ehthumbs.db 25 | Icon? 26 | Thumbs.db 27 | *.swp 28 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/LICENSE.meta: -------------------------------------------------------------------------------- 1 | META-LICENSE 2 | Version 1, June 21 2017 3 | 4 | Any and all licenses may be applied to the software either individually 5 | or in concert. Any issues, ambiguities, paradoxes, or metaphysical quandries 6 | arising from this combination should be discussed with a local faith leader, 7 | hermit, or guru. The Oxford comma shall be used. 
8 | 9 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/col2im.h: -------------------------------------------------------------------------------- 1 | #ifndef COL2IM_H 2 | #define COL2IM_H 3 | 4 | void col2im_cpu(float* data_col, 5 | int channels, int height, int width, 6 | int ksize, int stride, int pad, float* data_im); 7 | 8 | #ifdef GPU 9 | void col2im_gpu(float *data_col, 10 | int channels, int height, int width, 11 | int ksize, int stride, int pad, float *data_im); 12 | #endif 13 | #endif 14 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/scripts/imagenet_label.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p labelled 4 | wd=`pwd` 5 | 6 | for f in val/*.xml; 7 | do 8 | label=`grep -m1 "" $f | grep -oP '\K[^<]*'` 9 | im=`echo $f | sed 's/val/imgs/; s/xml/JPEG/'` 10 | out=`echo $im | sed 's/JPEG/'${label}'.JPEG/; s/imgs/labelled/'` 11 | ln -s ${wd}/$im ${wd}/$out 12 | done 13 | 14 | find ${wd}/labelled -name \*.JPEG > inet.val.list 15 | 16 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/im2col.h: -------------------------------------------------------------------------------- 1 | #ifndef IM2COL_H 2 | #define IM2COL_H 3 | 4 | void im2col_cpu(float* data_im, 5 | int channels, int height, int width, 6 | int ksize, int stride, int pad, float* data_col); 7 | 8 | #ifdef GPU 9 | 10 | void im2col_gpu(float *im, 11 | int channels, int height, int width, 12 | int ksize, int stride, int pad,float *data_col); 13 | 14 | #endif 15 | #endif 16 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/README.md: -------------------------------------------------------------------------------- 1 | ![Darknet Logo](http://pjreddie.com/media/files/darknet-black-small.png) 2 | 3 | # Darknet # 4 | Darknet is an open source neural network framework written in C and CUDA. It is fast, easy to install, and supports CPU and GPU computation. 5 | 6 | For more information see the [Darknet project website](http://pjreddie.com/darknet). 7 | 8 | For questions or issues please use the [Google Group](https://groups.google.com/forum/#!forum/darknet). 
9 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/l2norm_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef L2NORM_LAYER_H 2 | #define L2NORM_LAYER_H 3 | #include "layer.h" 4 | #include "network.h" 5 | 6 | layer make_l2norm_layer(int batch, int inputs); 7 | void forward_l2norm_layer(const layer l, network net); 8 | void backward_l2norm_layer(const layer l, network net); 9 | 10 | #ifdef GPU 11 | void forward_l2norm_layer_gpu(const layer l, network net); 12 | void backward_l2norm_layer_gpu(const layer l, network net); 13 | #endif 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/cuda.h: -------------------------------------------------------------------------------- 1 | #ifndef CUDA_H 2 | #define CUDA_H 3 | 4 | #include "darknet.h" 5 | 6 | #ifdef GPU 7 | 8 | void check_error(cudaError_t status); 9 | cublasHandle_t blas_handle(); 10 | int *cuda_make_int_array(int *x, size_t n); 11 | void cuda_random(float *x_gpu, size_t n); 12 | float cuda_compare(float *x_gpu, float *x, size_t n, char *s); 13 | dim3 cuda_gridsize(size_t n); 14 | 15 | #ifdef CUDNN 16 | cudnnHandle_t cudnn_handle(); 17 | #endif 18 | 19 | #endif 20 | #endif 21 | -------------------------------------------------------------------------------- /visual/iris/README.md: -------------------------------------------------------------------------------- 1 | # Visual recognition based on visual information of the iris 2 | 3 | ### Introduction 4 | This component detects potential illness and performs biometric identification based on visual information extracted from the iris. 5 | 6 | ### Components 7 | 8 | 1. Iris detection (visual/iris/YOLOWrapper): detects the iris location with the YOLOv3 algorithm. 9 | 10 | 2. Iris recognition (will be released) 11 | 12 | 3. Illness detection based on eye images (will be released) 13 | 14 | 4.
more 15 | 16 | 17 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/logistic_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef LOGISTIC_LAYER_H 2 | #define LOGISTIC_LAYER_H 3 | #include "layer.h" 4 | #include "network.h" 5 | 6 | layer make_logistic_layer(int batch, int inputs); 7 | void forward_logistic_layer(const layer l, network net); 8 | void backward_logistic_layer(const layer l, network net); 9 | 10 | #ifdef GPU 11 | void forward_logistic_layer_gpu(const layer l, network net); 12 | void backward_logistic_layer_gpu(const layer l, network net); 13 | #endif 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/option_list.h: -------------------------------------------------------------------------------- 1 | #ifndef OPTION_LIST_H 2 | #define OPTION_LIST_H 3 | #include "list.h" 4 | 5 | typedef struct{ 6 | char *key; 7 | char *val; 8 | int used; 9 | } kvp; 10 | 11 | 12 | int read_option(char *s, list *options); 13 | void option_insert(list *l, char *key, char *val); 14 | char *option_find(list *l, char *key); 15 | float option_find_float(list *l, char *key, float def); 16 | float option_find_float_quiet(list *l, char *key, float def); 17 | void option_unused(list *l); 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/LICENSE.v1: -------------------------------------------------------------------------------- 1 | YOLO LICENSE 2 | Version 1, July 10 2015 3 | 4 | THIS SOFTWARE LICENSE IS PROVIDED "ALL CAPS" SO THAT YOU KNOW IT IS SUPER 5 | SERIOUS AND YOU DON'T MESS AROUND WITH COPYRIGHT LAW BECAUSE YOU WILL GET IN 6 | TROUBLE HERE ARE SOME OTHER BUZZWORDS COMMONLY IN THESE THINGS WARRANTIES 7 | LIABILITY CONTRACT TORT LIABLE CLAIMS RESTRICTION MERCHANTABILITY SUBJECT TO 8 | THE FOLLOWING CONDITIONS: 9 | 10 | 1. #yolo 11 | 2. #swag 12 | 3. #blazeit 13 | 14 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/LICENSE.fuck: -------------------------------------------------------------------------------- 1 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 2 | Version 2, December 2004 3 | 4 | Copyright (C) 2004 Sam Hocevar 5 | 6 | Everyone is permitted to copy and distribute verbatim or modified 7 | copies of this license document, and changing it is allowed as long 8 | as the name is changed. 9 | 10 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 11 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 12 | 13 | 0. You just DO WHAT THE FUCK YOU WANT TO. 
14 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/upsample_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef UPSAMPLE_LAYER_H 2 | #define UPSAMPLE_LAYER_H 3 | #include "darknet.h" 4 | 5 | layer make_upsample_layer(int batch, int w, int h, int c, int stride); 6 | void forward_upsample_layer(const layer l, network net); 7 | void backward_upsample_layer(const layer l, network net); 8 | void resize_upsample_layer(layer *l, int w, int h); 9 | 10 | #ifdef GPU 11 | void forward_upsample_layer_gpu(const layer l, network net); 12 | void backward_upsample_layer_gpu(const layer l, network net); 13 | #endif 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/activation_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef ACTIVATION_LAYER_H 2 | #define ACTIVATION_LAYER_H 3 | 4 | #include "activations.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_activation_layer(int batch, int inputs, ACTIVATION activation); 9 | 10 | void forward_activation_layer(layer l, network net); 11 | void backward_activation_layer(layer l, network net); 12 | 13 | #ifdef GPU 14 | void forward_activation_layer_gpu(layer l, network net); 15 | void backward_activation_layer_gpu(layer l, network net); 16 | #endif 17 | 18 | #endif 19 | 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/LICENSE: -------------------------------------------------------------------------------- 1 | YOLO LICENSE 2 | Version 2, July 29 2016 3 | 4 | THIS SOFTWARE LICENSE IS PROVIDED "ALL CAPS" SO THAT YOU KNOW IT IS SUPER 5 | SERIOUS AND YOU DON'T MESS AROUND WITH COPYRIGHT LAW BECAUSE YOU WILL GET IN 6 | TROUBLE HERE ARE SOME OTHER BUZZWORDS COMMONLY IN THESE THINGS WARRANTIES 7 | LIABILITY CONTRACT TORT LIABLE CLAIMS RESTRICTION MERCHANTABILITY. NOW HERE'S 8 | THE REAL LICENSE: 9 | 10 | 0. Darknet is public domain. 11 | 1. Do whatever you want with it. 12 | 2. Stop emailing me about it! 
13 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/region_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef REGION_LAYER_H 2 | #define REGION_LAYER_H 3 | 4 | #include "darknet.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_region_layer(int batch, int w, int h, int n, int classes, int coords); 9 | void forward_region_layer(const layer l, network net); 10 | void backward_region_layer(const layer l, network net); 11 | void resize_region_layer(layer *l, int w, int h); 12 | 13 | #ifdef GPU 14 | void forward_region_layer_gpu(const layer l, network net); 15 | void backward_region_layer_gpu(layer l, network net); 16 | #endif 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/batchnorm_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef BATCHNORM_LAYER_H 2 | #define BATCHNORM_LAYER_H 3 | 4 | #include "image.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_batchnorm_layer(int batch, int w, int h, int c); 9 | void forward_batchnorm_layer(layer l, network net); 10 | void backward_batchnorm_layer(layer l, network net); 11 | 12 | #ifdef GPU 13 | void forward_batchnorm_layer_gpu(layer l, network net); 14 | void backward_batchnorm_layer_gpu(layer l, network net); 15 | void pull_batchnorm_layer(layer l); 16 | void push_batchnorm_layer(layer l); 17 | #endif 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/shortcut_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef SHORTCUT_LAYER_H 2 | #define SHORTCUT_LAYER_H 3 | 4 | #include "layer.h" 5 | #include "network.h" 6 | 7 | layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2); 8 | void forward_shortcut_layer(const layer l, network net); 9 | void backward_shortcut_layer(const layer l, network net); 10 | void resize_shortcut_layer(layer *l, int w, int h); 11 | 12 | #ifdef GPU 13 | void forward_shortcut_layer_gpu(const layer l, network net); 14 | void backward_shortcut_layer_gpu(const layer l, network net); 15 | #endif 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/lstm_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef LSTM_LAYER_H 2 | #define LSTM_LAYER_H 3 | 4 | #include "activations.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | #define USET 8 | 9 | layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam); 10 | 11 | void forward_lstm_layer(layer l, network net); 12 | void update_lstm_layer(layer l, update_args a); 13 | 14 | #ifdef GPU 15 | void forward_lstm_layer_gpu(layer l, network net); 16 | void backward_lstm_layer_gpu(layer l, network net); 17 | void update_lstm_layer_gpu(layer l, update_args a); 18 | 19 | #endif 20 | #endif 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/crop_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef CROP_LAYER_H 2 | #define CROP_LAYER_H 3 | 4 | #include "image.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | typedef layer 
crop_layer; 9 | 10 | image get_crop_image(crop_layer l); 11 | crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure); 12 | void forward_crop_layer(const crop_layer l, network net); 13 | void resize_crop_layer(layer *l, int w, int h); 14 | 15 | #ifdef GPU 16 | void forward_crop_layer_gpu(crop_layer l, network net); 17 | #endif 18 | 19 | #endif 20 | 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/reorg_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef REORG_LAYER_H 2 | #define REORG_LAYER_H 3 | 4 | #include "image.h" 5 | #include "cuda.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | 9 | layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra); 10 | void resize_reorg_layer(layer *l, int w, int h); 11 | void forward_reorg_layer(const layer l, network net); 12 | void backward_reorg_layer(const layer l, network net); 13 | 14 | #ifdef GPU 15 | void forward_reorg_layer_gpu(layer l, network net); 16 | void backward_reorg_layer_gpu(layer l, network net); 17 | #endif 18 | 19 | #endif 20 | 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/route_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef ROUTE_LAYER_H 2 | #define ROUTE_LAYER_H 3 | #include "network.h" 4 | #include "layer.h" 5 | 6 | typedef layer route_layer; 7 | 8 | route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size); 9 | void forward_route_layer(const route_layer l, network net); 10 | void backward_route_layer(const route_layer l, network net); 11 | void resize_route_layer(route_layer *l, network *net); 12 | 13 | #ifdef GPU 14 | void forward_route_layer_gpu(const route_layer l, network net); 15 | void backward_route_layer_gpu(const route_layer l, network net); 16 | #endif 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/dropout_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef DROPOUT_LAYER_H 2 | #define DROPOUT_LAYER_H 3 | 4 | #include "layer.h" 5 | #include "network.h" 6 | 7 | typedef layer dropout_layer; 8 | 9 | dropout_layer make_dropout_layer(int batch, int inputs, float probability); 10 | 11 | void forward_dropout_layer(dropout_layer l, network net); 12 | void backward_dropout_layer(dropout_layer l, network net); 13 | void resize_dropout_layer(dropout_layer *l, int inputs); 14 | 15 | #ifdef GPU 16 | void forward_dropout_layer_gpu(dropout_layer l, network net); 17 | void backward_dropout_layer_gpu(dropout_layer l, network net); 18 | 19 | #endif 20 | #endif 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/yolo_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef YOLO_LAYER_H 2 | #define YOLO_LAYER_H 3 | 4 | #include "darknet.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes); 9 | void forward_yolo_layer(const layer l, network net); 10 | void backward_yolo_layer(const layer l, network net); 11 | void resize_yolo_layer(layer *l, int w, int h); 12 | int 
yolo_num_detections(layer l, float thresh); 13 | 14 | #ifdef GPU 15 | void forward_yolo_layer_gpu(const layer l, network net); 16 | void backward_yolo_layer_gpu(layer l, network net); 17 | #endif 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/detection_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef DETECTION_LAYER_H 2 | #define DETECTION_LAYER_H 3 | 4 | #include "layer.h" 5 | #include "network.h" 6 | 7 | typedef layer detection_layer; 8 | 9 | detection_layer make_detection_layer(int batch, int inputs, int n, int size, int classes, int coords, int rescore); 10 | void forward_detection_layer(const detection_layer l, network net); 11 | void backward_detection_layer(const detection_layer l, network net); 12 | 13 | #ifdef GPU 14 | void forward_detection_layer_gpu(const detection_layer l, network net); 15 | void backward_detection_layer_gpu(detection_layer l, network net); 16 | #endif 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/cost_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef COST_LAYER_H 2 | #define COST_LAYER_H 3 | #include "layer.h" 4 | #include "network.h" 5 | 6 | typedef layer cost_layer; 7 | 8 | COST_TYPE get_cost_type(char *s); 9 | char *get_cost_string(COST_TYPE a); 10 | cost_layer make_cost_layer(int batch, int inputs, COST_TYPE type, float scale); 11 | void forward_cost_layer(const cost_layer l, network net); 12 | void backward_cost_layer(const cost_layer l, network net); 13 | void resize_cost_layer(cost_layer *l, int inputs); 14 | 15 | #ifdef GPU 16 | void forward_cost_layer_gpu(cost_layer l, network net); 17 | void backward_cost_layer_gpu(const cost_layer l, network net); 18 | #endif 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/softmax_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef SOFTMAX_LAYER_H 2 | #define SOFTMAX_LAYER_H 3 | #include "layer.h" 4 | #include "network.h" 5 | 6 | typedef layer softmax_layer; 7 | 8 | void softmax_array(float *input, int n, float temp, float *output); 9 | softmax_layer make_softmax_layer(int batch, int inputs, int groups); 10 | void forward_softmax_layer(const softmax_layer l, network net); 11 | void backward_softmax_layer(const softmax_layer l, network net); 12 | 13 | #ifdef GPU 14 | void pull_softmax_layer_output(const softmax_layer l); 15 | void forward_softmax_layer_gpu(const softmax_layer l, network net); 16 | void backward_softmax_layer_gpu(const softmax_layer l, network net); 17 | #endif 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/gru_layer.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef GRU_LAYER_H 3 | #define GRU_LAYER_H 4 | 5 | #include "activations.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | 9 | layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam); 10 | 11 | void forward_gru_layer(layer l, network state); 12 | void backward_gru_layer(layer l, network state); 13 | void update_gru_layer(layer l, update_args a); 14 | 15 | #ifdef GPU 16 | void forward_gru_layer_gpu(layer l, network state); 17 | void 
backward_gru_layer_gpu(layer l, network state); 18 | void update_gru_layer_gpu(layer l, update_args a); 19 | void push_gru_layer(layer l); 20 | void pull_gru_layer(layer l); 21 | #endif 22 | 23 | #endif 24 | 25 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/avgpool_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef AVGPOOL_LAYER_H 2 | #define AVGPOOL_LAYER_H 3 | 4 | #include "image.h" 5 | #include "cuda.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | 9 | typedef layer avgpool_layer; 10 | 11 | image get_avgpool_image(avgpool_layer l); 12 | avgpool_layer make_avgpool_layer(int batch, int w, int h, int c); 13 | void resize_avgpool_layer(avgpool_layer *l, int w, int h); 14 | void forward_avgpool_layer(const avgpool_layer l, network net); 15 | void backward_avgpool_layer(const avgpool_layer l, network net); 16 | 17 | #ifdef GPU 18 | void forward_avgpool_layer_gpu(avgpool_layer l, network net); 19 | void backward_avgpool_layer_gpu(avgpool_layer l, network net); 20 | #endif 21 | 22 | #endif 23 | 24 | -------------------------------------------------------------------------------- /rawDNA/LSTM/Makefile: -------------------------------------------------------------------------------- 1 | CAFFE2_PREFIX=/home/xieyi/opt/caffe2 2 | CAFFE2_HELPER_PREFIX=/home/xieyi/opt/caffe2_helper 3 | CXXFLAGS=`pkg-config --cflags opencv dlib-1 eigen3` -I. -I${CAFFE2_PREFIX}/include \ 4 | -I${CAFFE2_HELPER_PREFIX}/include -std=c++14 -g2 5 | LIBS= -L${CAFFE2_HELPER_PREFIX}/lib -lcaffe2_cpp -lcaffe2_cpp_gpu \ 6 | -L${CAFFE2_PREFIX}/lib -lcaffe2_gpu -lcaffe2 \ 7 | `pkg-config --libs opencv dlib-1 eigen3` \ 8 | -lglog -lprotobuf -lcudart -lcurand \ 9 | -lboost_filesystem -lboost_system -lboost_thread -lboost_regex -lboost_program_options -lpthread -ldl 10 | OBJS=$(patsubst %.cpp,%.o,$(wildcard src/*.cpp)) 11 | 12 | all: train_LSTM 13 | 14 | train_LSTM: src/train_LSTM.o 15 | $(CXX) $^ -o ${@} $(LIBS) 16 | 17 | clean: 18 | $(RM) train_LSTM $(OBJS) 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/network.h: -------------------------------------------------------------------------------- 1 | // Oh boy, why am I about to do this.... 
2 | #ifndef NETWORK_H 3 | #define NETWORK_H 4 | #include "darknet.h" 5 | 6 | #include "image.h" 7 | #include "layer.h" 8 | #include "data.h" 9 | #include "tree.h" 10 | 11 | 12 | #ifdef GPU 13 | void pull_network_output(network *net); 14 | #endif 15 | 16 | void compare_networks(network *n1, network *n2, data d); 17 | char *get_layer_string(LAYER_TYPE a); 18 | 19 | network *make_network(int n); 20 | 21 | 22 | float network_accuracy_multi(network *net, data d, int n); 23 | int get_predicted_class_network(network *net); 24 | void print_network(network *net); 25 | int resize_network(network *net, int w, int h); 26 | void calc_network_cost(network *net); 27 | 28 | #endif 29 | 30 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/scripts/dice_label.sh: -------------------------------------------------------------------------------- 1 | mkdir -p images 2 | mkdir -p images/orig 3 | mkdir -p images/train 4 | mkdir -p images/val 5 | 6 | ffmpeg -i Face1.mp4 images/orig/face1_%6d.jpg 7 | ffmpeg -i Face2.mp4 images/orig/face2_%6d.jpg 8 | ffmpeg -i Face3.mp4 images/orig/face3_%6d.jpg 9 | ffmpeg -i Face4.mp4 images/orig/face4_%6d.jpg 10 | ffmpeg -i Face5.mp4 images/orig/face5_%6d.jpg 11 | ffmpeg -i Face6.mp4 images/orig/face6_%6d.jpg 12 | 13 | mogrify -resize 100x100^ -gravity center -crop 100x100+0+0 +repage images/orig/* 14 | 15 | ls images/orig/* | shuf | head -n 1000 | xargs mv -t images/val 16 | mv images/orig/* images/train 17 | 18 | find `pwd`/images/train > dice.train.list -name \*.jpg 19 | find `pwd`/images/val > dice.val.list -name \*.jpg 20 | 21 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/rnn_layer.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef RNN_LAYER_H 3 | #define RNN_LAYER_H 4 | 5 | #include "activations.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | #define USET 9 | 10 | layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam); 11 | 12 | void forward_rnn_layer(layer l, network net); 13 | void backward_rnn_layer(layer l, network net); 14 | void update_rnn_layer(layer l, update_args a); 15 | 16 | #ifdef GPU 17 | void forward_rnn_layer_gpu(layer l, network net); 18 | void backward_rnn_layer_gpu(layer l, network net); 19 | void update_rnn_layer_gpu(layer l, update_args a); 20 | void push_rnn_layer(layer l); 21 | void pull_rnn_layer(layer l); 22 | #endif 23 | 24 | #endif 25 | 26 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/maxpool_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef MAXPOOL_LAYER_H 2 | #define MAXPOOL_LAYER_H 3 | 4 | #include "image.h" 5 | #include "cuda.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | 9 | typedef layer maxpool_layer; 10 | 11 | image get_maxpool_image(maxpool_layer l); 12 | maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding); 13 | void resize_maxpool_layer(maxpool_layer *l, int w, int h); 14 | void forward_maxpool_layer(const maxpool_layer l, network net); 15 | void backward_maxpool_layer(const maxpool_layer l, network net); 16 | 17 | #ifdef GPU 18 | void forward_maxpool_layer_gpu(maxpool_layer l, network net); 19 | void backward_maxpool_layer_gpu(maxpool_layer l, network net); 20 | #endif 21 | 22 | #endif 23 | 24 | 
-------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/normalization_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef NORMALIZATION_LAYER_H 2 | #define NORMALIZATION_LAYER_H 3 | 4 | #include "image.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa); 9 | void resize_normalization_layer(layer *layer, int h, int w); 10 | void forward_normalization_layer(const layer layer, network net); 11 | void backward_normalization_layer(const layer layer, network net); 12 | void visualize_normalization_layer(layer layer, char *window); 13 | 14 | #ifdef GPU 15 | void forward_normalization_layer_gpu(const layer layer, network net); 16 | void backward_normalization_layer_gpu(const layer layer, network net); 17 | #endif 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/crnn_layer.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef CRNN_LAYER_H 3 | #define CRNN_LAYER_H 4 | 5 | #include "activations.h" 6 | #include "layer.h" 7 | #include "network.h" 8 | 9 | layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize); 10 | 11 | void forward_crnn_layer(layer l, network net); 12 | void backward_crnn_layer(layer l, network net); 13 | void update_crnn_layer(layer l, update_args a); 14 | 15 | #ifdef GPU 16 | void forward_crnn_layer_gpu(layer l, network net); 17 | void backward_crnn_layer_gpu(layer l, network net); 18 | void update_crnn_layer_gpu(layer l, update_args a); 19 | void push_crnn_layer(layer l); 20 | void pull_crnn_layer(layer l); 21 | #endif 22 | 23 | #endif 24 | 25 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/README.md: -------------------------------------------------------------------------------- 1 | # YOLOWrapper 2 | This project implements a C++ wrapper for YOLOv3, which is used here for iris detection. The detected location information will be used for biometric identification in later projects.
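A minimal usage sketch of the wrapper follows. It mirrors the interface declared in YOLOWrapper.h and the main.cpp demo further down in this repository; the image path, threshold, color, and output filename are only illustrative, and the cfg/weight files are assumed to be in place as described in the build steps below.
```cpp
#include <opencv2/opencv.hpp>
#include "YOLOWrapper.h"

int main() {
    cv::Mat img = cv::imread("test/1.jpg");       // any test image
    YOLOWrapper yolo;                              // loads the cfg and weight files configured in YOLOWrapper.h
    auto objects = yolo.predict(img, 0.5);         // class name -> bounding boxes above the confidence threshold
    for (auto & box : objects["person"])           // draw every detection of one class
        cv::rectangle(img, box, cv::Scalar(255, 0, 0), 2);
    cv::imwrite("detections.png", img);
    return 0;
}
```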
3 | 4 | ## How to test the detector on objects of common categories 5 | 6 | ### download weights trained on MS COCO 7 | download with the following commands 8 | ```Shell 9 | wget -P models https://pjreddie.com/media/files/yolov3.weights 10 | wget -P models https://pjreddie.com/media/files/yolov3-tiny.weights 11 | ``` 12 | 13 | ### build everything 14 | build with the following commands 15 | ```Shell 16 | make -C darknet 17 | make 18 | ``` 19 | 20 | ### test detector on street view images 21 | test YOLOv3 with the following commands 22 | ```Shell 23 | make run 24 | ``` 25 | 26 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/YOLOWrapper.h: -------------------------------------------------------------------------------- 1 | #ifndef YOLOWRAPPER_H 2 | #define YOLOWRAPPER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #define BLOCK 512 10 | #include 11 | #include 12 | #include 13 | #include 14 | extern "C" { 15 | #include "darknet.h" 16 | } 17 | 18 | //#define USE_TINY 19 | 20 | using namespace std; 21 | using namespace cv; 22 | 23 | class YOLOWrapper { 24 | static const string cfg_path; 25 | static const string weight_path; 26 | static const string coco_names[]; 27 | network *net; 28 | public: 29 | YOLOWrapper(); 30 | virtual ~YOLOWrapper(); 31 | map > predict(Mat img,float thresh = 0.5); 32 | }; 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/connected_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef CONNECTED_LAYER_H 2 | #define CONNECTED_LAYER_H 3 | 4 | #include "activations.h" 5 | #include "layer.h" 6 | #include "network.h" 7 | 8 | layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam); 9 | 10 | void forward_connected_layer(layer l, network net); 11 | void backward_connected_layer(layer l, network net); 12 | void update_connected_layer(layer l, update_args a); 13 | 14 | #ifdef GPU 15 | void forward_connected_layer_gpu(layer l, network net); 16 | void backward_connected_layer_gpu(layer l, network net); 17 | void update_connected_layer_gpu(layer l, update_args a); 18 | void push_connected_layer(layer l); 19 | void pull_connected_layer(layer l); 20 | #endif 21 | 22 | #endif 23 | 24 | -------------------------------------------------------------------------------- /visual/facial/Makefile: -------------------------------------------------------------------------------- 1 | CAFFE2_PREFIX=/home/xieyi/opt/caffe2 2 | CAFFE2_HELPER_PREFIX=/home/xieyi/opt/caffe2_helper 3 | CXXFLAGS=`pkg-config --cflags opencv dlib-1 eigen3` -I. 
-I${CAFFE2_PREFIX}/include \ 4 | -I${CAFFE2_HELPER_PREFIX}/include -std=c++14 -g2 5 | LIBS= -L${CAFFE2_HELPER_PREFIX}/lib -lcaffe2_cpp -lcaffe2_cpp_gpu \ 6 | -L${CAFFE2_PREFIX}/lib -lcaffe2_gpu -lcaffe2 \ 7 | `pkg-config --libs opencv dlib-1 eigen3` \ 8 | -lglog -lprotobuf -lcudart -lcurand \ 9 | -lboost_filesystem -lboost_system -lboost_thread -lboost_regex -lboost_program_options -lpthread -ldl 10 | OBJS=$(patsubst %.cpp,%.o,$(wildcard src/*.cpp)) 11 | 12 | all: train_facial_classifier 13 | 14 | train_facial_classifier: src/train_facial_classifier.o 15 | $(CXX) $^ -o ${@} $(LIBS) 16 | 17 | clean: 18 | $(RM) train_facial_classifier $(OBJS) 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/Makefile: -------------------------------------------------------------------------------- 1 | CUDA_PREFIX=/usr/local/cuda 2 | CXXFLAGS=-I. `pkg-config --cflags opencv` -I${CUDA_PREFIX}/include -DGPU -DCUDNN -O2 -msse3 -msse4 3 | LIBS=`pkg-config --libs opencv` -Ldarknet -ldarknet -lboost_program_options -lboost_filesystem -lboost_system -L${CUDA_PREFIX}/lib64 -lcudart -lcuda -lcublas -lcurand -lcudnn 4 | OBJS=$(patsubst %.cpp,%.o,$(wildcard *.cpp)) 5 | 6 | all: demo 7 | 8 | demo: YOLOWrapper.o main.o 9 | $(CXX) $^ $(LIBS) -o ${@} 10 | 11 | run: demo 12 | LD_LIBRARY_PATH=darknet:${LD_LIBRARY_PATH} ./demo -i test/1.jpg -o 1.png 13 | LD_LIBRARY_PATH=darknet:${LD_LIBRARY_PATH} ./demo -i test/2.jpg -o 2.png 14 | LD_LIBRARY_PATH=darknet:${LD_LIBRARY_PATH} ./demo -i test/3.jpg -o 3.png 15 | LD_LIBRARY_PATH=darknet:${LD_LIBRARY_PATH} ./demo -i darknet/data/dog.jpg -o dog.png 16 | 17 | clean: 18 | $(RM) $(OBJS) demo 19 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/detector.py: -------------------------------------------------------------------------------- 1 | # Stupid python path shit. 2 | # Instead just add darknet.py to somewhere in your python path 3 | # OK actually that might not be a great idea, idk, work in progress 4 | # Use at your own risk. 
or don't, i don't care 5 | 6 | import sys, os 7 | sys.path.append(os.path.join(os.getcwd(),'python/')) 8 | 9 | import darknet as dn 10 | import pdb 11 | 12 | dn.set_gpu(0) 13 | net = dn.load_net("cfg/yolo-thor.cfg", "/home/pjreddie/backup/yolo-thor_final.weights", 0) 14 | meta = dn.load_meta("cfg/thor.data") 15 | r = dn.detect(net, meta, "data/bedroom.jpg") 16 | print r 17 | 18 | # And then down here you could detect a lot more images like: 19 | r = dn.detect(net, meta, "data/eagle.jpg") 20 | print r 21 | r = dn.detect(net, meta, "data/giraffe.jpg") 22 | print r 23 | r = dn.detect(net, meta, "data/horses.jpg") 24 | print r 25 | r = dn.detect(net, meta, "data/person.jpg") 26 | print r 27 | 28 | -------------------------------------------------------------------------------- /extractedDNA/AllelesClassifier/classifier.h: -------------------------------------------------------------------------------- 1 | #ifndef CLASSIFIER_H 2 | #define CLASSIFIER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | using namespace std; 17 | using namespace boost; 18 | using namespace boost::serialization; 19 | using namespace boost::archive; 20 | namespace ublas = boost::numeric::ublas; 21 | 22 | class Classifier { 23 | map > conprobs; 24 | map priors; 25 | int classnum; 26 | int dimnum; 27 | public: 28 | Classifier(string file); 29 | virtual ~Classifier(); 30 | int predict(vector & v); 31 | }; 32 | 33 | #endif 34 | 35 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/scripts/get_coco_dataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Clone COCO API 4 | git clone https://github.com/pdollar/coco 5 | cd coco 6 | 7 | mkdir images 8 | cd images 9 | 10 | # Download Images 11 | wget -c https://pjreddie.com/media/files/train2014.zip 12 | wget -c https://pjreddie.com/media/files/val2014.zip 13 | 14 | # Unzip 15 | unzip -q train2014.zip 16 | unzip -q val2014.zip 17 | 18 | cd .. 
19 | 20 | # Download COCO Metadata 21 | wget -c https://pjreddie.com/media/files/instances_train-val2014.zip 22 | wget -c https://pjreddie.com/media/files/coco/5k.part 23 | wget -c https://pjreddie.com/media/files/coco/trainvalno5k.part 24 | wget -c https://pjreddie.com/media/files/coco/labels.tgz 25 | tar xzf labels.tgz 26 | unzip -q instances_train-val2014.zip 27 | 28 | # Set Up Image Lists 29 | paste <(awk "{print \"$PWD\"}" <5k.part) 5k.part | tr -d '\t' > 5k.txt 30 | paste <(awk "{print \"$PWD\"}" trainvalno5k.txt 31 | 32 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/deconvolutional_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef DECONVOLUTIONAL_LAYER_H 2 | #define DECONVOLUTIONAL_LAYER_H 3 | 4 | #include "cuda.h" 5 | #include "image.h" 6 | #include "activations.h" 7 | #include "layer.h" 8 | #include "network.h" 9 | 10 | #ifdef GPU 11 | void forward_deconvolutional_layer_gpu(layer l, network net); 12 | void backward_deconvolutional_layer_gpu(layer l, network net); 13 | void update_deconvolutional_layer_gpu(layer l, update_args a); 14 | void push_deconvolutional_layer(layer l); 15 | void pull_deconvolutional_layer(layer l); 16 | #endif 17 | 18 | layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam); 19 | void resize_deconvolutional_layer(layer *l, int h, int w); 20 | void forward_deconvolutional_layer(const layer l, network net); 21 | void update_deconvolutional_layer(layer l, update_args a); 22 | void backward_deconvolutional_layer(layer l, network net); 23 | 24 | #endif 25 | 26 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/python/proverbot.py: -------------------------------------------------------------------------------- 1 | from darknet import * 2 | 3 | def predict_tactic(net, s): 4 | prob = 0 5 | d = c_array(c_float, [0.0]*256) 6 | tac = '' 7 | if not len(s): 8 | s = '\n' 9 | for c in s[:-1]: 10 | d[ord(c)] = 1 11 | pred = predict(net, d) 12 | d[ord(c)] = 0 13 | c = s[-1] 14 | while 1: 15 | d[ord(c)] = 1 16 | pred = predict(net, d) 17 | d[ord(c)] = 0 18 | pred = [pred[i] for i in range(256)] 19 | ind = sample(pred) 20 | c = chr(ind) 21 | prob += math.log(pred[ind]) 22 | if len(tac) and tac[-1] == '.': 23 | break 24 | tac = tac + c 25 | return (tac, prob) 26 | 27 | def predict_tactics(net, s, n): 28 | tacs = [] 29 | for i in range(n): 30 | reset_rnn(net) 31 | tacs.append(predict_tactic(net, s)) 32 | tacs = sorted(tacs, key=lambda x: -x[1]) 33 | return tacs 34 | 35 | net = load_net("cfg/coq.test.cfg", "/home/pjreddie/backup/coq.backup", 0) 36 | t = predict_tactics(net, "+++++\n", 10) 37 | print t 38 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/gemm.h: -------------------------------------------------------------------------------- 1 | #ifndef GEMM_H 2 | #define GEMM_H 3 | 4 | void gemm_bin(int M, int N, int K, float ALPHA, 5 | char *A, int lda, 6 | float *B, int ldb, 7 | float *C, int ldc); 8 | 9 | void gemm(int TA, int TB, int M, int N, int K, float ALPHA, 10 | float *A, int lda, 11 | float *B, int ldb, 12 | float BETA, 13 | float *C, int ldc); 14 | 15 | void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, 16 | float *A, int lda, 17 | float *B, int ldb, 18 | float BETA, 19 | float *C, int ldc); 20 
| 21 | #ifdef GPU 22 | void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, 23 | float *A_gpu, int lda, 24 | float *B_gpu, int ldb, 25 | float BETA, 26 | float *C_gpu, int ldc); 27 | 28 | void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, 29 | float *A, int lda, 30 | float *B, int ldb, 31 | float BETA, 32 | float *C, int ldc); 33 | #endif 34 | #endif 35 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/local_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef LOCAL_LAYER_H 2 | #define LOCAL_LAYER_H 3 | 4 | #include "cuda.h" 5 | #include "image.h" 6 | #include "activations.h" 7 | #include "layer.h" 8 | #include "network.h" 9 | 10 | typedef layer local_layer; 11 | 12 | #ifdef GPU 13 | void forward_local_layer_gpu(local_layer layer, network net); 14 | void backward_local_layer_gpu(local_layer layer, network net); 15 | void update_local_layer_gpu(local_layer layer, update_args a); 16 | 17 | void push_local_layer(local_layer layer); 18 | void pull_local_layer(local_layer layer); 19 | #endif 20 | 21 | local_layer make_local_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation); 22 | 23 | void forward_local_layer(const local_layer layer, network net); 24 | void backward_local_layer(local_layer layer, network net); 25 | void update_local_layer(local_layer layer, update_args a); 26 | 27 | void bias_output(float *output, float *biases, int batch, int n, int size); 28 | void backward_bias(float *bias_updates, float *delta, int batch, int n, int size); 29 | 30 | #endif 31 | 32 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/LICENSE.mit: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Joseph Redmon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /extractedDNA/AllelesClassifier/classifier.cpp: -------------------------------------------------------------------------------- 1 | #include <fstream> 2 | #include "classifier.h" 3 | 4 | Classifier::Classifier(string file) 5 | { 6 | std::ifstream in(file); 7 | if(false == in.is_open()) throw runtime_error("invalid model file"); 8 | text_iarchive ia(in); 9 | ia >> conprobs >> priors; 10 | classnum = conprobs.size(); 11 | #ifndef NDEBUG 12 | assert(4 == conprobs.begin()->second.size1()); 13 | assert(classnum == priors.size()); 14 | #endif 15 | dimnum = conprobs.begin()->second.size2(); 16 | } 17 | 18 | Classifier::~Classifier() 19 | { 20 | } 21 | 22 | int Classifier::predict(vector<char> & v) 23 | { 24 | #ifndef NDEBUG 25 | assert(dimnum == v.size()); 26 | #endif 27 | vector<double> scores(classnum,1); 28 | for(int i = 0 ; i < classnum ; i++) { 29 | scores[i] *= priors[i]; 30 | for(int j = 0 ; j < dimnum ; j++) 31 | switch(v[j]) { // multiply in the conditional probability of the observed base (naive Bayes) 32 | case 'A':case 'a': scores[i] *= conprobs[i](0,j); break; 33 | case 'T':case 't': scores[i] *= conprobs[i](1,j); break; 34 | case 'C':case 'c': scores[i] *= conprobs[i](2,j); break; 35 | case 'G':case 'g': scores[i] *= conprobs[i](3,j); break; 36 | default: break; 37 | } 38 | } 39 | vector<double>::iterator max_iter = max_element(scores.begin(),scores.end()); 40 | return max_iter - scores.begin(); 41 | } 42 | 43 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/dropout_layer_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" { 6 | #include "dropout_layer.h" 7 | #include "cuda.h" 8 | #include "utils.h" 9 | } 10 | 11 | __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) 12 | { 13 | int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; 14 | if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale; 15 | } 16 | 17 | void forward_dropout_layer_gpu(dropout_layer layer, network net) 18 | { 19 | if (!net.train) return; 20 | int size = layer.inputs*layer.batch; 21 | cuda_random(layer.rand_gpu, size); 22 | /* 23 | int i; 24 | for(i = 0; i < size; ++i){ 25 | layer.rand[i] = rand_uniform(); 26 | } 27 | cuda_push_array(layer.rand_gpu, layer.rand, size); 28 | */ 29 | 30 | yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale); 31 | check_error(cudaPeekAtLastError()); 32 | } 33 | 34 | void backward_dropout_layer_gpu(dropout_layer layer, network net) 35 | { 36 | if(!net.delta_gpu) return; 37 | int size = layer.inputs*layer.batch; 38 | 39 | yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale); 40 | check_error(cudaPeekAtLastError()); 41 | } 42 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/col2im.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <math.h> 3 | void col2im_add_pixel(float *im, int height, int width, int channels, 4 | int row, int col, int channel, int pad, float val) 5 | { 6 | row -= pad; 7 | col -= pad; 8 | 9 | if (row < 0 || col < 0 || 10 | row >= height || col >= width) return; 11 | im[col + width*(row + height*channel)] += val; 12 | } 13 | //This one might be too, can't remember.
14 | void col2im_cpu(float* data_col, 15 | int channels, int height, int width, 16 | int ksize, int stride, int pad, float* data_im) 17 | { 18 | int c,h,w; 19 | int height_col = (height + 2*pad - ksize) / stride + 1; 20 | int width_col = (width + 2*pad - ksize) / stride + 1; 21 | 22 | int channels_col = channels * ksize * ksize; 23 | for (c = 0; c < channels_col; ++c) { 24 | int w_offset = c % ksize; 25 | int h_offset = (c / ksize) % ksize; 26 | int c_im = c / ksize / ksize; 27 | for (h = 0; h < height_col; ++h) { 28 | for (w = 0; w < width_col; ++w) { 29 | int im_row = h_offset + h * stride; 30 | int im_col = w_offset + w * stride; 31 | int col_index = (c * height_col + h) * width_col + w; 32 | double val = data_col[col_index]; 33 | col2im_add_pixel(data_im, height, width, channels, 34 | im_row, im_col, c_im, pad, val); 35 | } 36 | } 37 | } 38 | } 39 | 40 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/im2col.c: -------------------------------------------------------------------------------- 1 | #include "im2col.h" 2 | #include 3 | float im2col_get_pixel(float *im, int height, int width, int channels, 4 | int row, int col, int channel, int pad) 5 | { 6 | row -= pad; 7 | col -= pad; 8 | 9 | if (row < 0 || col < 0 || 10 | row >= height || col >= width) return 0; 11 | return im[col + width*(row + height*channel)]; 12 | } 13 | 14 | //From Berkeley Vision's Caffe! 15 | //https://github.com/BVLC/caffe/blob/master/LICENSE 16 | void im2col_cpu(float* data_im, 17 | int channels, int height, int width, 18 | int ksize, int stride, int pad, float* data_col) 19 | { 20 | int c,h,w; 21 | int height_col = (height + 2*pad - ksize) / stride + 1; 22 | int width_col = (width + 2*pad - ksize) / stride + 1; 23 | 24 | int channels_col = channels * ksize * ksize; 25 | for (c = 0; c < channels_col; ++c) { 26 | int w_offset = c % ksize; 27 | int h_offset = (c / ksize) % ksize; 28 | int c_im = c / ksize / ksize; 29 | for (h = 0; h < height_col; ++h) { 30 | for (w = 0; w < width_col; ++w) { 31 | int im_row = h_offset + h * stride; 32 | int im_col = w_offset + w * stride; 33 | int col_index = (c * height_col + h) * width_col + w; 34 | data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, 35 | im_row, im_col, c_im, pad); 36 | } 37 | } 38 | } 39 | } 40 | 41 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "YOLOWrapper.h" 8 | 9 | #define CROPPED 10 | 11 | using namespace std; 12 | using namespace boost::program_options; 13 | using namespace cv; 14 | 15 | int main(int argc,char ** argv) 16 | { 17 | string img_path; 18 | string output; 19 | options_description desc; 20 | desc.add_options() 21 | ("help,h","print current message") 22 | ("input,i",value(&img_path),"input image") 23 | ("output,o",value(&output)->default_value("output.png"),"output image"); 24 | variables_map vm; 25 | store(parse_command_line(argc,argv,desc),vm); 26 | notify(vm); 27 | 28 | if(1 == vm.count("help")) { 29 | cout< > objects = yolo.predict(img); 46 | for(auto & object : objects["person"]) { 47 | rectangle(img,object,Scalar(255,0,0),img.cols / 200); 48 | } 49 | cout<<"detected "< 4 | 5 | void demo_art(char *cfgfile, char *weightfile, int cam_index) 6 | { 7 | #ifdef OPENCV 8 | network *net = load_network(cfgfile, 
weightfile, 0); 9 | set_batch_network(net, 1); 10 | 11 | srand(2222222); 12 | CvCapture * cap; 13 | 14 | cap = cvCaptureFromCAM(cam_index); 15 | 16 | char *window = "ArtJudgementBot9000!!!"; 17 | if(!cap) error("Couldn't connect to webcam.\n"); 18 | cvNamedWindow(window, CV_WINDOW_NORMAL); 19 | cvResizeWindow(window, 512, 512); 20 | int i; 21 | int idx[] = {37, 401, 434}; 22 | int n = sizeof(idx)/sizeof(idx[0]); 23 | 24 | while(1){ 25 | image in = get_image_from_stream(cap); 26 | image in_s = resize_image(in, net->w, net->h); 27 | show_image(in, window); 28 | 29 | float *p = network_predict(net, in_s.data); 30 | 31 | printf("\033[2J"); 32 | printf("\033[1;1H"); 33 | 34 | float score = 0; 35 | for(i = 0; i < n; ++i){ 36 | float s = p[idx[i]]; 37 | if (s > score) score = s; 38 | } 39 | score = score; 40 | printf("I APPRECIATE THIS ARTWORK: %10.7f%%\n", score*100); 41 | printf("["); 42 | int upper = 30; 43 | for(i = 0; i < upper; ++i){ 44 | printf("%c", ((i+.5) < score*upper) ? 219 : ' '); 45 | } 46 | printf("]\n"); 47 | 48 | free_image(in_s); 49 | free_image(in); 50 | 51 | cvWaitKey(1); 52 | } 53 | #endif 54 | } 55 | 56 | 57 | void run_art(int argc, char **argv) 58 | { 59 | int cam_index = find_int_arg(argc, argv, "-c", 0); 60 | char *cfg = argv[2]; 61 | char *weights = argv[3]; 62 | demo_art(cfg, weights, cam_index); 63 | } 64 | 65 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/detector-scipy-opencv.py: -------------------------------------------------------------------------------- 1 | # Stupid python path shit. 2 | # Instead just add darknet.py to somewhere in your python path 3 | # OK actually that might not be a great idea, idk, work in progress 4 | # Use at your own risk. 
or don't, i don't care 5 | 6 | from scipy.misc import imread 7 | import cv2 8 | 9 | def array_to_image(arr): 10 | arr = arr.transpose(2,0,1) 11 | c = arr.shape[0] 12 | h = arr.shape[1] 13 | w = arr.shape[2] 14 | arr = (arr/255.0).flatten() 15 | data = dn.c_array(dn.c_float, arr) 16 | im = dn.IMAGE(w,h,c,data) 17 | return im 18 | 19 | def detect2(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45): 20 | boxes = dn.make_boxes(net) 21 | probs = dn.make_probs(net) 22 | num = dn.num_boxes(net) 23 | dn.network_detect(net, image, thresh, hier_thresh, nms, boxes, probs) 24 | res = [] 25 | for j in range(num): 26 | for i in range(meta.classes): 27 | if probs[j][i] > 0: 28 | res.append((meta.names[i], probs[j][i], (boxes[j].x, boxes[j].y, boxes[j].w, boxes[j].h))) 29 | res = sorted(res, key=lambda x: -x[1]) 30 | dn.free_ptrs(dn.cast(probs, dn.POINTER(dn.c_void_p)), num) 31 | return res 32 | 33 | import sys, os 34 | sys.path.append(os.path.join(os.getcwd(),'python/')) 35 | 36 | import darknet as dn 37 | 38 | # Darknet 39 | net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0) 40 | meta = dn.load_meta("cfg/coco.data") 41 | r = dn.detect(net, meta, "data/dog.jpg") 42 | print r 43 | 44 | # scipy 45 | arr= imread('data/dog.jpg') 46 | im = array_to_image(arr) 47 | r = detect2(net, meta, im) 48 | print r 49 | 50 | # OpenCV 51 | arr = cv2.imread('data/dog.jpg') 52 | im = array_to_image(arr) 53 | dn.rgbgr_image(im) 54 | r = detect2(net, meta, im) 55 | print r 56 | 57 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/list.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "list.h" 4 | 5 | list *make_list() 6 | { 7 | list *l = malloc(sizeof(list)); 8 | l->size = 0; 9 | l->front = 0; 10 | l->back = 0; 11 | return l; 12 | } 13 | 14 | /* 15 | void transfer_node(list *s, list *d, node *n) 16 | { 17 | node *prev, *next; 18 | prev = n->prev; 19 | next = n->next; 20 | if(prev) prev->next = next; 21 | if(next) next->prev = prev; 22 | --s->size; 23 | if(s->front == n) s->front = next; 24 | if(s->back == n) s->back = prev; 25 | } 26 | */ 27 | 28 | void *list_pop(list *l){ 29 | if(!l->back) return 0; 30 | node *b = l->back; 31 | void *val = b->val; 32 | l->back = b->prev; 33 | if(l->back) l->back->next = 0; 34 | free(b); 35 | --l->size; 36 | 37 | return val; 38 | } 39 | 40 | void list_insert(list *l, void *val) 41 | { 42 | node *new = malloc(sizeof(node)); 43 | new->val = val; 44 | new->next = 0; 45 | 46 | if(!l->back){ 47 | l->front = new; 48 | new->prev = 0; 49 | }else{ 50 | l->back->next = new; 51 | new->prev = l->back; 52 | } 53 | l->back = new; 54 | ++l->size; 55 | } 56 | 57 | void free_node(node *n) 58 | { 59 | node *next; 60 | while(n) { 61 | next = n->next; 62 | free(n); 63 | n = next; 64 | } 65 | } 66 | 67 | void free_list(list *l) 68 | { 69 | free_node(l->front); 70 | free(l); 71 | } 72 | 73 | void free_list_contents(list *l) 74 | { 75 | node *n = l->front; 76 | while(n){ 77 | free(n->val); 78 | n = n->next; 79 | } 80 | } 81 | 82 | void **list_to_array(list *l) 83 | { 84 | void **a = calloc(l->size, sizeof(void*)); 85 | int count = 0; 86 | node *n = l->front; 87 | while(n){ 88 | a[count++] = n->val; 89 | n = n->next; 90 | } 91 | return a; 92 | } 93 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/dropout_layer.c: 
-------------------------------------------------------------------------------- 1 | #include "dropout_layer.h" 2 | #include "utils.h" 3 | #include "cuda.h" 4 | #include 5 | #include 6 | 7 | dropout_layer make_dropout_layer(int batch, int inputs, float probability) 8 | { 9 | dropout_layer l = {0}; 10 | l.type = DROPOUT; 11 | l.probability = probability; 12 | l.inputs = inputs; 13 | l.outputs = inputs; 14 | l.batch = batch; 15 | l.rand = calloc(inputs*batch, sizeof(float)); 16 | l.scale = 1./(1.-probability); 17 | l.forward = forward_dropout_layer; 18 | l.backward = backward_dropout_layer; 19 | #ifdef GPU 20 | l.forward_gpu = forward_dropout_layer_gpu; 21 | l.backward_gpu = backward_dropout_layer_gpu; 22 | l.rand_gpu = cuda_make_array(l.rand, inputs*batch); 23 | #endif 24 | fprintf(stderr, "dropout p = %.2f %4d -> %4d\n", probability, inputs, inputs); 25 | return l; 26 | } 27 | 28 | void resize_dropout_layer(dropout_layer *l, int inputs) 29 | { 30 | l->rand = realloc(l->rand, l->inputs*l->batch*sizeof(float)); 31 | #ifdef GPU 32 | cuda_free(l->rand_gpu); 33 | 34 | l->rand_gpu = cuda_make_array(l->rand, inputs*l->batch); 35 | #endif 36 | } 37 | 38 | void forward_dropout_layer(dropout_layer l, network net) 39 | { 40 | int i; 41 | if (!net.train) return; 42 | for(i = 0; i < l.batch * l.inputs; ++i){ 43 | float r = rand_uniform(0, 1); 44 | l.rand[i] = r; 45 | if(r < l.probability) net.input[i] = 0; 46 | else net.input[i] *= l.scale; 47 | } 48 | } 49 | 50 | void backward_dropout_layer(dropout_layer l, network net) 51 | { 52 | int i; 53 | if(!net.delta) return; 54 | for(i = 0; i < l.batch * l.inputs; ++i){ 55 | float r = l.rand[i]; 56 | if(r < l.probability) net.delta[i] = 0; 57 | else net.delta[i] *= l.scale; 58 | } 59 | } 60 | 61 | -------------------------------------------------------------------------------- /rawDNA/LSTM/models/lstm_train_save.pbtxt: -------------------------------------------------------------------------------- 1 | op { 2 | input: "LSTM1/seq_lengths" 3 | input: "LSTM1/i2h_w" 4 | input: "LSTM1/i2h_b" 5 | input: "LSTM1/gates_t_w" 6 | input: "LSTM1/gates_t_b" 7 | input: "LSTM1/hidden_init" 8 | input: "LSTM1/cell_init" 9 | input: "LSTM2/seq_lengths" 10 | input: "LSTM2/i2h_w" 11 | input: "LSTM2/i2h_b" 12 | input: "LSTM2/gates_t_w" 13 | input: "LSTM2/gates_t_b" 14 | input: "LSTM2/hidden_init" 15 | input: "LSTM2/cell_init" 16 | input: "fc1_w" 17 | input: "fc1_b" 18 | input: "fc2_w" 19 | input: "fc2_b" 20 | input: "iter" 21 | input: "LSTM1/i2h_w_moment_1" 22 | input: "LSTM1/i2h_w_moment_2" 23 | input: "LSTM1/i2h_b_moment_1" 24 | input: "LSTM1/i2h_b_moment_2" 25 | input: "LSTM1/hidden_init_moment_1" 26 | input: "LSTM1/hidden_init_moment_2" 27 | input: "LSTM1/cell_init_moment_1" 28 | input: "LSTM1/cell_init_moment_2" 29 | input: "LSTM1/gates_t_w_moment_1" 30 | input: "LSTM1/gates_t_w_moment_2" 31 | input: "LSTM1/gates_t_b_moment_1" 32 | input: "LSTM1/gates_t_b_moment_2" 33 | input: "LSTM2/i2h_w_moment_1" 34 | input: "LSTM2/i2h_w_moment_2" 35 | input: "LSTM2/i2h_b_moment_1" 36 | input: "LSTM2/i2h_b_moment_2" 37 | input: "LSTM2/hidden_init_moment_1" 38 | input: "LSTM2/hidden_init_moment_2" 39 | input: "LSTM2/cell_init_moment_1" 40 | input: "LSTM2/cell_init_moment_2" 41 | input: "LSTM2/gates_t_w_moment_1" 42 | input: "LSTM2/gates_t_w_moment_2" 43 | input: "LSTM2/gates_t_b_moment_1" 44 | input: "LSTM2/gates_t_b_moment_2" 45 | input: "fc1_w_moment_1" 46 | input: "fc1_w_moment_2" 47 | input: "fc1_b_moment_1" 48 | input: "fc1_b_moment_2" 49 | input: "fc2_w_moment_1" 50 | input: 
"fc2_w_moment_2" 51 | input: "fc2_b_moment_1" 52 | input: "fc2_b_moment_2" 53 | type: "Save" 54 | arg { 55 | name: "db_type" 56 | s: "lmdb" 57 | } 58 | arg { 59 | name: "db" 60 | s: "LSTM_params" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/utils.h: -------------------------------------------------------------------------------- 1 | #ifndef UTILS_H 2 | #define UTILS_H 3 | #include 4 | #include 5 | #include "darknet.h" 6 | #include "list.h" 7 | 8 | #define TIME(a) \ 9 | do { \ 10 | double start = what_time_is_it_now(); \ 11 | a; \ 12 | printf("%s took: %f seconds\n", #a, what_time_is_it_now() - start); \ 13 | } while (0) 14 | 15 | #define TWO_PI 6.2831853071795864769252866f 16 | 17 | double what_time_is_it_now(); 18 | void shuffle(void *arr, size_t n, size_t size); 19 | void sorta_shuffle(void *arr, size_t n, size_t size, size_t sections); 20 | void free_ptrs(void **ptrs, int n); 21 | int alphanum_to_int(char c); 22 | char int_to_alphanum(int i); 23 | int read_int(int fd); 24 | void write_int(int fd, int n); 25 | void read_all(int fd, char *buffer, size_t bytes); 26 | void write_all(int fd, char *buffer, size_t bytes); 27 | int read_all_fail(int fd, char *buffer, size_t bytes); 28 | int write_all_fail(int fd, char *buffer, size_t bytes); 29 | void find_replace(char *str, char *orig, char *rep, char *output); 30 | void malloc_error(); 31 | void file_error(char *s); 32 | void strip(char *s); 33 | void strip_char(char *s, char bad); 34 | list *split_str(char *s, char delim); 35 | char *fgetl(FILE *fp); 36 | list *parse_csv_line(char *line); 37 | char *copy_string(char *s); 38 | int count_fields(char *line); 39 | float *parse_fields(char *line, int n); 40 | void translate_array(float *a, int n, float s); 41 | float constrain(float min, float max, float a); 42 | int constrain_int(int a, int min, int max); 43 | float rand_scale(float s); 44 | int rand_int(int min, int max); 45 | void mean_arrays(float **a, int n, int els, float *avg); 46 | float dist_array(float *a, float *b, int n, int sub); 47 | float **one_hot_encode(float *a, int n, int k); 48 | float sec(clock_t clocks); 49 | void print_statistics(float *a, int n); 50 | int int_index(int *a, int val, int n); 51 | 52 | #endif 53 | 54 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/avgpool_layer_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" { 6 | #include "avgpool_layer.h" 7 | #include "cuda.h" 8 | } 9 | 10 | __global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output) 11 | { 12 | int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; 13 | if(id >= n) return; 14 | 15 | int k = id % c; 16 | id /= c; 17 | int b = id; 18 | 19 | int i; 20 | int out_index = (k + c*b); 21 | output[out_index] = 0; 22 | for(i = 0; i < w*h; ++i){ 23 | int in_index = i + h*w*(k + b*c); 24 | output[out_index] += input[in_index]; 25 | } 26 | output[out_index] /= w*h; 27 | } 28 | 29 | __global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta) 30 | { 31 | int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; 32 | if(id >= n) return; 33 | 34 | int k = id % c; 35 | id /= c; 36 | int b = id; 37 | 38 | int i; 39 | int out_index = (k + c*b); 40 | 
for(i = 0; i < w*h; ++i){ 41 | int in_index = i + h*w*(k + b*c); 42 | in_delta[in_index] += out_delta[out_index] / (w*h); 43 | } 44 | } 45 | 46 | extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net) 47 | { 48 | size_t n = layer.c*layer.batch; 49 | 50 | forward_avgpool_layer_kernel<<>>(n, layer.w, layer.h, layer.c, net.input_gpu, layer.output_gpu); 51 | check_error(cudaPeekAtLastError()); 52 | } 53 | 54 | extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, network net) 55 | { 56 | size_t n = layer.c*layer.batch; 57 | 58 | backward_avgpool_layer_kernel<<>>(n, layer.w, layer.h, layer.c, net.delta_gpu, layer.delta_gpu); 59 | check_error(cudaPeekAtLastError()); 60 | } 61 | 62 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/activation_layer.c: -------------------------------------------------------------------------------- 1 | #include "activation_layer.h" 2 | #include "utils.h" 3 | #include "cuda.h" 4 | #include "blas.h" 5 | #include "gemm.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | layer make_activation_layer(int batch, int inputs, ACTIVATION activation) 13 | { 14 | layer l = {0}; 15 | l.type = ACTIVE; 16 | 17 | l.inputs = inputs; 18 | l.outputs = inputs; 19 | l.batch=batch; 20 | 21 | l.output = calloc(batch*inputs, sizeof(float*)); 22 | l.delta = calloc(batch*inputs, sizeof(float*)); 23 | 24 | l.forward = forward_activation_layer; 25 | l.backward = backward_activation_layer; 26 | #ifdef GPU 27 | l.forward_gpu = forward_activation_layer_gpu; 28 | l.backward_gpu = backward_activation_layer_gpu; 29 | 30 | l.output_gpu = cuda_make_array(l.output, inputs*batch); 31 | l.delta_gpu = cuda_make_array(l.delta, inputs*batch); 32 | #endif 33 | l.activation = activation; 34 | fprintf(stderr, "Activation Layer: %d inputs\n", inputs); 35 | return l; 36 | } 37 | 38 | void forward_activation_layer(layer l, network net) 39 | { 40 | copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); 41 | activate_array(l.output, l.outputs*l.batch, l.activation); 42 | } 43 | 44 | void backward_activation_layer(layer l, network net) 45 | { 46 | gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta); 47 | copy_cpu(l.outputs*l.batch, l.delta, 1, net.delta, 1); 48 | } 49 | 50 | #ifdef GPU 51 | 52 | void forward_activation_layer_gpu(layer l, network net) 53 | { 54 | copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1); 55 | activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); 56 | } 57 | 58 | void backward_activation_layer_gpu(layer l, network net) 59 | { 60 | gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); 61 | copy_gpu(l.outputs*l.batch, l.delta_gpu, 1, net.delta_gpu, 1); 62 | } 63 | #endif 64 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/l2norm_layer.c: -------------------------------------------------------------------------------- 1 | #include "l2norm_layer.h" 2 | #include "activations.h" 3 | #include "blas.h" 4 | #include "cuda.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | layer make_l2norm_layer(int batch, int inputs) 13 | { 14 | fprintf(stderr, "l2norm %4d\n", inputs); 15 | layer l = {0}; 16 | l.type = L2NORM; 17 | l.batch = batch; 18 | l.inputs = inputs; 19 | l.outputs = inputs; 20 | l.output = calloc(inputs*batch, sizeof(float)); 21 | l.scales = calloc(inputs*batch, sizeof(float)); 22 | l.delta = 
calloc(inputs*batch, sizeof(float)); 23 | 24 | l.forward = forward_l2norm_layer; 25 | l.backward = backward_l2norm_layer; 26 | #ifdef GPU 27 | l.forward_gpu = forward_l2norm_layer_gpu; 28 | l.backward_gpu = backward_l2norm_layer_gpu; 29 | 30 | l.output_gpu = cuda_make_array(l.output, inputs*batch); 31 | l.scales_gpu = cuda_make_array(l.output, inputs*batch); 32 | l.delta_gpu = cuda_make_array(l.delta, inputs*batch); 33 | #endif 34 | return l; 35 | } 36 | 37 | void forward_l2norm_layer(const layer l, network net) 38 | { 39 | copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); 40 | l2normalize_cpu(l.output, l.scales, l.batch, l.out_c, l.out_w*l.out_h); 41 | } 42 | 43 | void backward_l2norm_layer(const layer l, network net) 44 | { 45 | //axpy_cpu(l.inputs*l.batch, 1, l.scales, 1, l.delta, 1); 46 | axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1); 47 | } 48 | 49 | #ifdef GPU 50 | 51 | void forward_l2norm_layer_gpu(const layer l, network net) 52 | { 53 | copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1); 54 | l2normalize_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_w*l.out_h); 55 | } 56 | 57 | void backward_l2norm_layer_gpu(const layer l, network net) 58 | { 59 | axpy_gpu(l.batch*l.inputs, 1, l.scales_gpu, 1, l.delta_gpu, 1); 60 | axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1); 61 | } 62 | 63 | #endif 64 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/avgpool_layer.c: -------------------------------------------------------------------------------- 1 | #include "avgpool_layer.h" 2 | #include "cuda.h" 3 | #include 4 | 5 | avgpool_layer make_avgpool_layer(int batch, int w, int h, int c) 6 | { 7 | fprintf(stderr, "avg %4d x%4d x%4d -> %4d\n", w, h, c, c); 8 | avgpool_layer l = {0}; 9 | l.type = AVGPOOL; 10 | l.batch = batch; 11 | l.h = h; 12 | l.w = w; 13 | l.c = c; 14 | l.out_w = 1; 15 | l.out_h = 1; 16 | l.out_c = c; 17 | l.outputs = l.out_c; 18 | l.inputs = h*w*c; 19 | int output_size = l.outputs * batch; 20 | l.output = calloc(output_size, sizeof(float)); 21 | l.delta = calloc(output_size, sizeof(float)); 22 | l.forward = forward_avgpool_layer; 23 | l.backward = backward_avgpool_layer; 24 | #ifdef GPU 25 | l.forward_gpu = forward_avgpool_layer_gpu; 26 | l.backward_gpu = backward_avgpool_layer_gpu; 27 | l.output_gpu = cuda_make_array(l.output, output_size); 28 | l.delta_gpu = cuda_make_array(l.delta, output_size); 29 | #endif 30 | return l; 31 | } 32 | 33 | void resize_avgpool_layer(avgpool_layer *l, int w, int h) 34 | { 35 | l->w = w; 36 | l->h = h; 37 | l->inputs = h*w*l->c; 38 | } 39 | 40 | void forward_avgpool_layer(const avgpool_layer l, network net) 41 | { 42 | int b,i,k; 43 | 44 | for(b = 0; b < l.batch; ++b){ 45 | for(k = 0; k < l.c; ++k){ 46 | int out_index = k + b*l.c; 47 | l.output[out_index] = 0; 48 | for(i = 0; i < l.h*l.w; ++i){ 49 | int in_index = i + l.h*l.w*(k + b*l.c); 50 | l.output[out_index] += net.input[in_index]; 51 | } 52 | l.output[out_index] /= l.h*l.w; 53 | } 54 | } 55 | } 56 | 57 | void backward_avgpool_layer(const avgpool_layer l, network net) 58 | { 59 | int b,i,k; 60 | 61 | for(b = 0; b < l.batch; ++b){ 62 | for(k = 0; k < l.c; ++k){ 63 | int out_index = k + b*l.c; 64 | for(i = 0; i < l.h*l.w; ++i){ 65 | int in_index = i + l.h*l.w*(k + b*l.c); 66 | net.delta[in_index] += l.delta[out_index] / (l.h*l.w); 67 | } 68 | } 69 | } 70 | } 71 | 72 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # AI-components 2 | ### Introduction 3 | AI Components of the GeneSourceCode project is the subproject in charge of machine-learning-related tasks. It aims to make full use of genetic and other medical data with the help of modern AI technologies. AI Components currently focuses on: 4 | 5 | 1. Prediction of diseases and traits directly from raw DNA sequences. 6 | We will test both traditional classification/regression algorithms and popular deep neural network approaches such as LSTM, p-LSTM, IndRNN, attention models and so on to process raw DNA sequence data. 7 | 8 | 2. Prediction of diseases and traits from hand-designed features. 9 | Hand-designed features extracted from raw DNA, RNA or histone data may sometimes be discriminative enough to make the prediction task viable. We will extract and learn from features obtained this way. 10 | 11 | 3. Medical applications based on visual clues. 12 | Computer vision has become a reliable prediction tool since the rise of deep learning. Medical scientists have applied it to tasks such as predicting or detecting certain diseases and processing X-ray images. We will implement these applications in this subproject and offer them as optional service modules. 13 | 14 | 4. Mining fitness status from physical examination and motion data. 15 | We will also mine data provided by users to detect potential fitness problems or reveal health status. 16 | 17 | ### Components 18 | 1. Prediction of diseases and traits directly from raw DNA sequences. 19 | 20 | (1) rawDNA/LSTM: classification based on DNA subsequences. 21 | 22 | You can train a classifier with train_LSTM. The dataset generation tools will be released soon. 23 | 24 | 2. Prediction of diseases and traits from hand-designed features. 25 | 26 | (1) extractedDNA/AllelesClassifier: classification based on alleles. 27 | 28 | You can train a classifier on polymorphic alleles. A minimal usage sketch is given at the end of this README. 29 | 30 | 3. Medical applications based on visual clues. 31 | 32 | (1) visual/facial: classification based on facial images. 33 | 34 | (2) visual/iris: biometric identification and illness detection from visual information in the iris. 35 | 36 | You can train a classifier with train_facial_classifier. The dataset generation tools will be released soon. 37 | 38 | 4. Mining fitness status from physical examination and motion data.
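For the AllelesClassifier component, the sketch below shows how a trained model might be loaded and queried. It is a minimal sketch only: the model file name `alleles_model.txt` is a placeholder, and it assumes `Classifier::predict` takes the nucleotide sequence as a vector of characters whose length matches the dimension the model was trained on (see extractedDNA/AllelesClassifier/classifier.h).

```cpp
// Minimal usage sketch for the AllelesClassifier (illustrative only).
// "alleles_model.txt" is a placeholder for a model file produced by the train tool.
#include <iostream>
#include <vector>
#include "classifier.h"

int main()
{
    Classifier clf("alleles_model.txt");           // loads serialized priors and conditional probabilities
    std::vector<char> seq = {'A', 'T', 'C', 'G'};  // one allele sequence; length must equal the model's dimension
    int label = clf.predict(seq);                  // index of the most probable class
    std::cout << "predicted class: " << label << std::endl;
    return 0;
}
```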
39 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/data.h: -------------------------------------------------------------------------------- 1 | #ifndef DATA_H 2 | #define DATA_H 3 | #include 4 | 5 | #include "darknet.h" 6 | #include "matrix.h" 7 | #include "list.h" 8 | #include "image.h" 9 | #include "tree.h" 10 | 11 | static inline float distance_from_edge(int x, int max) 12 | { 13 | int dx = (max/2) - x; 14 | if (dx < 0) dx = -dx; 15 | dx = (max/2) + 1 - dx; 16 | dx *= 2; 17 | float dist = (float)dx/max; 18 | if (dist > 1) dist = 1; 19 | return dist; 20 | } 21 | void load_data_blocking(load_args args); 22 | 23 | 24 | void print_letters(float *pred, int n); 25 | data load_data_captcha(char **paths, int n, int m, int k, int w, int h); 26 | data load_data_captcha_encode(char **paths, int n, int m, int w, int h); 27 | data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure); 28 | data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure); 29 | matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center); 30 | data load_data_super(char **paths, int n, int m, int w, int h, int scale); 31 | data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center); 32 | data load_data_regression(char **paths, int n, int m, int classes, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure); 33 | data load_go(char *filename); 34 | 35 | 36 | data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h); 37 | 38 | void get_random_batch(data d, int n, float *X, float *y); 39 | data get_data_part(data d, int part, int total); 40 | data get_random_data(data d, int num); 41 | data load_categorical_data_csv(char *filename, int target, int k); 42 | void normalize_data_rows(data d); 43 | void scale_data_rows(data d, float s); 44 | void translate_data_rows(data d, float s); 45 | void randomize_data(data d); 46 | data *split_data(data d, int part, int total); 47 | data concat_datas(data *d, int n); 48 | void fill_truth(char *path, char **labels, int k, float *truth); 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/logistic_layer.c: -------------------------------------------------------------------------------- 1 | #include "logistic_layer.h" 2 | #include "activations.h" 3 | #include "blas.h" 4 | #include "cuda.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | layer make_logistic_layer(int batch, int inputs) 13 | { 14 | fprintf(stderr, "logistic x entropy %4d\n", inputs); 15 | layer l = {0}; 16 | l.type = LOGXENT; 17 | l.batch = batch; 18 | l.inputs = inputs; 19 | l.outputs = inputs; 20 | l.loss = calloc(inputs*batch, sizeof(float)); 21 | l.output = calloc(inputs*batch, sizeof(float)); 22 | l.delta = calloc(inputs*batch, sizeof(float)); 23 | l.cost = calloc(1, sizeof(float)); 24 | 25 | l.forward = forward_logistic_layer; 26 | l.backward = backward_logistic_layer; 27 | #ifdef GPU 28 | l.forward_gpu = forward_logistic_layer_gpu; 29 | 
l.backward_gpu = backward_logistic_layer_gpu; 30 | 31 | l.output_gpu = cuda_make_array(l.output, inputs*batch); 32 | l.loss_gpu = cuda_make_array(l.loss, inputs*batch); 33 | l.delta_gpu = cuda_make_array(l.delta, inputs*batch); 34 | #endif 35 | return l; 36 | } 37 | 38 | void forward_logistic_layer(const layer l, network net) 39 | { 40 | copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); 41 | activate_array(l.output, l.outputs*l.batch, LOGISTIC); 42 | if(net.truth){ 43 | logistic_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss); 44 | l.cost[0] = sum_array(l.loss, l.batch*l.inputs); 45 | } 46 | } 47 | 48 | void backward_logistic_layer(const layer l, network net) 49 | { 50 | axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1); 51 | } 52 | 53 | #ifdef GPU 54 | 55 | void forward_logistic_layer_gpu(const layer l, network net) 56 | { 57 | copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1); 58 | activate_array_gpu(l.output_gpu, l.outputs*l.batch, LOGISTIC); 59 | if(net.truth){ 60 | logistic_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth_gpu, l.delta_gpu, l.loss_gpu); 61 | cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs); 62 | l.cost[0] = sum_array(l.loss, l.batch*l.inputs); 63 | } 64 | } 65 | 66 | void backward_logistic_layer_gpu(const layer l, network net) 67 | { 68 | axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1); 69 | } 70 | 71 | #endif 72 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/convolutional_layer.h: -------------------------------------------------------------------------------- 1 | #ifndef CONVOLUTIONAL_LAYER_H 2 | #define CONVOLUTIONAL_LAYER_H 3 | 4 | #include "cuda.h" 5 | #include "image.h" 6 | #include "activations.h" 7 | #include "layer.h" 8 | #include "network.h" 9 | 10 | typedef layer convolutional_layer; 11 | 12 | #ifdef GPU 13 | void forward_convolutional_layer_gpu(convolutional_layer layer, network net); 14 | void backward_convolutional_layer_gpu(convolutional_layer layer, network net); 15 | void update_convolutional_layer_gpu(convolutional_layer layer, update_args a); 16 | 17 | void push_convolutional_layer(convolutional_layer layer); 18 | void pull_convolutional_layer(convolutional_layer layer); 19 | 20 | void add_bias_gpu(float *output, float *biases, int batch, int n, int size); 21 | void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size); 22 | void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t); 23 | #ifdef CUDNN 24 | void cudnn_convolutional_setup(layer *l); 25 | #endif 26 | #endif 27 | 28 | convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam); 29 | void resize_convolutional_layer(convolutional_layer *layer, int w, int h); 30 | void forward_convolutional_layer(const convolutional_layer layer, network net); 31 | void update_convolutional_layer(convolutional_layer layer, update_args a); 32 | image *visualize_convolutional_layer(convolutional_layer layer, char *window, image *prev_weights); 33 | void binarize_weights(float *weights, int n, int size, float *binary); 34 | void swap_binary(convolutional_layer *l); 35 | void binarize_weights2(float *weights, int n, int size, char *binary, float *scales); 36 | 37 | void backward_convolutional_layer(convolutional_layer 
layer, network net); 38 | 39 | void add_bias(float *output, float *biases, int batch, int n, int size); 40 | void backward_bias(float *bias_updates, float *delta, int batch, int n, int size); 41 | 42 | image get_convolutional_image(convolutional_layer layer); 43 | image get_convolutional_delta(convolutional_layer layer); 44 | image get_convolutional_weight(convolutional_layer layer, int i); 45 | 46 | int convolutional_out_height(convolutional_layer layer); 47 | int convolutional_out_width(convolutional_layer layer); 48 | 49 | #endif 50 | 51 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/scripts/voc_label.py: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | import pickle 3 | import os 4 | from os import listdir, getcwd 5 | from os.path import join 6 | 7 | sets=[('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')] 8 | 9 | classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] 10 | 11 | 12 | def convert(size, box): 13 | dw = 1./(size[0]) 14 | dh = 1./(size[1]) 15 | x = (box[0] + box[1])/2.0 - 1 16 | y = (box[2] + box[3])/2.0 - 1 17 | w = box[1] - box[0] 18 | h = box[3] - box[2] 19 | x = x*dw 20 | w = w*dw 21 | y = y*dh 22 | h = h*dh 23 | return (x,y,w,h) 24 | 25 | def convert_annotation(year, image_id): 26 | in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id)) 27 | out_file = open('VOCdevkit/VOC%s/labels/%s.txt'%(year, image_id), 'w') 28 | tree=ET.parse(in_file) 29 | root = tree.getroot() 30 | size = root.find('size') 31 | w = int(size.find('width').text) 32 | h = int(size.find('height').text) 33 | 34 | for obj in root.iter('object'): 35 | difficult = obj.find('difficult').text 36 | cls = obj.find('name').text 37 | if cls not in classes or int(difficult)==1: 38 | continue 39 | cls_id = classes.index(cls) 40 | xmlbox = obj.find('bndbox') 41 | b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) 42 | bb = convert((w,h), b) 43 | out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n') 44 | 45 | wd = getcwd() 46 | 47 | for year, image_set in sets: 48 | if not os.path.exists('VOCdevkit/VOC%s/labels/'%(year)): 49 | os.makedirs('VOCdevkit/VOC%s/labels/'%(year)) 50 | image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set)).read().strip().split() 51 | list_file = open('%s_%s.txt'%(year, image_set), 'w') 52 | for image_id in image_ids: 53 | list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n'%(wd, year, image_id)) 54 | convert_annotation(year, image_id) 55 | list_file.close() 56 | 57 | os.system("cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt > train.txt") 58 | os.system("cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt > train.all.txt") 59 | 60 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/im2col_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" { 6 | #include "im2col.h" 7 | #include "cuda.h" 8 | } 9 | 10 | // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu 11 | 
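// im2col unrolls each ksize x ksize x channels receptive field of the input into one
// column of data_col (channels*ksize*ksize rows, height_col*width_col columns), so the
// convolution itself can then be computed as a single matrix multiply against the filter weights.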
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE 12 | 13 | __global__ void im2col_gpu_kernel(const int n, const float* data_im, 14 | const int height, const int width, const int ksize, 15 | const int pad, 16 | const int stride, 17 | const int height_col, const int width_col, 18 | float *data_col) { 19 | int index = blockIdx.x*blockDim.x+threadIdx.x; 20 | for(; index < n; index += blockDim.x*gridDim.x){ 21 | int w_out = index % width_col; 22 | int h_index = index / width_col; 23 | int h_out = h_index % height_col; 24 | int channel_in = h_index / height_col; 25 | int channel_out = channel_in * ksize * ksize; 26 | int h_in = h_out * stride - pad; 27 | int w_in = w_out * stride - pad; 28 | float* data_col_ptr = data_col; 29 | data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; 30 | const float* data_im_ptr = data_im; 31 | data_im_ptr += (channel_in * height + h_in) * width + w_in; 32 | for (int i = 0; i < ksize; ++i) { 33 | for (int j = 0; j < ksize; ++j) { 34 | int h = h_in + i; 35 | int w = w_in + j; 36 | 37 | *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? 38 | data_im_ptr[i * width + j] : 0; 39 | 40 | //*data_col_ptr = data_im_ptr[ii * width + jj]; 41 | 42 | data_col_ptr += height_col * width_col; 43 | } 44 | } 45 | } 46 | } 47 | 48 | void im2col_gpu(float *im, 49 | int channels, int height, int width, 50 | int ksize, int stride, int pad, float *data_col){ 51 | // We are going to launch channels * height_col * width_col kernels, each 52 | // kernel responsible for copying a single-channel grid. 53 | int height_col = (height + 2 * pad - ksize) / stride + 1; 54 | int width_col = (width + 2 * pad - ksize) / stride + 1; 55 | int num_kernels = channels * height_col * width_col; 56 | im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, 57 | BLOCK>>>( 58 | num_kernels, im, height, width, ksize, pad, 59 | stride, height_col, 60 | width_col, data_col); 61 | } 62 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/image.h: -------------------------------------------------------------------------------- 1 | #ifndef IMAGE_H 2 | #define IMAGE_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "box.h" 10 | #include "darknet.h" 11 | 12 | #ifndef __cplusplus 13 | #ifdef OPENCV 14 | int fill_image_from_stream(CvCapture *cap, image im); 15 | image ipl_to_image(IplImage* src); 16 | void ipl_into_image(IplImage* src, image im); 17 | void flush_stream_buffer(CvCapture *cap, int n); 18 | void show_image_cv(image p, const char *name, IplImage *disp); 19 | #endif 20 | #endif 21 | 22 | float get_color(int c, int x, int max); 23 | void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b); 24 | void draw_bbox(image a, box bbox, int w, float r, float g, float b); 25 | void write_label(image a, int r, int c, image *characters, char *string, float *rgb); 26 | image image_distance(image a, image b); 27 | void scale_image(image m, float s); 28 | image rotate_crop_image(image im, float rad, float s, int w, int h, float dx, float dy, float aspect); 29 | image random_crop_image(image im, int w, int h); 30 | image random_augment_image(image im, float angle, float aspect, int low, int high, int w, int h); 31 | augment_args random_augment_args(image im, float angle, float aspect, int low, int high, int w, int h); 32 | void letterbox_image_into(image im, int w, int h, image boxed); 33 | image resize_max(image im, int max); 34 | void 
translate_image(image m, float s); 35 | void embed_image(image source, image dest, int dx, int dy); 36 | void place_image(image im, int w, int h, int dx, int dy, image canvas); 37 | void saturate_image(image im, float sat); 38 | void exposure_image(image im, float sat); 39 | void distort_image(image im, float hue, float sat, float val); 40 | void saturate_exposure_image(image im, float sat, float exposure); 41 | void rgb_to_hsv(image im); 42 | void hsv_to_rgb(image im); 43 | void yuv_to_rgb(image im); 44 | void rgb_to_yuv(image im); 45 | 46 | 47 | image collapse_image_layers(image source, int border); 48 | image collapse_images_horz(image *ims, int n); 49 | image collapse_images_vert(image *ims, int n); 50 | 51 | void show_image_normalized(image im, const char *name); 52 | void show_images(image *ims, int n, char *window); 53 | void show_image_layers(image p, char *name); 54 | void show_image_collapsed(image p, char *name); 55 | 56 | void print_image(image m); 57 | 58 | image make_empty_image(int w, int h, int c); 59 | void copy_image_into(image src, image dest); 60 | 61 | image get_image_layer(image m, int l); 62 | 63 | #endif 64 | 65 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/col2im_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" { 6 | #include "col2im.h" 7 | #include "cuda.h" 8 | } 9 | 10 | // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu 11 | // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE 12 | 13 | __global__ void col2im_gpu_kernel(const int n, const float* data_col, 14 | const int height, const int width, const int ksize, 15 | const int pad, 16 | const int stride, 17 | const int height_col, const int width_col, 18 | float *data_im) { 19 | int index = blockIdx.x*blockDim.x+threadIdx.x; 20 | for(; index < n; index += blockDim.x*gridDim.x){ 21 | float val = 0; 22 | int w = index % width + pad; 23 | int h = (index / width) % height + pad; 24 | int c = index / (width * height); 25 | // compute the start and end of the output 26 | int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1; 27 | int w_col_end = min(w / stride + 1, width_col); 28 | int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1; 29 | int h_col_end = min(h / stride + 1, height_col); 30 | // equivalent implementation 31 | int offset = 32 | (c * ksize * ksize + h * ksize + w) * height_col * width_col; 33 | int coeff_h_col = (1 - stride * ksize * height_col) * width_col; 34 | int coeff_w_col = (1 - stride * height_col * width_col); 35 | for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { 36 | for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { 37 | val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; 38 | } 39 | } 40 | data_im[index] += val; 41 | } 42 | } 43 | 44 | void col2im_gpu(float *data_col, 45 | int channels, int height, int width, 46 | int ksize, int stride, int pad, float *data_im){ 47 | // We are going to launch channels * height_col * width_col kernels, each 48 | // kernel responsible for copying a single-channel grid. 
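// Worked example: for height = width = 8, ksize = 3, stride = 1, pad = 1 we get
// height_col = width_col = (8 + 2*1 - 3)/1 + 1 = 8, and num_kernels = channels*height*width
// below, i.e. one thread per input-image pixel; each thread accumulates every column
// entry whose receptive field covers its pixel.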
49 | int height_col = (height + 2 * pad - ksize) / stride + 1; 50 | int width_col = (width + 2 * pad - ksize) / stride + 1; 51 | int num_kernels = channels * height * width; 52 | col2im_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, 53 | BLOCK>>>( 54 | num_kernels, data_col, height, width, ksize, pad, 55 | stride, height_col, 56 | width_col, data_im); 57 | } 58 | 59 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/swag.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | #include 3 | 4 | void train_swag(char *cfgfile, char *weightfile) 5 | { 6 | char *train_images = "data/voc.0712.trainval"; 7 | char *backup_directory = "/home/pjreddie/backup/"; 8 | srand(time(0)); 9 | char *base = basecfg(cfgfile); 10 | printf("%s\n", base); 11 | float avg_loss = -1; 12 | network net = parse_network_cfg(cfgfile); 13 | if(weightfile){ 14 | load_weights(&net, weightfile); 15 | } 16 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay); 17 | int imgs = net.batch*net.subdivisions; 18 | int i = *net.seen/imgs; 19 | data train, buffer; 20 | 21 | layer l = net.layers[net.n - 1]; 22 | 23 | int side = l.side; 24 | int classes = l.classes; 25 | float jitter = l.jitter; 26 | 27 | list *plist = get_paths(train_images); 28 | //int N = plist->size; 29 | char **paths = (char **)list_to_array(plist); 30 | 31 | load_args args = {0}; 32 | args.w = net.w; 33 | args.h = net.h; 34 | args.paths = paths; 35 | args.n = imgs; 36 | args.m = plist->size; 37 | args.classes = classes; 38 | args.jitter = jitter; 39 | args.num_boxes = side; 40 | args.d = &buffer; 41 | args.type = REGION_DATA; 42 | 43 | pthread_t load_thread = load_data_in_thread(args); 44 | clock_t time; 45 | //while(i*imgs < N*120){ 46 | while(get_current_batch(net) < net.max_batches){ 47 | i += 1; 48 | time=clock(); 49 | pthread_join(load_thread, 0); 50 | train = buffer; 51 | load_thread = load_data_in_thread(args); 52 | 53 | printf("Loaded: %lf seconds\n", sec(clock()-time)); 54 | 55 | time=clock(); 56 | float loss = train_network(net, train); 57 | if (avg_loss < 0) avg_loss = loss; 58 | avg_loss = avg_loss*.9 + loss*.1; 59 | 60 | printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs); 61 | if(i%1000==0 || i == 600){ 62 | char buff[256]; 63 | sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); 64 | save_weights(net, buff); 65 | } 66 | free_data(train); 67 | } 68 | char buff[256]; 69 | sprintf(buff, "%s/%s_final.weights", backup_directory, base); 70 | save_weights(net, buff); 71 | } 72 | 73 | void run_swag(int argc, char **argv) 74 | { 75 | if(argc < 4){ 76 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 77 | return; 78 | } 79 | 80 | char *cfg = argv[3]; 81 | char *weights = (argc > 4) ? 
argv[4] : 0; 82 | if(0==strcmp(argv[2], "train")) train_swag(cfg, weights); 83 | } 84 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/activations.h: -------------------------------------------------------------------------------- 1 | #ifndef ACTIVATIONS_H 2 | #define ACTIVATIONS_H 3 | #include "darknet.h" 4 | #include "cuda.h" 5 | #include "math.h" 6 | 7 | ACTIVATION get_activation(char *s); 8 | 9 | char *get_activation_string(ACTIVATION a); 10 | float activate(float x, ACTIVATION a); 11 | float gradient(float x, ACTIVATION a); 12 | void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta); 13 | void activate_array(float *x, const int n, const ACTIVATION a); 14 | #ifdef GPU 15 | void activate_array_gpu(float *x, int n, ACTIVATION a); 16 | void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta); 17 | #endif 18 | 19 | static inline float stair_activate(float x) 20 | { 21 | int n = floor(x); 22 | if (n%2 == 0) return floor(x/2.); 23 | else return (x - n) + floor(x/2.); 24 | } 25 | static inline float hardtan_activate(float x) 26 | { 27 | if (x < -1) return -1; 28 | if (x > 1) return 1; 29 | return x; 30 | } 31 | static inline float linear_activate(float x){return x;} 32 | static inline float logistic_activate(float x){return 1./(1. + exp(-x));} 33 | static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;} 34 | static inline float relu_activate(float x){return x*(x>0);} 35 | static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} 36 | static inline float relie_activate(float x){return (x>0) ? x : .01*x;} 37 | static inline float ramp_activate(float x){return x*(x>0)+.1*x;} 38 | static inline float leaky_activate(float x){return (x>0) ? x : .1*x;} 39 | static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);} 40 | static inline float plse_activate(float x) 41 | { 42 | if(x < -4) return .01 * (x + 4); 43 | if(x > 4) return .01 * (x - 4) + 1; 44 | return .125*x + .5; 45 | } 46 | 47 | static inline float lhtan_activate(float x) 48 | { 49 | if(x < 0) return .001*x; 50 | if(x > 1) return .001*(x-1) + 1; 51 | return x; 52 | } 53 | static inline float lhtan_gradient(float x) 54 | { 55 | if(x > 0 && x < 1) return 1; 56 | return .001; 57 | } 58 | 59 | static inline float hardtan_gradient(float x) 60 | { 61 | if (x > -1 && x < 1) return 1; 62 | return 0; 63 | } 64 | static inline float linear_gradient(float x){return 1;} 65 | static inline float logistic_gradient(float x){return (1-x)*x;} 66 | static inline float loggy_gradient(float x) 67 | { 68 | float y = (x+1.)/2.; 69 | return 2*(1-y)*y; 70 | } 71 | static inline float stair_gradient(float x) 72 | { 73 | if (floor(x) == x) return 0; 74 | return 1; 75 | } 76 | static inline float relu_gradient(float x){return (x>0);} 77 | static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);} 78 | static inline float relie_gradient(float x){return (x>0) ? 1 : .01;} 79 | static inline float ramp_gradient(float x){return (x>0)+.1;} 80 | static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;} 81 | static inline float tanh_gradient(float x){return 1-x*x;} 82 | static inline float plse_gradient(float x){return (x < 0 || x > 1) ? 
.01 : .125;} 83 | 84 | #endif 85 | 86 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/YOLOWrapper.cpp: -------------------------------------------------------------------------------- 1 | #include "YOLOWrapper.h" 2 | 3 | #ifdef USE_TINY 4 | const string YOLOWrapper::cfg_path = "darknet/cfg/yolov3-tiny.cfg"; 5 | const string YOLOWrapper::weight_path = "models/yolov3-tiny.weights"; 6 | #else 7 | const string YOLOWrapper::cfg_path = "darknet/cfg/yolov3.cfg"; 8 | const string YOLOWrapper::weight_path = "models/yolov3.weights"; 9 | #endif 10 | const string YOLOWrapper::coco_names[] = { 11 | "person","bicycle","car","motorbike","aeroplane","bus","train","truck","boat","traffic light", 12 | "fire hydrant","stop sign","parking meter","bench","bird","cat","dog","horse","sheep","cow", 13 | "elephant","bear","zebra","giraffe","backpack","umbrella","handbag","tie","suitcase","frisbee", 14 | "skis","snowboard","sports ball","kite","baseball bat","baseball glove","skateboard","surfboard", 15 | "tennis racket","bottle","wine glass","cup","fork","knife","spoon","bowl","banana","apple", 16 | "sandwich","orange","broccoli","carrot","hot dog","pizza","donut","cake","chair","sofa", 17 | "pottedplant","bed","diningtable","toilet","tvmonitor","laptop","mouse","remote","keyboard", 18 | "cell phone","microwave","oven","toaster","sink","refrigerator","book","clock","vase", 19 | "scissors","teddy bear","hair drier","toothbrush" 20 | }; 21 | 22 | YOLOWrapper::YOLOWrapper() 23 | { 24 | cuda_set_device(0); 25 | net = load_network(const_cast(cfg_path.c_str()),const_cast(weight_path.c_str()),0); 26 | set_batch_network(net,1); 27 | } 28 | 29 | YOLOWrapper::~YOLOWrapper() 30 | { 31 | } 32 | 33 | map > YOLOWrapper::predict(Mat img,float thresh) 34 | { 35 | #ifndef NDEBUG 36 | assert(false == img.empty()); 37 | #endif 38 | map > retVal; 39 | //allocate image 40 | image im = make_image(img.cols,img.rows,3); 41 | for(int c = 0 ; c < img.channels() ; c++) 42 | for(int h = 0 ; h < img.rows ; h++) 43 | for(int w = 0 ; w < img.cols ; w++) { 44 | int dst_index = w + img.cols * h + img.cols * img.rows * c; 45 | im.data[dst_index] = static_cast(img.ptr(h)[w * img.channels() + c] / 255.0); 46 | } 47 | image sized = letterbox_image(im,net->w,net->h); 48 | //detect 49 | float *X = sized.data; 50 | network_predict(net,X); 51 | int nboxes = 0; 52 | detection *dets = get_network_boxes(net,im.w,im.h,thresh,0.5,0,1,&nboxes); 53 | layer l = net->layers[net->n-1]; 54 | do_nms_sort(dets,nboxes,l.classes,0.45); 55 | //extract detection results 56 | for(int i = 0 ; i < nboxes ; i++) { 57 | int _class = -1; 58 | for(int j = 0 ; j < l.classes ; j++) { 59 | if(dets[i].prob[j] > thresh) if(_class < 0) _class = j; 60 | } 61 | if(_class >= 0) { 62 | box b = dets[i].bbox; 63 | int left = (b.x - b.w / 2.) * im.w; 64 | int right = (b.x + b.w / 2.) * im.w; 65 | int top = (b.y - b.h / 2.) * im.h; 66 | int bot = (b.y + b.h / 2.) 
* im.h; 67 | 68 | if(left < 0) left = 0; 69 | if(right > im.w - 1) right = im.w - 1; 70 | if(top < 0) top = 0; 71 | if(bot > im.h - 1) bot = im.h - 1; 72 | 73 | retVal[coco_names[_class]].push_back(Rect(Point(left,top),Point(right + 1,bot + 1))); 74 | } 75 | } 76 | //free image 77 | free_detections(dets,nboxes); 78 | free_image(im); 79 | free_image(sized); 80 | return retVal; 81 | } 82 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/crop_layer.c: -------------------------------------------------------------------------------- 1 | #include "crop_layer.h" 2 | #include "cuda.h" 3 | #include 4 | 5 | image get_crop_image(crop_layer l) 6 | { 7 | int h = l.out_h; 8 | int w = l.out_w; 9 | int c = l.out_c; 10 | return float_to_image(w,h,c,l.output); 11 | } 12 | 13 | void backward_crop_layer(const crop_layer l, network net){} 14 | void backward_crop_layer_gpu(const crop_layer l, network net){} 15 | 16 | crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure) 17 | { 18 | fprintf(stderr, "Crop Layer: %d x %d -> %d x %d x %d image\n", h,w,crop_height,crop_width,c); 19 | crop_layer l = {0}; 20 | l.type = CROP; 21 | l.batch = batch; 22 | l.h = h; 23 | l.w = w; 24 | l.c = c; 25 | l.scale = (float)crop_height / h; 26 | l.flip = flip; 27 | l.angle = angle; 28 | l.saturation = saturation; 29 | l.exposure = exposure; 30 | l.out_w = crop_width; 31 | l.out_h = crop_height; 32 | l.out_c = c; 33 | l.inputs = l.w * l.h * l.c; 34 | l.outputs = l.out_w * l.out_h * l.out_c; 35 | l.output = calloc(l.outputs*batch, sizeof(float)); 36 | l.forward = forward_crop_layer; 37 | l.backward = backward_crop_layer; 38 | 39 | #ifdef GPU 40 | l.forward_gpu = forward_crop_layer_gpu; 41 | l.backward_gpu = backward_crop_layer_gpu; 42 | l.output_gpu = cuda_make_array(l.output, l.outputs*batch); 43 | l.rand_gpu = cuda_make_array(0, l.batch*8); 44 | #endif 45 | return l; 46 | } 47 | 48 | void resize_crop_layer(layer *l, int w, int h) 49 | { 50 | l->w = w; 51 | l->h = h; 52 | 53 | l->out_w = l->scale*w; 54 | l->out_h = l->scale*h; 55 | 56 | l->inputs = l->w * l->h * l->c; 57 | l->outputs = l->out_h * l->out_w * l->out_c; 58 | 59 | l->output = realloc(l->output, l->batch*l->outputs*sizeof(float)); 60 | #ifdef GPU 61 | cuda_free(l->output_gpu); 62 | l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch); 63 | #endif 64 | } 65 | 66 | 67 | void forward_crop_layer(const crop_layer l, network net) 68 | { 69 | int i,j,c,b,row,col; 70 | int index; 71 | int count = 0; 72 | int flip = (l.flip && rand()%2); 73 | int dh = rand()%(l.h - l.out_h + 1); 74 | int dw = rand()%(l.w - l.out_w + 1); 75 | float scale = 2; 76 | float trans = -1; 77 | if(l.noadjust){ 78 | scale = 1; 79 | trans = 0; 80 | } 81 | if(!net.train){ 82 | flip = 0; 83 | dh = (l.h - l.out_h)/2; 84 | dw = (l.w - l.out_w)/2; 85 | } 86 | for(b = 0; b < l.batch; ++b){ 87 | for(c = 0; c < l.c; ++c){ 88 | for(i = 0; i < l.out_h; ++i){ 89 | for(j = 0; j < l.out_w; ++j){ 90 | if(flip){ 91 | col = l.w - dw - j - 1; 92 | }else{ 93 | col = j + dw; 94 | } 95 | row = i + dh; 96 | index = col+l.w*(row+l.h*(c + l.c*b)); 97 | l.output[count++] = net.input[index]*scale + trans; 98 | } 99 | } 100 | } 101 | } 102 | } 103 | 104 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/shortcut_layer.c: 
-------------------------------------------------------------------------------- 1 | #include "shortcut_layer.h" 2 | #include "cuda.h" 3 | #include "blas.h" 4 | #include "activations.h" 5 | 6 | #include 7 | #include 8 | 9 | layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2) 10 | { 11 | fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c); 12 | layer l = {0}; 13 | l.type = SHORTCUT; 14 | l.batch = batch; 15 | l.w = w2; 16 | l.h = h2; 17 | l.c = c2; 18 | l.out_w = w; 19 | l.out_h = h; 20 | l.out_c = c; 21 | l.outputs = w*h*c; 22 | l.inputs = l.outputs; 23 | 24 | l.index = index; 25 | 26 | l.delta = calloc(l.outputs*batch, sizeof(float)); 27 | l.output = calloc(l.outputs*batch, sizeof(float));; 28 | 29 | l.forward = forward_shortcut_layer; 30 | l.backward = backward_shortcut_layer; 31 | #ifdef GPU 32 | l.forward_gpu = forward_shortcut_layer_gpu; 33 | l.backward_gpu = backward_shortcut_layer_gpu; 34 | 35 | l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch); 36 | l.output_gpu = cuda_make_array(l.output, l.outputs*batch); 37 | #endif 38 | return l; 39 | } 40 | 41 | void resize_shortcut_layer(layer *l, int w, int h) 42 | { 43 | assert(l->w == l->out_w); 44 | assert(l->h == l->out_h); 45 | l->w = l->out_w = w; 46 | l->h = l->out_h = h; 47 | l->outputs = w*h*l->out_c; 48 | l->inputs = l->outputs; 49 | l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float)); 50 | l->output = realloc(l->output, l->outputs*l->batch*sizeof(float)); 51 | 52 | #ifdef GPU 53 | cuda_free(l->output_gpu); 54 | cuda_free(l->delta_gpu); 55 | l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch); 56 | l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch); 57 | #endif 58 | 59 | } 60 | 61 | 62 | void forward_shortcut_layer(const layer l, network net) 63 | { 64 | copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1); 65 | shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output); 66 | activate_array(l.output, l.outputs*l.batch, l.activation); 67 | } 68 | 69 | void backward_shortcut_layer(const layer l, network net) 70 | { 71 | gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta); 72 | axpy_cpu(l.outputs*l.batch, l.alpha, l.delta, 1, net.delta, 1); 73 | shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta); 74 | } 75 | 76 | #ifdef GPU 77 | void forward_shortcut_layer_gpu(const layer l, network net) 78 | { 79 | copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1); 80 | shortcut_gpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output_gpu); 81 | activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); 82 | } 83 | 84 | void backward_shortcut_layer_gpu(const layer l, network net) 85 | { 86 | gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); 87 | axpy_gpu(l.outputs*l.batch, l.alpha, l.delta_gpu, 1, net.delta_gpu, 1); 88 | shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta_gpu); 89 | } 90 | #endif 91 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/Makefile: -------------------------------------------------------------------------------- 1 | GPU=1 2 | CUDNN=1 3 | OPENCV=0 4 | OPENMP=0 5 | DEBUG=0 6 | 7 | ARCH= -gencode arch=compute_30,code=sm_30 \ 8 | -gencode 
arch=compute_35,code=sm_35 \ 9 | -gencode arch=compute_50,code=[sm_50,compute_50] \ 10 | -gencode arch=compute_52,code=[sm_52,compute_52] 11 | # -gencode arch=compute_20,code=[sm_20,sm_21] \ This one is deprecated? 12 | 13 | # This is what I use, uncomment if you know your arch and want to specify 14 | # ARCH= -gencode arch=compute_52,code=compute_52 15 | 16 | VPATH=./src/:./examples 17 | SLIB=libdarknet.so 18 | ALIB=libdarknet.a 19 | EXEC=darknet 20 | OBJDIR=./obj/ 21 | 22 | CC=gcc 23 | NVCC=nvcc -ccbin=gcc-6 24 | AR=ar 25 | ARFLAGS=rcs 26 | OPTS=-Ofast 27 | LDFLAGS= -lm -pthread 28 | COMMON= -Iinclude/ -Isrc/ 29 | CFLAGS=-Wall -Wno-unused-result -Wno-unknown-pragmas -Wfatal-errors -fPIC 30 | 31 | ifeq ($(OPENMP), 1) 32 | CFLAGS+= -fopenmp 33 | endif 34 | 35 | ifeq ($(DEBUG), 1) 36 | OPTS=-O0 -g 37 | endif 38 | 39 | CFLAGS+=$(OPTS) 40 | 41 | ifeq ($(OPENCV), 1) 42 | COMMON+= -DOPENCV 43 | CFLAGS+= -DOPENCV 44 | LDFLAGS+= `pkg-config --libs opencv` 45 | COMMON+= `pkg-config --cflags opencv` 46 | endif 47 | 48 | ifeq ($(GPU), 1) 49 | COMMON+= -DGPU -I/usr/local/cuda/include/ 50 | CFLAGS+= -DGPU 51 | LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand 52 | endif 53 | 54 | ifeq ($(CUDNN), 1) 55 | COMMON+= -DCUDNN 56 | CFLAGS+= -DCUDNN 57 | LDFLAGS+= -lcudnn 58 | endif 59 | 60 | OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o detection_layer.o route_layer.o upsample_layer.o box.o normalization_layer.o avgpool_layer.o layer.o local_layer.o shortcut_layer.o logistic_layer.o activation_layer.o rnn_layer.o gru_layer.o crnn_layer.o demo.o batchnorm_layer.o region_layer.o reorg_layer.o tree.o lstm_layer.o l2norm_layer.o yolo_layer.o 61 | EXECOBJA=captcha.o lsd.o super.o art.o tag.o cifar.o go.o rnn.o segmenter.o regressor.o classifier.o coco.o yolo.o detector.o nightmare.o darknet.o 62 | ifeq ($(GPU), 1) 63 | LDFLAGS+= -lstdc++ 64 | OBJ+=convolutional_kernels.o deconvolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o avgpool_layer_kernels.o 65 | endif 66 | 67 | EXECOBJ = $(addprefix $(OBJDIR), $(EXECOBJA)) 68 | OBJS = $(addprefix $(OBJDIR), $(OBJ)) 69 | DEPS = $(wildcard src/*.h) Makefile include/darknet.h 70 | 71 | all: obj backup results $(SLIB) $(ALIB) $(EXEC) 72 | #all: obj results $(SLIB) $(ALIB) $(EXEC) 73 | 74 | 75 | $(EXEC): $(EXECOBJ) $(ALIB) 76 | $(CC) $(COMMON) $(CFLAGS) $^ -o $@ $(LDFLAGS) $(ALIB) 77 | 78 | $(ALIB): $(OBJS) 79 | $(AR) $(ARFLAGS) $@ $^ 80 | 81 | $(SLIB): $(OBJS) 82 | $(CC) $(CFLAGS) -shared $^ -o $@ $(LDFLAGS) 83 | 84 | $(OBJDIR)%.o: %.c $(DEPS) 85 | $(CC) $(COMMON) $(CFLAGS) -c $< -o $@ 86 | 87 | $(OBJDIR)%.o: %.cu $(DEPS) 88 | $(NVCC) $(ARCH) $(COMMON) --compiler-options "$(CFLAGS)" -c $< -o $@ 89 | 90 | obj: 91 | mkdir -p obj 92 | backup: 93 | mkdir -p backup 94 | results: 95 | mkdir -p results 96 | 97 | .PHONY: clean 98 | 99 | clean: 100 | rm -rf $(OBJS) $(SLIB) $(ALIB) $(EXEC) $(EXECOBJ) $(OBJDIR)/* 101 | 102 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/maxpool_layer_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" 
{ 6 | #include "maxpool_layer.h" 7 | #include "cuda.h" 8 | } 9 | 10 | __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) 11 | { 12 | int h = (in_h + 2*pad)/stride; 13 | int w = (in_w + 2*pad)/stride; 14 | int c = in_c; 15 | 16 | int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; 17 | if(id >= n) return; 18 | 19 | int j = id % w; 20 | id /= w; 21 | int i = id % h; 22 | id /= h; 23 | int k = id % c; 24 | id /= c; 25 | int b = id; 26 | 27 | int w_offset = -pad; 28 | int h_offset = -pad; 29 | 30 | int out_index = j + w*(i + h*(k + c*b)); 31 | float max = -INFINITY; 32 | int max_i = -1; 33 | int l, m; 34 | for(l = 0; l < size; ++l){ 35 | for(m = 0; m < size; ++m){ 36 | int cur_h = h_offset + i*stride + l; 37 | int cur_w = w_offset + j*stride + m; 38 | int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); 39 | int valid = (cur_h >= 0 && cur_h < in_h && 40 | cur_w >= 0 && cur_w < in_w); 41 | float val = (valid != 0) ? input[index] : -INFINITY; 42 | max_i = (val > max) ? index : max_i; 43 | max = (val > max) ? val : max; 44 | } 45 | } 46 | output[out_index] = max; 47 | indexes[out_index] = max_i; 48 | } 49 | 50 | __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes) 51 | { 52 | int h = (in_h + 2*pad)/stride; 53 | int w = (in_w + 2*pad)/stride; 54 | int c = in_c; 55 | int area = (size-1)/stride; 56 | 57 | int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; 58 | if(id >= n) return; 59 | 60 | int index = id; 61 | int j = id % in_w; 62 | id /= in_w; 63 | int i = id % in_h; 64 | id /= in_h; 65 | int k = id % in_c; 66 | id /= in_c; 67 | int b = id; 68 | 69 | int w_offset = -pad; 70 | int h_offset = -pad; 71 | 72 | float d = 0; 73 | int l, m; 74 | for(l = -area; l < area+1; ++l){ 75 | for(m = -area; m < area+1; ++m){ 76 | int out_w = (j-w_offset)/stride + m; 77 | int out_h = (i-h_offset)/stride + l; 78 | int out_index = out_w + w*(out_h + h*(k + c*b)); 79 | int valid = (out_w >= 0 && out_w < w && 80 | out_h >= 0 && out_h < h); 81 | d += (valid && indexes[out_index] == index) ? 
delta[out_index] : 0; 82 | } 83 | } 84 | prev_delta[index] += d; 85 | } 86 | 87 | extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net) 88 | { 89 | int h = layer.out_h; 90 | int w = layer.out_w; 91 | int c = layer.c; 92 | 93 | size_t n = h*w*c*layer.batch; 94 | 95 | forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu); 96 | check_error(cudaPeekAtLastError()); 97 | } 98 | 99 | extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net) 100 | { 101 | size_t n = layer.h*layer.w*layer.c*layer.batch; 102 | 103 | backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu); 104 | check_error(cudaPeekAtLastError()); 105 | } 106 | 107 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/upsample_layer.c: -------------------------------------------------------------------------------- 1 | #include "upsample_layer.h" 2 | #include "cuda.h" 3 | #include "blas.h" 4 | 5 | #include <stdio.h> 6 | 7 | layer make_upsample_layer(int batch, int w, int h, int c, int stride) 8 | { 9 | layer l = {0}; 10 | l.type = UPSAMPLE; 11 | l.batch = batch; 12 | l.w = w; 13 | l.h = h; 14 | l.c = c; 15 | l.out_w = w*stride; 16 | l.out_h = h*stride; 17 | l.out_c = c; 18 | if(stride < 0){ 19 | stride = -stride; 20 | l.reverse=1; 21 | l.out_w = w/stride; 22 | l.out_h = h/stride; 23 | } 24 | l.stride = stride; 25 | l.outputs = l.out_w*l.out_h*l.out_c; 26 | l.inputs = l.w*l.h*l.c; 27 | l.delta = calloc(l.outputs*batch, sizeof(float)); 28 | l.output = calloc(l.outputs*batch, sizeof(float)); 29 | 30 | l.forward = forward_upsample_layer; 31 | l.backward = backward_upsample_layer; 32 | #ifdef GPU 33 | l.forward_gpu = forward_upsample_layer_gpu; 34 | l.backward_gpu = backward_upsample_layer_gpu; 35 | 36 | l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch); 37 | l.output_gpu = cuda_make_array(l.output, l.outputs*batch); 38 | #endif 39 | if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); 40 | else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); 41 | return l; 42 | } 43 | 44 | void resize_upsample_layer(layer *l, int w, int h) 45 | { 46 | l->w = w; 47 | l->h = h; 48 | l->out_w = w*l->stride; 49 | l->out_h = h*l->stride; 50 | if(l->reverse){ 51 | l->out_w = w/l->stride; 52 | l->out_h = h/l->stride; 53 | } 54 | l->outputs = l->out_w*l->out_h*l->out_c; 55 | l->inputs = l->h*l->w*l->c; 56 | l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float)); 57 | l->output = realloc(l->output, l->outputs*l->batch*sizeof(float)); 58 | 59 | #ifdef GPU 60 | cuda_free(l->output_gpu); 61 | cuda_free(l->delta_gpu); 62 | l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch); 63 | l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch); 64 | #endif 65 | 66 | } 67 | 68 | void forward_upsample_layer(const layer l, network net) 69 | { 70 | fill_cpu(l.outputs*l.batch, 0, l.output, 1); 71 | if(l.reverse){ 72 | upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input); 73 | }else{ 74 | upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output); 75 | } 76 | } 77 | 78 | void backward_upsample_layer(const layer l, network net) 79 | { 80 | if(l.reverse){ 81 |
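// reverse mode means this layer downsampled on the forward pass, so its (smaller) delta is spread back up to the full input resolution here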
upsample_cpu(l.delta, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta); 82 | }else{ 83 | upsample_cpu(net.delta, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta); 84 | } 85 | } 86 | 87 | #ifdef GPU 88 | void forward_upsample_layer_gpu(const layer l, network net) 89 | { 90 | fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); 91 | if(l.reverse){ 92 | upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input_gpu); 93 | }else{ 94 | upsample_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu); 95 | } 96 | } 97 | 98 | void backward_upsample_layer_gpu(const layer l, network net) 99 | { 100 | if(l.reverse){ 101 | upsample_gpu(l.delta_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta_gpu); 102 | }else{ 103 | upsample_gpu(net.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta_gpu); 104 | } 105 | } 106 | #endif 107 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/option_list.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "option_list.h" 5 | #include "utils.h" 6 | 7 | list *read_data_cfg(char *filename) 8 | { 9 | FILE *file = fopen(filename, "r"); 10 | if(file == 0) file_error(filename); 11 | char *line; 12 | int nu = 0; 13 | list *options = make_list(); 14 | while((line=fgetl(file)) != 0){ 15 | ++ nu; 16 | strip(line); 17 | switch(line[0]){ 18 | case '\0': 19 | case '#': 20 | case ';': 21 | free(line); 22 | break; 23 | default: 24 | if(!read_option(line, options)){ 25 | fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); 26 | free(line); 27 | } 28 | break; 29 | } 30 | } 31 | fclose(file); 32 | return options; 33 | } 34 | 35 | metadata get_metadata(char *file) 36 | { 37 | metadata m = {0}; 38 | list *options = read_data_cfg(file); 39 | 40 | char *name_list = option_find_str(options, "names", 0); 41 | if(!name_list) name_list = option_find_str(options, "labels", 0); 42 | if(!name_list) { 43 | fprintf(stderr, "No names or labels found\n"); 44 | } else { 45 | m.names = get_labels(name_list); 46 | } 47 | m.classes = option_find_int(options, "classes", 2); 48 | free_list(options); 49 | return m; 50 | } 51 | 52 | int read_option(char *s, list *options) 53 | { 54 | size_t i; 55 | size_t len = strlen(s); 56 | char *val = 0; 57 | for(i = 0; i < len; ++i){ 58 | if(s[i] == '='){ 59 | s[i] = '\0'; 60 | val = s+i+1; 61 | break; 62 | } 63 | } 64 | if(i == len-1) return 0; 65 | char *key = s; 66 | option_insert(options, key, val); 67 | return 1; 68 | } 69 | 70 | void option_insert(list *l, char *key, char *val) 71 | { 72 | kvp *p = malloc(sizeof(kvp)); 73 | p->key = key; 74 | p->val = val; 75 | p->used = 0; 76 | list_insert(l, p); 77 | } 78 | 79 | void option_unused(list *l) 80 | { 81 | node *n = l->front; 82 | while(n){ 83 | kvp *p = (kvp *)n->val; 84 | if(!p->used){ 85 | fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val); 86 | } 87 | n = n->next; 88 | } 89 | } 90 | 91 | char *option_find(list *l, char *key) 92 | { 93 | node *n = l->front; 94 | while(n){ 95 | kvp *p = (kvp *)n->val; 96 | if(strcmp(p->key, key) == 0){ 97 | p->used = 1; 98 | return p->val; 99 | } 100 | n = n->next; 101 | } 102 | return 0; 103 | } 104 | char *option_find_str(list *l, char *key, char *def) 105 | { 106 | char *v = option_find(l, key); 107 | if(v) return v; 108 | if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def); 109 | return 
def; 110 | } 111 | 112 | int option_find_int(list *l, char *key, int def) 113 | { 114 | char *v = option_find(l, key); 115 | if(v) return atoi(v); 116 | fprintf(stderr, "%s: Using default '%d'\n", key, def); 117 | return def; 118 | } 119 | 120 | int option_find_int_quiet(list *l, char *key, int def) 121 | { 122 | char *v = option_find(l, key); 123 | if(v) return atoi(v); 124 | return def; 125 | } 126 | 127 | float option_find_float_quiet(list *l, char *key, float def) 128 | { 129 | char *v = option_find(l, key); 130 | if(v) return atof(v); 131 | return def; 132 | } 133 | 134 | float option_find_float(list *l, char *key, float def) 135 | { 136 | char *v = option_find(l, key); 137 | if(v) return atof(v); 138 | fprintf(stderr, "%s: Using default '%lf'\n", key, def); 139 | return def; 140 | } 141 | -------------------------------------------------------------------------------- /extractedDNA/AllelesClassifier/test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include "classifier.h" 19 | 20 | using namespace std; 21 | using namespace boost; 22 | using namespace boost::serialization; 23 | using namespace boost::archive; 24 | using namespace boost::program_options; 25 | namespace ublas = boost::numeric::ublas; 26 | 27 | map > > loadList(string file); 28 | 29 | int main(int argc,char ** argv) 30 | { 31 | options_description desc; 32 | string inputfile; 33 | string modelfile; 34 | desc.add_options() 35 | ("help,h","print current usage") 36 | ("input,i",value(&inputfile),"test samples list") 37 | ("model,m",value(&modelfile),"model file"); 38 | variables_map vm; 39 | store(parse_command_line(argc,argv,desc),vm); 40 | notify(vm); 41 | 42 | if(1 == argc || vm.count("help")) { 43 | cout< > > list = loadList(inputfile); 60 | int correct = 0,wrong = 0; 61 | for(auto itr = list.begin() ; itr != list.end() ; itr++) 62 | for(auto sample = itr->second.begin() ; sample != itr->second.end() ; sample++) { 63 | int _class = classifier.predict(*sample); 64 | if(_class == itr->first) correct++; 65 | else wrong++; 66 | } 67 | cout<<"precision: "<(correct) / (correct + wrong)< > > loadList(string file) 73 | { 74 | std::ifstream in(file); 75 | if(false == in.is_open()) throw runtime_error("can't open the training list file"); 76 | char_separator sep(" \t"); 77 | typedef boost::tokenizer > tokenizer; 78 | map > > retVal; 79 | while(false == in.eof()) { 80 | string line; 81 | getline(in,line); 82 | trim(line); 83 | if("" == line) continue; 84 | tokenizer tokens(line,sep); 85 | vector alleles; 86 | int c; 87 | for(tokenizer::iterator tok_iter = tokens.begin() ; tok_iter != tokens.end() ; tok_iter++) { 88 | auto next = tok_iter; 89 | next++; 90 | if(next != tokens.end()) 91 | alleles.push_back(lexical_cast(*tok_iter)); 92 | else 93 | c = lexical_cast(*tok_iter); 94 | } 95 | #ifndef NDEBUG 96 | for(auto & a : alleles) { 97 | if( 98 | a != 'a' && a != 'A' && a != 't' && a != 'T' && 99 | a != 'c' && a != 'C' && a != 'g' && a != 'G' 100 | ) throw logic_error("the value of alleles can only within ATCG"); 101 | } 102 | #endif 103 | retVal[c].push_back(alleles); 104 | } 105 | return retVal; 106 | } 107 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/softmax_layer.c: 
-------------------------------------------------------------------------------- 1 | #include "softmax_layer.h" 2 | #include "blas.h" 3 | #include "cuda.h" 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | softmax_layer make_softmax_layer(int batch, int inputs, int groups) 12 | { 13 | assert(inputs%groups == 0); 14 | fprintf(stderr, "softmax %4d\n", inputs); 15 | softmax_layer l = {0}; 16 | l.type = SOFTMAX; 17 | l.batch = batch; 18 | l.groups = groups; 19 | l.inputs = inputs; 20 | l.outputs = inputs; 21 | l.loss = calloc(inputs*batch, sizeof(float)); 22 | l.output = calloc(inputs*batch, sizeof(float)); 23 | l.delta = calloc(inputs*batch, sizeof(float)); 24 | l.cost = calloc(1, sizeof(float)); 25 | 26 | l.forward = forward_softmax_layer; 27 | l.backward = backward_softmax_layer; 28 | #ifdef GPU 29 | l.forward_gpu = forward_softmax_layer_gpu; 30 | l.backward_gpu = backward_softmax_layer_gpu; 31 | 32 | l.output_gpu = cuda_make_array(l.output, inputs*batch); 33 | l.loss_gpu = cuda_make_array(l.loss, inputs*batch); 34 | l.delta_gpu = cuda_make_array(l.delta, inputs*batch); 35 | #endif 36 | return l; 37 | } 38 | 39 | void forward_softmax_layer(const softmax_layer l, network net) 40 | { 41 | if(l.softmax_tree){ 42 | int i; 43 | int count = 0; 44 | for (i = 0; i < l.softmax_tree->groups; ++i) { 45 | int group_size = l.softmax_tree->group_size[i]; 46 | softmax_cpu(net.input + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + count); 47 | count += group_size; 48 | } 49 | } else { 50 | softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output); 51 | } 52 | 53 | if(net.truth){ 54 | softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss); 55 | l.cost[0] = sum_array(l.loss, l.batch*l.inputs); 56 | } 57 | } 58 | 59 | void backward_softmax_layer(const softmax_layer l, network net) 60 | { 61 | axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1); 62 | } 63 | 64 | #ifdef GPU 65 | 66 | void pull_softmax_layer_output(const softmax_layer layer) 67 | { 68 | cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch); 69 | } 70 | 71 | void forward_softmax_layer_gpu(const softmax_layer l, network net) 72 | { 73 | if(l.softmax_tree){ 74 | softmax_tree(net.input_gpu, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree); 75 | /* 76 | int i; 77 | int count = 0; 78 | for (i = 0; i < l.softmax_tree->groups; ++i) { 79 | int group_size = l.softmax_tree->group_size[i]; 80 | softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count); 81 | count += group_size; 82 | } 83 | */ 84 | } else { 85 | if(l.spatial){ 86 | softmax_gpu(net.input_gpu, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu); 87 | }else{ 88 | softmax_gpu(net.input_gpu, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu); 89 | } 90 | } 91 | if(net.truth){ 92 | softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth_gpu, l.delta_gpu, l.loss_gpu); 93 | if(l.softmax_tree){ 94 | mask_gpu(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth_gpu, 0); 95 | mask_gpu(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth_gpu, 0); 96 | } 97 | cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs); 98 | l.cost[0] = sum_array(l.loss, l.batch*l.inputs); 99 | } 100 | } 101 | 102 | void backward_softmax_layer_gpu(const softmax_layer layer, network net) 103 | { 104 | 
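// l.delta_gpu was already filled with the softmax/cross-entropy error term during the forward pass; backward only accumulates it into the previous layer's delta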
axpy_gpu(layer.batch*layer.inputs, 1, layer.delta_gpu, 1, net.delta_gpu, 1); 105 | } 106 | 107 | #endif 108 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/dice.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | 3 | char *dice_labels[] = {"face1","face2","face3","face4","face5","face6"}; 4 | 5 | void train_dice(char *cfgfile, char *weightfile) 6 | { 7 | srand(time(0)); 8 | float avg_loss = -1; 9 | char *base = basecfg(cfgfile); 10 | char *backup_directory = "/home/pjreddie/backup/"; 11 | printf("%s\n", base); 12 | network net = parse_network_cfg(cfgfile); 13 | if(weightfile){ 14 | load_weights(&net, weightfile); 15 | } 16 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay); 17 | int imgs = 1024; 18 | int i = *net.seen/imgs; 19 | char **labels = dice_labels; 20 | list *plist = get_paths("data/dice/dice.train.list"); 21 | char **paths = (char **)list_to_array(plist); 22 | printf("%d\n", plist->size); 23 | clock_t time; 24 | while(1){ 25 | ++i; 26 | time=clock(); 27 | data train = load_data_old(paths, imgs, plist->size, labels, 6, net.w, net.h); 28 | printf("Loaded: %lf seconds\n", sec(clock()-time)); 29 | 30 | time=clock(); 31 | float loss = train_network(net, train); 32 | if(avg_loss == -1) avg_loss = loss; 33 | avg_loss = avg_loss*.9 + loss*.1; 34 | printf("%d: %f, %f avg, %lf seconds, %ld images\n", i, loss, avg_loss, sec(clock()-time), *net.seen); 35 | free_data(train); 36 | if((i % 100) == 0) net.learning_rate *= .1; 37 | if(i%100==0){ 38 | char buff[256]; 39 | sprintf(buff, "%s/%s_%d.weights",backup_directory,base, i); 40 | save_weights(net, buff); 41 | } 42 | } 43 | } 44 | 45 | void validate_dice(char *filename, char *weightfile) 46 | { 47 | network net = parse_network_cfg(filename); 48 | if(weightfile){ 49 | load_weights(&net, weightfile); 50 | } 51 | srand(time(0)); 52 | 53 | char **labels = dice_labels; 54 | list *plist = get_paths("data/dice/dice.val.list"); 55 | 56 | char **paths = (char **)list_to_array(plist); 57 | int m = plist->size; 58 | free_list(plist); 59 | 60 | data val = load_data_old(paths, m, 0, labels, 6, net.w, net.h); 61 | float *acc = network_accuracies(net, val, 2); 62 | printf("Validation Accuracy: %f, %d images\n", acc[0], m); 63 | free_data(val); 64 | } 65 | 66 | void test_dice(char *cfgfile, char *weightfile, char *filename) 67 | { 68 | network net = parse_network_cfg(cfgfile); 69 | if(weightfile){ 70 | load_weights(&net, weightfile); 71 | } 72 | set_batch_network(&net, 1); 73 | srand(2222222); 74 | int i = 0; 75 | char **names = dice_labels; 76 | char buff[256]; 77 | char *input = buff; 78 | int indexes[6]; 79 | while(1){ 80 | if(filename){ 81 | strncpy(input, filename, 256); 82 | }else{ 83 | printf("Enter Image Path: "); 84 | fflush(stdout); 85 | input = fgets(input, 256, stdin); 86 | if(!input) return; 87 | strtok(input, "\n"); 88 | } 89 | image im = load_image_color(input, net.w, net.h); 90 | float *X = im.data; 91 | float *predictions = network_predict(net, X); 92 | top_predictions(net, 6, indexes); 93 | for(i = 0; i < 6; ++i){ 94 | int index = indexes[i]; 95 | printf("%s: %f\n", names[index], predictions[index]); 96 | } 97 | free_image(im); 98 | if (filename) break; 99 | } 100 | } 101 | 102 | void run_dice(int argc, char **argv) 103 | { 104 | if(argc < 4){ 105 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 106 | 
return; 107 | } 108 | 109 | char *cfg = argv[3]; 110 | char *weights = (argc > 4) ? argv[4] : 0; 111 | char *filename = (argc > 5) ? argv[5]: 0; 112 | if(0==strcmp(argv[2], "test")) test_dice(cfg, weights, filename); 113 | else if(0==strcmp(argv[2], "train")) train_dice(cfg, weights); 114 | else if(0==strcmp(argv[2], "valid")) validate_dice(cfg, weights); 115 | } 116 | 117 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/super.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | 3 | void train_super(char *cfgfile, char *weightfile, int clear) 4 | { 5 | char *train_images = "/data/imagenet/imagenet1k.train.list"; 6 | char *backup_directory = "/home/pjreddie/backup/"; 7 | srand(time(0)); 8 | char *base = basecfg(cfgfile); 9 | printf("%s\n", base); 10 | float avg_loss = -1; 11 | network *net = load_network(cfgfile, weightfile, clear); 12 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); 13 | int imgs = net->batch*net->subdivisions; 14 | int i = *net->seen/imgs; 15 | data train, buffer; 16 | 17 | 18 | list *plist = get_paths(train_images); 19 | //int N = plist->size; 20 | char **paths = (char **)list_to_array(plist); 21 | 22 | load_args args = {0}; 23 | args.w = net->w; 24 | args.h = net->h; 25 | args.scale = 4; 26 | args.paths = paths; 27 | args.n = imgs; 28 | args.m = plist->size; 29 | args.d = &buffer; 30 | args.type = SUPER_DATA; 31 | 32 | pthread_t load_thread = load_data_in_thread(args); 33 | clock_t time; 34 | //while(i*imgs < N*120){ 35 | while(get_current_batch(net) < net->max_batches){ 36 | i += 1; 37 | time=clock(); 38 | pthread_join(load_thread, 0); 39 | train = buffer; 40 | load_thread = load_data_in_thread(args); 41 | 42 | printf("Loaded: %lf seconds\n", sec(clock()-time)); 43 | 44 | time=clock(); 45 | float loss = train_network(net, train); 46 | if (avg_loss < 0) avg_loss = loss; 47 | avg_loss = avg_loss*.9 + loss*.1; 48 | 49 | printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs); 50 | if(i%1000==0){ 51 | char buff[256]; 52 | sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); 53 | save_weights(net, buff); 54 | } 55 | if(i%100==0){ 56 | char buff[256]; 57 | sprintf(buff, "%s/%s.backup", backup_directory, base); 58 | save_weights(net, buff); 59 | } 60 | free_data(train); 61 | } 62 | char buff[256]; 63 | sprintf(buff, "%s/%s_final.weights", backup_directory, base); 64 | save_weights(net, buff); 65 | } 66 | 67 | void test_super(char *cfgfile, char *weightfile, char *filename) 68 | { 69 | network *net = load_network(cfgfile, weightfile, 0); 70 | set_batch_network(net, 1); 71 | srand(2222222); 72 | 73 | clock_t time; 74 | char buff[256]; 75 | char *input = buff; 76 | while(1){ 77 | if(filename){ 78 | strncpy(input, filename, 256); 79 | }else{ 80 | printf("Enter Image Path: "); 81 | fflush(stdout); 82 | input = fgets(input, 256, stdin); 83 | if(!input) return; 84 | strtok(input, "\n"); 85 | } 86 | image im = load_image_color(input, 0, 0); 87 | resize_network(net, im.w, im.h); 88 | printf("%d %d\n", im.w, im.h); 89 | 90 | float *X = im.data; 91 | time=clock(); 92 | network_predict(net, X); 93 | image out = get_network_image(net); 94 | printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time)); 95 | save_image(out, "out"); 96 | show_image(out, "out"); 97 | 98 | free_image(im); 99 | if (filename) break; 100 
| } 101 | } 102 | 103 | 104 | void run_super(int argc, char **argv) 105 | { 106 | if(argc < 4){ 107 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 108 | return; 109 | } 110 | 111 | char *cfg = argv[3]; 112 | char *weights = (argc > 4) ? argv[4] : 0; 113 | char *filename = (argc > 5) ? argv[5] : 0; 114 | int clear = find_arg(argc, argv, "-clear"); 115 | if(0==strcmp(argv[2], "train")) train_super(cfg, weights, clear); 116 | else if(0==strcmp(argv[2], "test")) test_super(cfg, weights, filename); 117 | /* 118 | else if(0==strcmp(argv[2], "valid")) validate_super(cfg, weights); 119 | */ 120 | } 121 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/activations.c: -------------------------------------------------------------------------------- 1 | #include "activations.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | char *get_activation_string(ACTIVATION a) 9 | { 10 | switch(a){ 11 | case LOGISTIC: 12 | return "logistic"; 13 | case LOGGY: 14 | return "loggy"; 15 | case RELU: 16 | return "relu"; 17 | case ELU: 18 | return "elu"; 19 | case RELIE: 20 | return "relie"; 21 | case RAMP: 22 | return "ramp"; 23 | case LINEAR: 24 | return "linear"; 25 | case TANH: 26 | return "tanh"; 27 | case PLSE: 28 | return "plse"; 29 | case LEAKY: 30 | return "leaky"; 31 | case STAIR: 32 | return "stair"; 33 | case HARDTAN: 34 | return "hardtan"; 35 | case LHTAN: 36 | return "lhtan"; 37 | default: 38 | break; 39 | } 40 | return "relu"; 41 | } 42 | 43 | ACTIVATION get_activation(char *s) 44 | { 45 | if (strcmp(s, "logistic")==0) return LOGISTIC; 46 | if (strcmp(s, "loggy")==0) return LOGGY; 47 | if (strcmp(s, "relu")==0) return RELU; 48 | if (strcmp(s, "elu")==0) return ELU; 49 | if (strcmp(s, "relie")==0) return RELIE; 50 | if (strcmp(s, "plse")==0) return PLSE; 51 | if (strcmp(s, "hardtan")==0) return HARDTAN; 52 | if (strcmp(s, "lhtan")==0) return LHTAN; 53 | if (strcmp(s, "linear")==0) return LINEAR; 54 | if (strcmp(s, "ramp")==0) return RAMP; 55 | if (strcmp(s, "leaky")==0) return LEAKY; 56 | if (strcmp(s, "tanh")==0) return TANH; 57 | if (strcmp(s, "stair")==0) return STAIR; 58 | fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s); 59 | return RELU; 60 | } 61 | 62 | float activate(float x, ACTIVATION a) 63 | { 64 | switch(a){ 65 | case LINEAR: 66 | return linear_activate(x); 67 | case LOGISTIC: 68 | return logistic_activate(x); 69 | case LOGGY: 70 | return loggy_activate(x); 71 | case RELU: 72 | return relu_activate(x); 73 | case ELU: 74 | return elu_activate(x); 75 | case RELIE: 76 | return relie_activate(x); 77 | case RAMP: 78 | return ramp_activate(x); 79 | case LEAKY: 80 | return leaky_activate(x); 81 | case TANH: 82 | return tanh_activate(x); 83 | case PLSE: 84 | return plse_activate(x); 85 | case STAIR: 86 | return stair_activate(x); 87 | case HARDTAN: 88 | return hardtan_activate(x); 89 | case LHTAN: 90 | return lhtan_activate(x); 91 | } 92 | return 0; 93 | } 94 | 95 | void activate_array(float *x, const int n, const ACTIVATION a) 96 | { 97 | int i; 98 | for(i = 0; i < n; ++i){ 99 | x[i] = activate(x[i], a); 100 | } 101 | } 102 | 103 | float gradient(float x, ACTIVATION a) 104 | { 105 | switch(a){ 106 | case LINEAR: 107 | return linear_gradient(x); 108 | case LOGISTIC: 109 | return logistic_gradient(x); 110 | case LOGGY: 111 | return loggy_gradient(x); 112 | case RELU: 113 | return relu_gradient(x); 114 | case ELU: 115 | return 
elu_gradient(x); 116 | case RELIE: 117 | return relie_gradient(x); 118 | case RAMP: 119 | return ramp_gradient(x); 120 | case LEAKY: 121 | return leaky_gradient(x); 122 | case TANH: 123 | return tanh_gradient(x); 124 | case PLSE: 125 | return plse_gradient(x); 126 | case STAIR: 127 | return stair_gradient(x); 128 | case HARDTAN: 129 | return hardtan_gradient(x); 130 | case LHTAN: 131 | return lhtan_gradient(x); 132 | } 133 | return 0; 134 | } 135 | 136 | void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta) 137 | { 138 | int i; 139 | for(i = 0; i < n; ++i){ 140 | delta[i] *= gradient(x[i], a); 141 | } 142 | } 143 | 144 | -------------------------------------------------------------------------------- /extractedDNA/AllelesClassifier/train.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | using namespace std; 20 | using namespace boost; 21 | using namespace boost::serialization; 22 | using namespace boost::archive; 23 | using namespace boost::program_options; 24 | namespace ublas = boost::numeric::ublas; 25 | 26 | map > > loadList(string file); 27 | 28 | int main(int argc,char ** argv) 29 | { 30 | options_description desc; 31 | string inputfile; 32 | string outputfile; 33 | desc.add_options() 34 | ("help,h","print current usage") 35 | ("input,i",value(&inputfile),"training samples list") 36 | ("output,o",value(&outputfile),"model file"); 37 | variables_map vm; 38 | store(parse_command_line(argc,argv,desc),vm); 39 | notify(vm); 40 | 41 | if(1 == argc || vm.count("help")) { 42 | cout< > > list = loadList(inputfile); 57 | if(0 == list.size()) { 58 | cout<<"empty list"<second[0].size(); 62 | #ifndef NDEBUG 63 | cout<<"number of alleles: "< > probabilities; 66 | for(auto & _class : list) { 67 | //accumulate conditional probability of current class 68 | probabilities[_class.first] = ublas::zero_matrix(4,dim); 69 | for(auto & sample : _class.second) { 70 | for(int i = 0 ; i < dim ; i++) 71 | switch(sample[i]) { 72 | case 'A':case 'a': probabilities[_class.first](0,i) += 1; break; 73 | case 'T':case 't': probabilities[_class.first](1,i) += 1; break; 74 | case 'C':case 'c': probabilities[_class.first](2,i) += 1; break; 75 | case 'G':case 'g': probabilities[_class.first](3,i) += 1; break; 76 | default:break; 77 | } 78 | } 79 | probabilities[_class.first] /= _class.second.size(); 80 | } 81 | map priors; 82 | float sum = 0; 83 | for(auto & _class : list) { 84 | priors[_class.first] = _class.second.size(); 85 | sum += _class.second.size(); 86 | } 87 | for(auto & _class : priors) 88 | _class.second /= sum; 89 | 90 | text_oarchive oa(out); 91 | oa << probabilities << priors; 92 | 93 | return EXIT_SUCCESS; 94 | } 95 | 96 | map > > loadList(string file) 97 | { 98 | std::ifstream in(file); 99 | if(false == in.is_open()) throw runtime_error("can't open the training list file"); 100 | char_separator sep(" \t"); 101 | typedef boost::tokenizer > tokenizer; 102 | map > > retVal; 103 | while(false == in.eof()) { 104 | string line; 105 | getline(in,line); 106 | trim(line); 107 | if("" == line) continue; 108 | tokenizer tokens(line,sep); 109 | vector alleles; 110 | int c; 111 | for(tokenizer::iterator tok_iter = tokens.begin() ; tok_iter != tokens.end() ; tok_iter++) { 112 | auto next = tok_iter; 113 | next++; 114 | 
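// all tokens on a line except the last are single-character alleles; the final token is the integer class label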
if(next != tokens.end()) 115 | alleles.push_back(lexical_cast(*tok_iter)); 116 | else 117 | c = lexical_cast(*tok_iter); 118 | } 119 | #ifndef NDEBUG 120 | for(auto & a : alleles) { 121 | if( 122 | a != 'a' && a != 'A' && a != 't' && a != 'T' && 123 | a != 'c' && a != 'C' && a != 'g' && a != 'G' 124 | ) throw logic_error("the value of alleles can only within ATCG"); 125 | } 126 | #endif 127 | retVal[c].push_back(alleles); 128 | } 129 | return retVal; 130 | } 131 | 132 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/tree.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "tree.h" 4 | #include "utils.h" 5 | #include "data.h" 6 | 7 | void change_leaves(tree *t, char *leaf_list) 8 | { 9 | list *llist = get_paths(leaf_list); 10 | char **leaves = (char **)list_to_array(llist); 11 | int n = llist->size; 12 | int i,j; 13 | int found = 0; 14 | for(i = 0; i < t->n; ++i){ 15 | t->leaf[i] = 0; 16 | for(j = 0; j < n; ++j){ 17 | if (0==strcmp(t->name[i], leaves[j])){ 18 | t->leaf[i] = 1; 19 | ++found; 20 | break; 21 | } 22 | } 23 | } 24 | fprintf(stderr, "Found %d leaves.\n", found); 25 | } 26 | 27 | float get_hierarchy_probability(float *x, tree *hier, int c, int stride) 28 | { 29 | float p = 1; 30 | while(c >= 0){ 31 | p = p * x[c*stride]; 32 | c = hier->parent[c]; 33 | } 34 | return p; 35 | } 36 | 37 | void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride) 38 | { 39 | int j; 40 | for(j = 0; j < n; ++j){ 41 | int parent = hier->parent[j]; 42 | if(parent >= 0){ 43 | predictions[j*stride] *= predictions[parent*stride]; 44 | } 45 | } 46 | if(only_leaves){ 47 | for(j = 0; j < n; ++j){ 48 | if(!hier->leaf[j]) predictions[j*stride] = 0; 49 | } 50 | } 51 | } 52 | 53 | int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride) 54 | { 55 | float p = 1; 56 | int group = 0; 57 | int i; 58 | while(1){ 59 | float max = 0; 60 | int max_i = 0; 61 | 62 | for(i = 0; i < hier->group_size[group]; ++i){ 63 | int index = i + hier->group_offset[group]; 64 | float val = predictions[(i + hier->group_offset[group])*stride]; 65 | if(val > max){ 66 | max_i = index; 67 | max = val; 68 | } 69 | } 70 | if(p*max > thresh){ 71 | p = p*max; 72 | group = hier->child[max_i]; 73 | if(hier->child[max_i] < 0) return max_i; 74 | } else if (group == 0){ 75 | return max_i; 76 | } else { 77 | return hier->parent[hier->group_offset[group]]; 78 | } 79 | } 80 | return 0; 81 | } 82 | 83 | tree *read_tree(char *filename) 84 | { 85 | tree t = {0}; 86 | FILE *fp = fopen(filename, "r"); 87 | 88 | char *line; 89 | int last_parent = -1; 90 | int group_size = 0; 91 | int groups = 0; 92 | int n = 0; 93 | while((line=fgetl(fp)) != 0){ 94 | char *id = calloc(256, sizeof(char)); 95 | int parent = -1; 96 | sscanf(line, "%s %d", id, &parent); 97 | t.parent = realloc(t.parent, (n+1)*sizeof(int)); 98 | t.parent[n] = parent; 99 | 100 | t.child = realloc(t.child, (n+1)*sizeof(int)); 101 | t.child[n] = -1; 102 | 103 | t.name = realloc(t.name, (n+1)*sizeof(char *)); 104 | t.name[n] = id; 105 | if(parent != last_parent){ 106 | ++groups; 107 | t.group_offset = realloc(t.group_offset, groups * sizeof(int)); 108 | t.group_offset[groups - 1] = n - group_size; 109 | t.group_size = realloc(t.group_size, groups * sizeof(int)); 110 | t.group_size[groups - 1] = group_size; 111 | group_size = 0; 112 | last_parent = parent; 113 | } 114 | t.group = realloc(t.group, 
(n+1)*sizeof(int)); 115 | t.group[n] = groups; 116 | if (parent >= 0) { 117 | t.child[parent] = groups; 118 | } 119 | ++n; 120 | ++group_size; 121 | } 122 | ++groups; 123 | t.group_offset = realloc(t.group_offset, groups * sizeof(int)); 124 | t.group_offset[groups - 1] = n - group_size; 125 | t.group_size = realloc(t.group_size, groups * sizeof(int)); 126 | t.group_size[groups - 1] = group_size; 127 | t.n = n; 128 | t.groups = groups; 129 | t.leaf = calloc(n, sizeof(int)); 130 | int i; 131 | for(i = 0; i < n; ++i) t.leaf[i] = 1; 132 | for(i = 0; i < n; ++i) if(t.parent[i] >= 0) t.leaf[t.parent[i]] = 0; 133 | 134 | fclose(fp); 135 | tree *tree_ptr = calloc(1, sizeof(tree)); 136 | *tree_ptr = t; 137 | //error(0); 138 | return tree_ptr; 139 | } 140 | -------------------------------------------------------------------------------- /rawDNA/LSTM/src/train_LSTM.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define NDEBUG 12 | #define WITH_CUDA 13 | #define TRAINSIZE 350418 14 | #define BATCHSIZE 80 15 | #define SEQLENGTH 25 16 | #define CLASSNUM 10 17 | 18 | using namespace std; 19 | using namespace boost::filesystem; 20 | using namespace caffe2; 21 | using namespace cvplot; 22 | 23 | void setupTrainNet(NetDef & init, NetDef & predict); 24 | void setupSaveNet(NetDef & init, NetDef & save); 25 | 26 | unique_ptr predict_net; 27 | unique_ptr save_net; 28 | 29 | 30 | void atexit_handler() 31 | { 32 | cout<<"saving params"<Run(); 35 | } 36 | 37 | int main(int argc,char ** argv) 38 | { 39 | NetDef init,predict,save; 40 | setupTrainNet(init,predict); 41 | setupSaveNet(init,save); 42 | #ifdef WITH_CUDA 43 | auto device = CUDA; 44 | #else 45 | auto device = CPU; 46 | #endif 47 | init.mutable_device_option()->set_device_type(device); 48 | predict.mutable_device_option()->set_device_type(device); 49 | save.mutable_device_option()->set_device_type(device); 50 | Workspace workspace(nullptr); 51 | workspace.RunNetOnce(init); 52 | predict_net = CreateNet(predict,&workspace); 53 | save_net = CreateNet(save,&workspace); 54 | atexit(atexit_handler); 55 | #ifndef NDEBUG 56 | //show loss degradation 57 | cvplot::window("loss revolution"); 58 | cvplot::move("loss",300,300); 59 | cvplot::resize("loss",500,300); 60 | cvplot::figure("loss").series("train").color(cvplot::Purple); 61 | #endif 62 | for(int i = 0 ; ; i++) { 63 | predict_net->Run(); 64 | cout<<"iter:"<Run(); 69 | } 70 | } 71 | return EXIT_SUCCESS; 72 | } 73 | 74 | void setupTrainNet(NetDef & init, NetDef & predict) 75 | { 76 | ModelUtil network(init,predict); 77 | network.init.AddCreateDbOp("db","lmdb","./dataset"); 78 | network.predict.AddInput("db"); 79 | //data in format of batch_size x seq_length x 4 80 | network.AddTensorProtosDbInputOp("db","data","label",BATCHSIZE); 81 | //transpose into format seq_length x batch_size x 4 82 | network.AddTransposeOp("data","data_transposed",{1,0,2}); 83 | 84 | network.init.AddConstantIntFillOp({BATCHSIZE},SEQLENGTH,"LSTM1/seq_lengths"); 85 | network.AddLSTMOps("data_transposed","LSTM1","seq_lengths","hidden_init","cell_init","hidden_state","cell_state",4,100,false); 86 | network.AddCopyOp("LSTM1/hidden_state","LSTM1/hidden_init"); 87 | network.AddCopyOp("LSTM1/cell_state","LSTM1/cell_init"); 88 | 89 | network.init.AddConstantIntFillOp({BATCHSIZE},SEQLENGTH,"LSTM2/seq_lengths"); 90 | 
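// stack a second 100-unit LSTM on the full hidden sequence of LSTM1; its final hidden/cell states are copied back below to seed the next iteration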
network.AddLSTMOps("LSTM1/hidden_t_all","LSTM2","seq_lengths","hidden_init","cell_init","hidden_state","cell_state",100,100,false); 91 | network.AddCopyOp("LSTM2/hidden_state","LSTM2/hidden_init"); 92 | network.AddCopyOp("LSTM2/cell_state","LSTM2/cell_init"); 93 | 94 | network.AddFcOps("LSTM2/hidden_state","fc1",100,50); 95 | network.AddFcOps("fc1","fc2",50,CLASSNUM); 96 | network.AddSoftmaxWithLossOp({"fc2","label"},{"softmax","loss"}); 97 | 98 | network.AddConstantFillWithOp(1.0, "loss", "loss_grad"); 99 | network.predict.AddGradientOps(); 100 | network.AddIterOps(); 101 | #ifndef NDEBUG 102 | network.predict.AddTimePlotOp("loss","iter","loss","train",10); 103 | #endif 104 | network.AddLearningRateOp("iter", "lr", -0.01,0.9,100*round(static_cast(TRAINSIZE)/BATCHSIZE)); 105 | string optimizer = "adam"; 106 | network.AddOptimizerOps(optimizer); 107 | // write out the network structure 108 | network.predict.WriteText("models/lstm_train_predict.pbtxt"); 109 | network.init.WriteText("models/lstm_train_init.pbtxt"); 110 | } 111 | 112 | void setupSaveNet(NetDef & init, NetDef & save) 113 | { 114 | NetUtil InitNet(init); 115 | NetUtil SaveNet(save); 116 | vector params; 117 | for(auto & op : InitNet.net.op()) { 118 | if(op.type() == "CreateDB") continue; 119 | for(auto & output : op.output()) 120 | params.push_back(output); 121 | } 122 | SaveNet.AddSaveOp(params,"lmdb","LSTM_params"); 123 | //output network 124 | SaveNet.WriteText("models/lstm_train_save.pbtxt"); 125 | } 126 | 127 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/maxpool_layer.c: -------------------------------------------------------------------------------- 1 | #include "maxpool_layer.h" 2 | #include "cuda.h" 3 | #include <stdio.h> 4 | 5 | image get_maxpool_image(maxpool_layer l) 6 | { 7 | int h = l.out_h; 8 | int w = l.out_w; 9 | int c = l.c; 10 | return float_to_image(w,h,c,l.output); 11 | } 12 | 13 | image get_maxpool_delta(maxpool_layer l) 14 | { 15 | int h = l.out_h; 16 | int w = l.out_w; 17 | int c = l.c; 18 | return float_to_image(w,h,c,l.delta); 19 | } 20 | 21 | maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding) 22 | { 23 | maxpool_layer l = {0}; 24 | l.type = MAXPOOL; 25 | l.batch = batch; 26 | l.h = h; 27 | l.w = w; 28 | l.c = c; 29 | l.pad = padding; 30 | l.out_w = (w + 2*padding)/stride; 31 | l.out_h = (h + 2*padding)/stride; 32 | l.out_c = c; 33 | l.outputs = l.out_h * l.out_w * l.out_c; 34 | l.inputs = h*w*c; 35 | l.size = size; 36 | l.stride = stride; 37 | int output_size = l.out_h * l.out_w * l.out_c * batch; 38 | l.indexes = calloc(output_size, sizeof(int)); 39 | l.output = calloc(output_size, sizeof(float)); 40 | l.delta = calloc(output_size, sizeof(float)); 41 | l.forward = forward_maxpool_layer; 42 | l.backward = backward_maxpool_layer; 43 | #ifdef GPU 44 | l.forward_gpu = forward_maxpool_layer_gpu; 45 | l.backward_gpu = backward_maxpool_layer_gpu; 46 | l.indexes_gpu = cuda_make_int_array(0, output_size); 47 | l.output_gpu = cuda_make_array(l.output, output_size); 48 | l.delta_gpu = cuda_make_array(l.delta, output_size); 49 | #endif 50 | fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c); 51 | return l; 52 | } 53 | 54 | void resize_maxpool_layer(maxpool_layer *l, int w, int h) 55 | { 56 | l->h = h; 57 | l->w = w; 58 | l->inputs = h*w*l->c; 59 | 60 | l->out_w = (w + 2*l->pad)/l->stride; 61 | l->out_h = (h + 2*l->pad)/l->stride; 62 | l->outputs = l->out_w
* l->out_h * l->c; 63 | int output_size = l->outputs * l->batch; 64 | 65 | l->indexes = realloc(l->indexes, output_size * sizeof(int)); 66 | l->output = realloc(l->output, output_size * sizeof(float)); 67 | l->delta = realloc(l->delta, output_size * sizeof(float)); 68 | 69 | #ifdef GPU 70 | cuda_free((float *)l->indexes_gpu); 71 | cuda_free(l->output_gpu); 72 | cuda_free(l->delta_gpu); 73 | l->indexes_gpu = cuda_make_int_array(0, output_size); 74 | l->output_gpu = cuda_make_array(l->output, output_size); 75 | l->delta_gpu = cuda_make_array(l->delta, output_size); 76 | #endif 77 | } 78 | 79 | void forward_maxpool_layer(const maxpool_layer l, network net) 80 | { 81 | int b,i,j,k,m,n; 82 | int w_offset = -l.pad; 83 | int h_offset = -l.pad; 84 | 85 | int h = l.out_h; 86 | int w = l.out_w; 87 | int c = l.c; 88 | 89 | for(b = 0; b < l.batch; ++b){ 90 | for(k = 0; k < c; ++k){ 91 | for(i = 0; i < h; ++i){ 92 | for(j = 0; j < w; ++j){ 93 | int out_index = j + w*(i + h*(k + c*b)); 94 | float max = -FLT_MAX; 95 | int max_i = -1; 96 | for(n = 0; n < l.size; ++n){ 97 | for(m = 0; m < l.size; ++m){ 98 | int cur_h = h_offset + i*l.stride + n; 99 | int cur_w = w_offset + j*l.stride + m; 100 | int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c)); 101 | int valid = (cur_h >= 0 && cur_h < l.h && 102 | cur_w >= 0 && cur_w < l.w); 103 | float val = (valid != 0) ? net.input[index] : -FLT_MAX; 104 | max_i = (val > max) ? index : max_i; 105 | max = (val > max) ? val : max; 106 | } 107 | } 108 | l.output[out_index] = max; 109 | l.indexes[out_index] = max_i; 110 | } 111 | } 112 | } 113 | } 114 | } 115 | 116 | void backward_maxpool_layer(const maxpool_layer l, network net) 117 | { 118 | int i; 119 | int h = l.out_h; 120 | int w = l.out_w; 121 | int c = l.c; 122 | for(i = 0; i < h*w*c*l.batch; ++i){ 123 | int index = l.indexes[i]; 124 | net.delta[index] += l.delta[i]; 125 | } 126 | } 127 | 128 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/route_layer.c: -------------------------------------------------------------------------------- 1 | #include "route_layer.h" 2 | #include "cuda.h" 3 | #include "blas.h" 4 | 5 | #include 6 | 7 | route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes) 8 | { 9 | fprintf(stderr,"route "); 10 | route_layer l = {0}; 11 | l.type = ROUTE; 12 | l.batch = batch; 13 | l.n = n; 14 | l.input_layers = input_layers; 15 | l.input_sizes = input_sizes; 16 | int i; 17 | int outputs = 0; 18 | for(i = 0; i < n; ++i){ 19 | fprintf(stderr," %d", input_layers[i]); 20 | outputs += input_sizes[i]; 21 | } 22 | fprintf(stderr, "\n"); 23 | l.outputs = outputs; 24 | l.inputs = outputs; 25 | l.delta = calloc(outputs*batch, sizeof(float)); 26 | l.output = calloc(outputs*batch, sizeof(float));; 27 | 28 | l.forward = forward_route_layer; 29 | l.backward = backward_route_layer; 30 | #ifdef GPU 31 | l.forward_gpu = forward_route_layer_gpu; 32 | l.backward_gpu = backward_route_layer_gpu; 33 | 34 | l.delta_gpu = cuda_make_array(l.delta, outputs*batch); 35 | l.output_gpu = cuda_make_array(l.output, outputs*batch); 36 | #endif 37 | return l; 38 | } 39 | 40 | void resize_route_layer(route_layer *l, network *net) 41 | { 42 | int i; 43 | layer first = net->layers[l->input_layers[0]]; 44 | l->out_w = first.out_w; 45 | l->out_h = first.out_h; 46 | l->out_c = first.out_c; 47 | l->outputs = first.outputs; 48 | l->input_sizes[0] = first.outputs; 49 | for(i = 1; i < l->n; ++i){ 50 | int index = l->input_layers[i]; 51 | layer next = 
net->layers[index]; 52 | l->outputs += next.outputs; 53 | l->input_sizes[i] = next.outputs; 54 | if(next.out_w == first.out_w && next.out_h == first.out_h){ 55 | l->out_c += next.out_c; 56 | }else{ 57 | printf("%d %d, %d %d\n", next.out_w, next.out_h, first.out_w, first.out_h); 58 | l->out_h = l->out_w = l->out_c = 0; 59 | } 60 | } 61 | l->inputs = l->outputs; 62 | l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float)); 63 | l->output = realloc(l->output, l->outputs*l->batch*sizeof(float)); 64 | 65 | #ifdef GPU 66 | cuda_free(l->output_gpu); 67 | cuda_free(l->delta_gpu); 68 | l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch); 69 | l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch); 70 | #endif 71 | 72 | } 73 | 74 | void forward_route_layer(const route_layer l, network net) 75 | { 76 | int i, j; 77 | int offset = 0; 78 | for(i = 0; i < l.n; ++i){ 79 | int index = l.input_layers[i]; 80 | float *input = net.layers[index].output; 81 | int input_size = l.input_sizes[i]; 82 | for(j = 0; j < l.batch; ++j){ 83 | copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1); 84 | } 85 | offset += input_size; 86 | } 87 | } 88 | 89 | void backward_route_layer(const route_layer l, network net) 90 | { 91 | int i, j; 92 | int offset = 0; 93 | for(i = 0; i < l.n; ++i){ 94 | int index = l.input_layers[i]; 95 | float *delta = net.layers[index].delta; 96 | int input_size = l.input_sizes[i]; 97 | for(j = 0; j < l.batch; ++j){ 98 | axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1); 99 | } 100 | offset += input_size; 101 | } 102 | } 103 | 104 | #ifdef GPU 105 | void forward_route_layer_gpu(const route_layer l, network net) 106 | { 107 | int i, j; 108 | int offset = 0; 109 | for(i = 0; i < l.n; ++i){ 110 | int index = l.input_layers[i]; 111 | float *input = net.layers[index].output_gpu; 112 | int input_size = l.input_sizes[i]; 113 | for(j = 0; j < l.batch; ++j){ 114 | copy_gpu(input_size, input + j*input_size, 1, l.output_gpu + offset + j*l.outputs, 1); 115 | } 116 | offset += input_size; 117 | } 118 | } 119 | 120 | void backward_route_layer_gpu(const route_layer l, network net) 121 | { 122 | int i, j; 123 | int offset = 0; 124 | for(i = 0; i < l.n; ++i){ 125 | int index = l.input_layers[i]; 126 | float *delta = net.layers[index].delta_gpu; 127 | int input_size = l.input_sizes[i]; 128 | for(j = 0; j < l.batch; ++j){ 129 | axpy_gpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1); 130 | } 131 | offset += input_size; 132 | } 133 | } 134 | #endif 135 | -------------------------------------------------------------------------------- /visual/facial/src/train_facial_classifier.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define NDEBUG 12 | #define WITH_CUDA 13 | #define TRAINSIZE 32000 14 | #define BATCHSIZE 80 15 | #define CLASSNUM 4 16 | 17 | using namespace std; 18 | using namespace caffe2; 19 | 20 | void setupTrainNet(NetDef & init, NetDef & predict); 21 | void setupSaveNet(NetDef & init, NetDef & save); 22 | 23 | unique_ptr predict_net; 24 | unique_ptr save_net; 25 | 26 | void atexit_handler() 27 | { 28 | cout<<"saving params"<Run(); 31 | } 32 | 33 | int main(int argc,char ** argv) 34 | { 35 | NetDef init,predict,save; 36 | setupTrainNet(init,predict); 37 | setupSaveNet(init,save); 38 | auto device = CUDA; 39 | 
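// note: unlike train_LSTM.cpp there is no CPU fallback here; the CUDA device option is stamped onto the init, training and save nets alike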
init.mutable_device_option()->set_device_type(device); 40 | predict.mutable_device_option()->set_device_type(device); 41 | save.mutable_device_option()->set_device_type(device); 42 | Workspace workspace(nullptr); 43 | workspace.RunNetOnce(init); 44 | predict_net = CreateNet(predict,&workspace); 45 | save_net = CreateNet(save,&workspace); 46 | atexit(atexit_handler); 47 | for(int i = 0 ; ; i++) { 48 | predict_net->Run(); 49 | cout<<"iter:"<Run(); 54 | } 55 | } 56 | return EXIT_SUCCESS; 57 | } 58 | 59 | void setupTrainNet(NetDef & init, NetDef & predict) 60 | { 61 | ModelUtil MobileNet(init,predict); 62 | MobileNet.init.AddCreateDbOp("db","lmdb","./dataset"); 63 | MobileNet.predict.AddInput("db"); 64 | MobileNet.AddTensorProtosDbInputOp("db","data","label",BATCHSIZE); 65 | MobileNet.predict.SetName("MobileNet"); 66 | auto input = "data"; 67 | auto n = 0; 68 | auto alpha = 1.0; 69 | bool train = true; 70 | std::string layer = input; 71 | layer = MobileNet.AddFirst("1", layer, 32, 2, alpha, train)->output(0); 72 | layer = MobileNet.AddFilter(tos2(n++), layer, 32, 64, 1, alpha, train)->output(0); 73 | layer = MobileNet.AddFilter(tos2(n++), layer, 64, 128, 2, alpha, train)->output(0); 74 | layer = MobileNet.AddFilter(tos2(n++), layer, 128, 128, 1, alpha, train)->output(0); 75 | layer = MobileNet.AddFilter(tos2(n++), layer, 128, 256, 2, alpha, train)->output(0); 76 | layer = MobileNet.AddFilter(tos2(n++), layer, 256, 256, 1, alpha, train)->output(0); 77 | layer = MobileNet.AddFilter(tos2(n++), layer, 256, 512, 2, alpha, train)->output(0); 78 | for (auto i = 0; i < 5; i++) { // 6 - 10 79 | layer = MobileNet.AddFilter(tos2(n++), layer, 512, 512, 1, alpha, train)->output(0); 80 | } 81 | layer = MobileNet.AddFilter(tos2(n++), layer, 512, 1024, 2, alpha, train)->output(0); 82 | layer = MobileNet.AddFilter(tos2(n++), layer, 1024, 1024, 1, alpha, train)->output(0); 83 | MobileNet.AddAveragePoolOp(layer, "final_avg", 1, 0, 5); 84 | MobileNet.AddFcOps("final_avg", "fc", 1024, 128, train); 85 | MobileNet.AddFcOps("fc","logits",128,CLASSNUM,train); 86 | MobileNet.AddSoftmaxOp("logits","softmax"); 87 | MobileNet.AddCrossEntropyOp({"softmax","label"},"loss"); 88 | MobileNet.AddConstantFillWithOp(1.0, "loss", "loss_grad"); 89 | MobileNet.predict.AddGradientOps(); 90 | MobileNet.AddIterOps(); 91 | #ifndef NDEBUG 92 | MobileNet.predict.AddTimePlotOp("loss","iter","train",10); 93 | #endif 94 | MobileNet.AddLearningRateOp("iter", "lr", -0.01,0.9,100*round(static_cast(TRAINSIZE)/BATCHSIZE)); 95 | string optimizer = "adam"; 96 | MobileNet.AddOptimizerOps(optimizer); 97 | // write out the network structure 98 | MobileNet.init.WriteText("models/MobileNet_train_init.pbtxt"); 99 | MobileNet.predict.WriteText("models/MobileNet_train_predict.pbtxt"); 100 | } 101 | 102 | void setupSaveNet(NetDef & init, NetDef & save) 103 | { 104 | NetUtil InitNet(init); 105 | NetUtil SaveNet(save); 106 | vector params; 107 | for(auto & op : InitNet.net.op()) { 108 | if(op.type() == "CreateDB") continue; 109 | for(auto & output : op.output()) 110 | params.push_back(output); 111 | } 112 | SaveNet.AddSaveOp(params,"lmdb","LSTM_params"); 113 | //output network 114 | SaveNet.WriteText("models/lstm_train_save.pbtxt"); 115 | } -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/layer.c: -------------------------------------------------------------------------------- 1 | #include "layer.h" 2 | #include "cuda.h" 3 | 4 | #include <stdlib.h> 5 | 6 | void free_layer(layer l) 7 | { 8 | if(l.type == DROPOUT){ 9 | if(l.rand)
free(l.rand); 10 | #ifdef GPU 11 | if(l.rand_gpu) cuda_free(l.rand_gpu); 12 | #endif 13 | return; 14 | } 15 | if(l.cweights) free(l.cweights); 16 | if(l.indexes) free(l.indexes); 17 | if(l.input_layers) free(l.input_layers); 18 | if(l.input_sizes) free(l.input_sizes); 19 | if(l.map) free(l.map); 20 | if(l.rand) free(l.rand); 21 | if(l.cost) free(l.cost); 22 | if(l.state) free(l.state); 23 | if(l.prev_state) free(l.prev_state); 24 | if(l.forgot_state) free(l.forgot_state); 25 | if(l.forgot_delta) free(l.forgot_delta); 26 | if(l.state_delta) free(l.state_delta); 27 | if(l.concat) free(l.concat); 28 | if(l.concat_delta) free(l.concat_delta); 29 | if(l.binary_weights) free(l.binary_weights); 30 | if(l.biases) free(l.biases); 31 | if(l.bias_updates) free(l.bias_updates); 32 | if(l.scales) free(l.scales); 33 | if(l.scale_updates) free(l.scale_updates); 34 | if(l.weights) free(l.weights); 35 | if(l.weight_updates) free(l.weight_updates); 36 | if(l.delta) free(l.delta); 37 | if(l.output) free(l.output); 38 | if(l.squared) free(l.squared); 39 | if(l.norms) free(l.norms); 40 | if(l.spatial_mean) free(l.spatial_mean); 41 | if(l.mean) free(l.mean); 42 | if(l.variance) free(l.variance); 43 | if(l.mean_delta) free(l.mean_delta); 44 | if(l.variance_delta) free(l.variance_delta); 45 | if(l.rolling_mean) free(l.rolling_mean); 46 | if(l.rolling_variance) free(l.rolling_variance); 47 | if(l.x) free(l.x); 48 | if(l.x_norm) free(l.x_norm); 49 | if(l.m) free(l.m); 50 | if(l.v) free(l.v); 51 | if(l.z_cpu) free(l.z_cpu); 52 | if(l.r_cpu) free(l.r_cpu); 53 | if(l.h_cpu) free(l.h_cpu); 54 | if(l.binary_input) free(l.binary_input); 55 | 56 | #ifdef GPU 57 | if(l.indexes_gpu) cuda_free((float *)l.indexes_gpu); 58 | 59 | if(l.z_gpu) cuda_free(l.z_gpu); 60 | if(l.r_gpu) cuda_free(l.r_gpu); 61 | if(l.h_gpu) cuda_free(l.h_gpu); 62 | if(l.m_gpu) cuda_free(l.m_gpu); 63 | if(l.v_gpu) cuda_free(l.v_gpu); 64 | if(l.prev_state_gpu) cuda_free(l.prev_state_gpu); 65 | if(l.forgot_state_gpu) cuda_free(l.forgot_state_gpu); 66 | if(l.forgot_delta_gpu) cuda_free(l.forgot_delta_gpu); 67 | if(l.state_gpu) cuda_free(l.state_gpu); 68 | if(l.state_delta_gpu) cuda_free(l.state_delta_gpu); 69 | if(l.gate_gpu) cuda_free(l.gate_gpu); 70 | if(l.gate_delta_gpu) cuda_free(l.gate_delta_gpu); 71 | if(l.save_gpu) cuda_free(l.save_gpu); 72 | if(l.save_delta_gpu) cuda_free(l.save_delta_gpu); 73 | if(l.concat_gpu) cuda_free(l.concat_gpu); 74 | if(l.concat_delta_gpu) cuda_free(l.concat_delta_gpu); 75 | if(l.binary_input_gpu) cuda_free(l.binary_input_gpu); 76 | if(l.binary_weights_gpu) cuda_free(l.binary_weights_gpu); 77 | if(l.mean_gpu) cuda_free(l.mean_gpu); 78 | if(l.variance_gpu) cuda_free(l.variance_gpu); 79 | if(l.rolling_mean_gpu) cuda_free(l.rolling_mean_gpu); 80 | if(l.rolling_variance_gpu) cuda_free(l.rolling_variance_gpu); 81 | if(l.variance_delta_gpu) cuda_free(l.variance_delta_gpu); 82 | if(l.mean_delta_gpu) cuda_free(l.mean_delta_gpu); 83 | if(l.x_gpu) cuda_free(l.x_gpu); 84 | if(l.x_norm_gpu) cuda_free(l.x_norm_gpu); 85 | if(l.weights_gpu) cuda_free(l.weights_gpu); 86 | if(l.weight_updates_gpu) cuda_free(l.weight_updates_gpu); 87 | if(l.biases_gpu) cuda_free(l.biases_gpu); 88 | if(l.bias_updates_gpu) cuda_free(l.bias_updates_gpu); 89 | if(l.scales_gpu) cuda_free(l.scales_gpu); 90 | if(l.scale_updates_gpu) cuda_free(l.scale_updates_gpu); 91 | if(l.output_gpu) cuda_free(l.output_gpu); 92 | if(l.delta_gpu) cuda_free(l.delta_gpu); 93 | if(l.rand_gpu) cuda_free(l.rand_gpu); 94 | if(l.squared_gpu) cuda_free(l.squared_gpu); 95 | if(l.norms_gpu) 
cuda_free(l.norms_gpu); 96 | #endif 97 | } 98 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/cuda.c: -------------------------------------------------------------------------------- 1 | int gpu_index = 0; 2 | 3 | #ifdef GPU 4 | 5 | #include "cuda.h" 6 | #include "utils.h" 7 | #include "blas.h" 8 | #include 9 | #include 10 | #include 11 | 12 | void cuda_set_device(int n) 13 | { 14 | gpu_index = n; 15 | cudaError_t status = cudaSetDevice(n); 16 | check_error(status); 17 | } 18 | 19 | int cuda_get_device() 20 | { 21 | int n = 0; 22 | cudaError_t status = cudaGetDevice(&n); 23 | check_error(status); 24 | return n; 25 | } 26 | 27 | void check_error(cudaError_t status) 28 | { 29 | //cudaDeviceSynchronize(); 30 | cudaError_t status2 = cudaGetLastError(); 31 | if (status != cudaSuccess) 32 | { 33 | const char *s = cudaGetErrorString(status); 34 | char buffer[256]; 35 | printf("CUDA Error: %s\n", s); 36 | assert(0); 37 | snprintf(buffer, 256, "CUDA Error: %s", s); 38 | error(buffer); 39 | } 40 | if (status2 != cudaSuccess) 41 | { 42 | const char *s = cudaGetErrorString(status); 43 | char buffer[256]; 44 | printf("CUDA Error Prev: %s\n", s); 45 | assert(0); 46 | snprintf(buffer, 256, "CUDA Error Prev: %s", s); 47 | error(buffer); 48 | } 49 | } 50 | 51 | dim3 cuda_gridsize(size_t n){ 52 | size_t k = (n-1) / BLOCK + 1; 53 | size_t x = k; 54 | size_t y = 1; 55 | if(x > 65535){ 56 | x = ceil(sqrt(k)); 57 | y = (n-1)/(x*BLOCK) + 1; 58 | } 59 | dim3 d = {x, y, 1}; 60 | //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK); 61 | return d; 62 | } 63 | 64 | #ifdef CUDNN 65 | cudnnHandle_t cudnn_handle() 66 | { 67 | static int init[16] = {0}; 68 | static cudnnHandle_t handle[16]; 69 | int i = cuda_get_device(); 70 | if(!init[i]) { 71 | cudnnCreate(&handle[i]); 72 | init[i] = 1; 73 | } 74 | return handle[i]; 75 | } 76 | #endif 77 | 78 | cublasHandle_t blas_handle() 79 | { 80 | static int init[16] = {0}; 81 | static cublasHandle_t handle[16]; 82 | int i = cuda_get_device(); 83 | if(!init[i]) { 84 | cublasCreate(&handle[i]); 85 | init[i] = 1; 86 | } 87 | return handle[i]; 88 | } 89 | 90 | float *cuda_make_array(float *x, size_t n) 91 | { 92 | float *x_gpu; 93 | size_t size = sizeof(float)*n; 94 | cudaError_t status = cudaMalloc((void **)&x_gpu, size); 95 | check_error(status); 96 | if(x){ 97 | status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); 98 | check_error(status); 99 | } else { 100 | fill_gpu(n, 0, x_gpu, 1); 101 | } 102 | if(!x_gpu) error("Cuda malloc failed\n"); 103 | return x_gpu; 104 | } 105 | 106 | void cuda_random(float *x_gpu, size_t n) 107 | { 108 | static curandGenerator_t gen[16]; 109 | static int init[16] = {0}; 110 | int i = cuda_get_device(); 111 | if(!init[i]){ 112 | curandCreateGenerator(&gen[i], CURAND_RNG_PSEUDO_DEFAULT); 113 | curandSetPseudoRandomGeneratorSeed(gen[i], time(0)); 114 | init[i] = 1; 115 | } 116 | curandGenerateUniform(gen[i], x_gpu, n); 117 | check_error(cudaPeekAtLastError()); 118 | } 119 | 120 | float cuda_compare(float *x_gpu, float *x, size_t n, char *s) 121 | { 122 | float *tmp = calloc(n, sizeof(float)); 123 | cuda_pull_array(x_gpu, tmp, n); 124 | //int i; 125 | //for(i = 0; i < n; ++i) printf("%f %f\n", tmp[i], x[i]); 126 | axpy_cpu(n, -1, x, 1, tmp, 1); 127 | float err = dot_cpu(n, tmp, 1, tmp, 1); 128 | printf("Error %s: %f\n", s, sqrt(err/n)); 129 | free(tmp); 130 | return err; 131 | } 132 | 133 | int *cuda_make_int_array(int *x, size_t n) 134 | { 135 | int *x_gpu; 136 | size_t size = 
sizeof(int)*n; 137 | cudaError_t status = cudaMalloc((void **)&x_gpu, size); 138 | check_error(status); 139 | if(x){ 140 | status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); 141 | check_error(status); 142 | } 143 | if(!x_gpu) error("Cuda malloc failed\n"); 144 | return x_gpu; 145 | } 146 | 147 | void cuda_free(float *x_gpu) 148 | { 149 | cudaError_t status = cudaFree(x_gpu); 150 | check_error(status); 151 | } 152 | 153 | void cuda_push_array(float *x_gpu, float *x, size_t n) 154 | { 155 | size_t size = sizeof(float)*n; 156 | cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); 157 | check_error(status); 158 | } 159 | 160 | void cuda_pull_array(float *x_gpu, float *x, size_t n) 161 | { 162 | size_t size = sizeof(float)*n; 163 | cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost); 164 | check_error(status); 165 | } 166 | 167 | float cuda_mag_array(float *x_gpu, size_t n) 168 | { 169 | float *temp = calloc(n, sizeof(float)); 170 | cuda_pull_array(x_gpu, temp, n); 171 | float m = mag_array(temp, n); 172 | free(temp); 173 | return m; 174 | } 175 | #else 176 | void cuda_set_device(int n){} 177 | 178 | #endif 179 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/tag.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | 3 | void train_tag(char *cfgfile, char *weightfile, int clear) 4 | { 5 | srand(time(0)); 6 | float avg_loss = -1; 7 | char *base = basecfg(cfgfile); 8 | char *backup_directory = "/home/pjreddie/backup/"; 9 | printf("%s\n", base); 10 | network *net = load_network(cfgfile, weightfile, clear); 11 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); 12 | int imgs = 1024; 13 | list *plist = get_paths("/home/pjreddie/tag/train.list"); 14 | char **paths = (char **)list_to_array(plist); 15 | printf("%d\n", plist->size); 16 | int N = plist->size; 17 | clock_t time; 18 | pthread_t load_thread; 19 | data train; 20 | data buffer; 21 | 22 | load_args args = {0}; 23 | args.w = net->w; 24 | args.h = net->h; 25 | 26 | args.min = net->w; 27 | args.max = net->max_crop; 28 | args.size = net->w; 29 | 30 | args.paths = paths; 31 | args.classes = net->outputs; 32 | args.n = imgs; 33 | args.m = N; 34 | args.d = &buffer; 35 | args.type = TAG_DATA; 36 | 37 | args.angle = net->angle; 38 | args.exposure = net->exposure; 39 | args.saturation = net->saturation; 40 | args.hue = net->hue; 41 | 42 | fprintf(stderr, "%d classes\n", net->outputs); 43 | 44 | load_thread = load_data_in_thread(args); 45 | int epoch = (*net->seen)/N; 46 | while(get_current_batch(net) < net->max_batches || net->max_batches == 0){ 47 | time=clock(); 48 | pthread_join(load_thread, 0); 49 | train = buffer; 50 | 51 | load_thread = load_data_in_thread(args); 52 | printf("Loaded: %lf seconds\n", sec(clock()-time)); 53 | time=clock(); 54 | float loss = train_network(net, train); 55 | if(avg_loss == -1) avg_loss = loss; 56 | avg_loss = avg_loss*.9 + loss*.1; 57 | printf("%ld, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net->seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net->seen); 58 | free_data(train); 59 | if(*net->seen/N > epoch){ 60 | epoch = *net->seen/N; 61 | char buff[256]; 62 | sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch); 63 | save_weights(net, buff); 64 | } 65 | if(get_current_batch(net)%100 == 0){ 66 | char buff[256]; 
67 | sprintf(buff, "%s/%s.backup",backup_directory,base); 68 | save_weights(net, buff); 69 | } 70 | } 71 | char buff[256]; 72 | sprintf(buff, "%s/%s.weights", backup_directory, base); 73 | save_weights(net, buff); 74 | 75 | pthread_join(load_thread, 0); 76 | free_data(buffer); 77 | free_network(net); 78 | free_ptrs((void**)paths, plist->size); 79 | free_list(plist); 80 | free(base); 81 | } 82 | 83 | void test_tag(char *cfgfile, char *weightfile, char *filename) 84 | { 85 | network *net = load_network(cfgfile, weightfile, 0); 86 | set_batch_network(net, 1); 87 | srand(2222222); 88 | int i = 0; 89 | char **names = get_labels("data/tags.txt"); 90 | clock_t time; 91 | int indexes[10]; 92 | char buff[256]; 93 | char *input = buff; 94 | int size = net->w; 95 | while(1){ 96 | if(filename){ 97 | strncpy(input, filename, 256); 98 | }else{ 99 | printf("Enter Image Path: "); 100 | fflush(stdout); 101 | input = fgets(input, 256, stdin); 102 | if(!input) return; 103 | strtok(input, "\n"); 104 | } 105 | image im = load_image_color(input, 0, 0); 106 | image r = resize_min(im, size); 107 | resize_network(net, r.w, r.h); 108 | printf("%d %d\n", r.w, r.h); 109 | 110 | float *X = r.data; 111 | time=clock(); 112 | float *predictions = network_predict(net, X); 113 | top_predictions(net, 10, indexes); 114 | printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time)); 115 | for(i = 0; i < 10; ++i){ 116 | int index = indexes[i]; 117 | printf("%.1f%%: %s\n", predictions[index]*100, names[index]); 118 | } 119 | if(r.data != im.data) free_image(r); 120 | free_image(im); 121 | if (filename) break; 122 | } 123 | } 124 | 125 | 126 | void run_tag(int argc, char **argv) 127 | { 128 | if(argc < 4){ 129 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 130 | return; 131 | } 132 | 133 | int clear = find_arg(argc, argv, "-clear"); 134 | char *cfg = argv[3]; 135 | char *weights = (argc > 4) ? argv[4] : 0; 136 | char *filename = (argc > 5) ? 
argv[5] : 0; 137 | if(0==strcmp(argv[2], "train")) train_tag(cfg, weights, clear); 138 | else if(0==strcmp(argv[2], "test")) test_tag(cfg, weights, filename); 139 | } 140 | 141 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/writing.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | 3 | void train_writing(char *cfgfile, char *weightfile) 4 | { 5 | char *backup_directory = "/home/pjreddie/backup/"; 6 | srand(time(0)); 7 | float avg_loss = -1; 8 | char *base = basecfg(cfgfile); 9 | printf("%s\n", base); 10 | network net = parse_network_cfg(cfgfile); 11 | if(weightfile){ 12 | load_weights(&net, weightfile); 13 | } 14 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay); 15 | int imgs = net.batch*net.subdivisions; 16 | list *plist = get_paths("figures.list"); 17 | char **paths = (char **)list_to_array(plist); 18 | clock_t time; 19 | int N = plist->size; 20 | printf("N: %d\n", N); 21 | image out = get_network_image(net); 22 | 23 | data train, buffer; 24 | 25 | load_args args = {0}; 26 | args.w = net.w; 27 | args.h = net.h; 28 | args.out_w = out.w; 29 | args.out_h = out.h; 30 | args.paths = paths; 31 | args.n = imgs; 32 | args.m = N; 33 | args.d = &buffer; 34 | args.type = WRITING_DATA; 35 | 36 | pthread_t load_thread = load_data_in_thread(args); 37 | int epoch = (*net.seen)/N; 38 | while(get_current_batch(net) < net.max_batches || net.max_batches == 0){ 39 | time=clock(); 40 | pthread_join(load_thread, 0); 41 | train = buffer; 42 | load_thread = load_data_in_thread(args); 43 | printf("Loaded %lf seconds\n",sec(clock()-time)); 44 | 45 | time=clock(); 46 | float loss = train_network(net, train); 47 | 48 | /* 49 | image pred = float_to_image(64, 64, 1, out); 50 | print_image(pred); 51 | */ 52 | 53 | /* 54 | image im = float_to_image(256, 256, 3, train.X.vals[0]); 55 | image lab = float_to_image(64, 64, 1, train.y.vals[0]); 56 | image pred = float_to_image(64, 64, 1, out); 57 | show_image(im, "image"); 58 | show_image(lab, "label"); 59 | print_image(lab); 60 | show_image(pred, "pred"); 61 | cvWaitKey(0); 62 | */ 63 | 64 | if(avg_loss == -1) avg_loss = loss; 65 | avg_loss = avg_loss*.9 + loss*.1; 66 | printf("%ld, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen); 67 | free_data(train); 68 | if(get_current_batch(net)%100 == 0){ 69 | char buff[256]; 70 | sprintf(buff, "%s/%s_batch_%ld.weights", backup_directory, base, get_current_batch(net)); 71 | save_weights(net, buff); 72 | } 73 | if(*net.seen/N > epoch){ 74 | epoch = *net.seen/N; 75 | char buff[256]; 76 | sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch); 77 | save_weights(net, buff); 78 | } 79 | } 80 | } 81 | 82 | void test_writing(char *cfgfile, char *weightfile, char *filename) 83 | { 84 | network net = parse_network_cfg(cfgfile); 85 | if(weightfile){ 86 | load_weights(&net, weightfile); 87 | } 88 | set_batch_network(&net, 1); 89 | srand(2222222); 90 | clock_t time; 91 | char buff[256]; 92 | char *input = buff; 93 | while(1){ 94 | if(filename){ 95 | strncpy(input, filename, 256); 96 | }else{ 97 | printf("Enter Image Path: "); 98 | fflush(stdout); 99 | input = fgets(input, 256, stdin); 100 | if(!input) return; 101 | strtok(input, "\n"); 102 | } 103 | 104 | image im = load_image_color(input, 0, 0); 105 | resize_network(&net, im.w, 
im.h); 106 | printf("%d %d %d\n", im.h, im.w, im.c); 107 | float *X = im.data; 108 | time=clock(); 109 | network_predict(net, X); 110 | printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time)); 111 | image pred = get_network_image(net); 112 | 113 | image upsampled = resize_image(pred, im.w, im.h); 114 | image thresh = threshold_image(upsampled, .5); 115 | pred = thresh; 116 | 117 | show_image(pred, "prediction"); 118 | show_image(im, "orig"); 119 | #ifdef OPENCV 120 | cvWaitKey(0); 121 | cvDestroyAllWindows(); 122 | #endif 123 | 124 | free_image(upsampled); 125 | free_image(thresh); 126 | free_image(im); 127 | if (filename) break; 128 | } 129 | } 130 | 131 | void run_writing(int argc, char **argv) 132 | { 133 | if(argc < 4){ 134 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 135 | return; 136 | } 137 | 138 | char *cfg = argv[3]; 139 | char *weights = (argc > 4) ? argv[4] : 0; 140 | char *filename = (argc > 5) ? argv[5] : 0; 141 | if(0==strcmp(argv[2], "train")) train_writing(cfg, weights); 142 | else if(0==strcmp(argv[2], "test")) test_writing(cfg, weights, filename); 143 | } 144 | 145 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/python/darknet.py: -------------------------------------------------------------------------------- 1 | from ctypes import * 2 | import math 3 | import random 4 | 5 | def sample(probs): 6 | s = sum(probs) 7 | probs = [a/s for a in probs] 8 | r = random.uniform(0, 1) 9 | for i in range(len(probs)): 10 | r = r - probs[i] 11 | if r <= 0: 12 | return i 13 | return len(probs)-1 14 | 15 | def c_array(ctype, values): 16 | arr = (ctype*len(values))() 17 | arr[:] = values 18 | return arr 19 | 20 | class BOX(Structure): 21 | _fields_ = [("x", c_float), 22 | ("y", c_float), 23 | ("w", c_float), 24 | ("h", c_float)] 25 | 26 | class DETECTION(Structure): 27 | _fields_ = [("bbox", BOX), 28 | ("classes", c_int), 29 | ("prob", POINTER(c_float)), 30 | ("mask", POINTER(c_float)), 31 | ("objectness", c_float), 32 | ("sort_class", c_int)] 33 | 34 | 35 | class IMAGE(Structure): 36 | _fields_ = [("w", c_int), 37 | ("h", c_int), 38 | ("c", c_int), 39 | ("data", POINTER(c_float))] 40 | 41 | class METADATA(Structure): 42 | _fields_ = [("classes", c_int), 43 | ("names", POINTER(c_char_p))] 44 | 45 | 46 | 47 | #lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL) 48 | lib = CDLL("libdarknet.so", RTLD_GLOBAL) 49 | lib.network_width.argtypes = [c_void_p] 50 | lib.network_width.restype = c_int 51 | lib.network_height.argtypes = [c_void_p] 52 | lib.network_height.restype = c_int 53 | 54 | predict = lib.network_predict 55 | predict.argtypes = [c_void_p, POINTER(c_float)] 56 | predict.restype = POINTER(c_float) 57 | 58 | set_gpu = lib.cuda_set_device 59 | set_gpu.argtypes = [c_int] 60 | 61 | make_image = lib.make_image 62 | make_image.argtypes = [c_int, c_int, c_int] 63 | make_image.restype = IMAGE 64 | 65 | get_network_boxes = lib.get_network_boxes 66 | get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)] 67 | get_network_boxes.restype = POINTER(DETECTION) 68 | 69 | make_network_boxes = lib.make_network_boxes 70 | make_network_boxes.argtypes = [c_void_p] 71 | make_network_boxes.restype = POINTER(DETECTION) 72 | 73 | free_detections = lib.free_detections 74 | free_detections.argtypes = [POINTER(DETECTION), c_int] 75 | 76 | free_ptrs = lib.free_ptrs 77 | free_ptrs.argtypes = 
[POINTER(c_void_p), c_int] 78 | 79 | network_predict = lib.network_predict 80 | network_predict.argtypes = [c_void_p, POINTER(c_float)] 81 | 82 | reset_rnn = lib.reset_rnn 83 | reset_rnn.argtypes = [c_void_p] 84 | 85 | load_net = lib.load_network 86 | load_net.argtypes = [c_char_p, c_char_p, c_int] 87 | load_net.restype = c_void_p 88 | 89 | do_nms_obj = lib.do_nms_obj 90 | do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float] 91 | 92 | do_nms_sort = lib.do_nms_sort 93 | do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float] 94 | 95 | free_image = lib.free_image 96 | free_image.argtypes = [IMAGE] 97 | 98 | letterbox_image = lib.letterbox_image 99 | letterbox_image.argtypes = [IMAGE, c_int, c_int] 100 | letterbox_image.restype = IMAGE 101 | 102 | load_meta = lib.get_metadata 103 | lib.get_metadata.argtypes = [c_char_p] 104 | lib.get_metadata.restype = METADATA 105 | 106 | load_image = lib.load_image_color 107 | load_image.argtypes = [c_char_p, c_int, c_int] 108 | load_image.restype = IMAGE 109 | 110 | rgbgr_image = lib.rgbgr_image 111 | rgbgr_image.argtypes = [IMAGE] 112 | 113 | predict_image = lib.network_predict_image 114 | predict_image.argtypes = [c_void_p, IMAGE] 115 | predict_image.restype = POINTER(c_float) 116 | 117 | def classify(net, meta, im): 118 | out = predict_image(net, im) 119 | res = [] 120 | for i in range(meta.classes): 121 | res.append((meta.names[i], out[i])) 122 | res = sorted(res, key=lambda x: -x[1]) 123 | return res 124 | 125 | def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45): 126 | im = load_image(image, 0, 0) 127 | num = c_int(0) 128 | pnum = pointer(num) 129 | predict_image(net, im) 130 | dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum) 131 | num = pnum[0] 132 | if (nms): do_nms_obj(dets, num, meta.classes, nms); 133 | 134 | res = [] 135 | for j in range(num): 136 | for i in range(meta.classes): 137 | if dets[j].prob[i] > 0: 138 | b = dets[j].bbox 139 | res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h))) 140 | res = sorted(res, key=lambda x: -x[1]) 141 | free_image(im) 142 | free_detections(dets, num) 143 | return res 144 | 145 | if __name__ == "__main__": 146 | #net = load_net("cfg/densenet201.cfg", "/home/pjreddie/trained/densenet201.weights", 0) 147 | #im = load_image("data/wolf.jpg", 0, 0) 148 | #meta = load_meta("cfg/imagenet1k.data") 149 | #r = classify(net, meta, im) 150 | #print r[:10] 151 | net = load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0) 152 | meta = load_meta("cfg/coco.data") 153 | r = detect(net, meta, "data/dog.jpg") 154 | print r 155 | 156 | 157 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/matrix.c: -------------------------------------------------------------------------------- 1 | #include "matrix.h" 2 | #include "utils.h" 3 | #include "blas.h" 4 | #include <stdio.h> 5 | #include <stdlib.h> 6 | #include <string.h> 7 | #include <assert.h> 8 | #include <math.h> 9 | 10 | void free_matrix(matrix m) 11 | { 12 | int i; 13 | for(i = 0; i < m.rows; ++i) free(m.vals[i]); 14 | free(m.vals); 15 | } 16 | 17 | float matrix_topk_accuracy(matrix truth, matrix guess, int k) 18 | { 19 | int *indexes = calloc(k, sizeof(int)); 20 | int n = truth.cols; 21 | int i,j; 22 | int correct = 0; 23 | for(i = 0; i < truth.rows; ++i){ 24 | top_k(guess.vals[i], n, k, indexes); 25 | for(j = 0; j < k; ++j){ 26 | int class = indexes[j]; 27 | if(truth.vals[i][class]){ 28 | ++correct; 29 | break; 30 | } 31 | } 32 | } 33 | free(indexes); 34 | return
(float)correct/truth.rows; 35 | } 36 | 37 | void scale_matrix(matrix m, float scale) 38 | { 39 | int i,j; 40 | for(i = 0; i < m.rows; ++i){ 41 | for(j = 0; j < m.cols; ++j){ 42 | m.vals[i][j] *= scale; 43 | } 44 | } 45 | } 46 | 47 | matrix resize_matrix(matrix m, int size) 48 | { 49 | int i; 50 | if (m.rows == size) return m; 51 | if (m.rows < size) { 52 | m.vals = realloc(m.vals, size*sizeof(float*)); 53 | for (i = m.rows; i < size; ++i) { 54 | m.vals[i] = calloc(m.cols, sizeof(float)); 55 | } 56 | } else if (m.rows > size) { 57 | for (i = size; i < m.rows; ++i) { 58 | free(m.vals[i]); 59 | } 60 | m.vals = realloc(m.vals, size*sizeof(float*)); 61 | } 62 | m.rows = size; 63 | return m; 64 | } 65 | 66 | void matrix_add_matrix(matrix from, matrix to) 67 | { 68 | assert(from.rows == to.rows && from.cols == to.cols); 69 | int i,j; 70 | for(i = 0; i < from.rows; ++i){ 71 | for(j = 0; j < from.cols; ++j){ 72 | to.vals[i][j] += from.vals[i][j]; 73 | } 74 | } 75 | } 76 | 77 | matrix copy_matrix(matrix m) 78 | { 79 | matrix c = {0}; 80 | c.rows = m.rows; 81 | c.cols = m.cols; 82 | c.vals = calloc(c.rows, sizeof(float *)); 83 | int i; 84 | for(i = 0; i < c.rows; ++i){ 85 | c.vals[i] = calloc(c.cols, sizeof(float)); 86 | copy_cpu(c.cols, m.vals[i], 1, c.vals[i], 1); 87 | } 88 | return c; 89 | } 90 | 91 | matrix make_matrix(int rows, int cols) 92 | { 93 | int i; 94 | matrix m; 95 | m.rows = rows; 96 | m.cols = cols; 97 | m.vals = calloc(m.rows, sizeof(float *)); 98 | for(i = 0; i < m.rows; ++i){ 99 | m.vals[i] = calloc(m.cols, sizeof(float)); 100 | } 101 | return m; 102 | } 103 | 104 | matrix hold_out_matrix(matrix *m, int n) 105 | { 106 | int i; 107 | matrix h; 108 | h.rows = n; 109 | h.cols = m->cols; 110 | h.vals = calloc(h.rows, sizeof(float *)); 111 | for(i = 0; i < n; ++i){ 112 | int index = rand()%m->rows; 113 | h.vals[i] = m->vals[index]; 114 | m->vals[index] = m->vals[--(m->rows)]; 115 | } 116 | return h; 117 | } 118 | 119 | float *pop_column(matrix *m, int c) 120 | { 121 | float *col = calloc(m->rows, sizeof(float)); 122 | int i, j; 123 | for(i = 0; i < m->rows; ++i){ 124 | col[i] = m->vals[i][c]; 125 | for(j = c; j < m->cols-1; ++j){ 126 | m->vals[i][j] = m->vals[i][j+1]; 127 | } 128 | } 129 | --m->cols; 130 | return col; 131 | } 132 | 133 | matrix csv_to_matrix(char *filename) 134 | { 135 | FILE *fp = fopen(filename, "r"); 136 | if(!fp) file_error(filename); 137 | 138 | matrix m; 139 | m.cols = -1; 140 | 141 | char *line; 142 | 143 | int n = 0; 144 | int size = 1024; 145 | m.vals = calloc(size, sizeof(float*)); 146 | while((line = fgetl(fp))){ 147 | if(m.cols == -1) m.cols = count_fields(line); 148 | if(n == size){ 149 | size *= 2; 150 | m.vals = realloc(m.vals, size*sizeof(float*)); 151 | } 152 | m.vals[n] = parse_fields(line, m.cols); 153 | free(line); 154 | ++n; 155 | } 156 | m.vals = realloc(m.vals, n*sizeof(float*)); 157 | m.rows = n; 158 | return m; 159 | } 160 | 161 | void matrix_to_csv(matrix m) 162 | { 163 | int i, j; 164 | 165 | for(i = 0; i < m.rows; ++i){ 166 | for(j = 0; j < m.cols; ++j){ 167 | if(j > 0) printf(","); 168 | printf("%.17g", m.vals[i][j]); 169 | } 170 | printf("\n"); 171 | } 172 | } 173 | 174 | void print_matrix(matrix m) 175 | { 176 | int i, j; 177 | printf("%d X %d Matrix:\n",m.rows, m.cols); 178 | printf(" __"); 179 | for(j = 0; j < 16*m.cols-1; ++j) printf(" "); 180 | printf("__ \n"); 181 | 182 | printf("| "); 183 | for(j = 0; j < 16*m.cols-1; ++j) printf(" "); 184 | printf(" |\n"); 185 | 186 | for(i = 0; i < m.rows; ++i){ 187 | printf("| "); 188 | for(j = 0; 
j < m.cols; ++j){ 189 | printf("%15.7f ", m.vals[i][j]); 190 | } 191 | printf(" |\n"); 192 | } 193 | printf("|__"); 194 | for(j = 0; j < 16*m.cols-1; ++j) printf(" "); 195 | printf("__|\n"); 196 | } 197 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/deconvolutional_kernels.cu: -------------------------------------------------------------------------------- 1 | #include "cuda_runtime.h" 2 | #include "curand.h" 3 | #include "cublas_v2.h" 4 | 5 | extern "C" { 6 | #include "convolutional_layer.h" 7 | #include "deconvolutional_layer.h" 8 | #include "batchnorm_layer.h" 9 | #include "gemm.h" 10 | #include "blas.h" 11 | #include "im2col.h" 12 | #include "col2im.h" 13 | #include "utils.h" 14 | #include "cuda.h" 15 | } 16 | 17 | extern "C" void forward_deconvolutional_layer_gpu(layer l, network net) 18 | { 19 | int i; 20 | 21 | int m = l.size*l.size*l.n; 22 | int n = l.h*l.w; 23 | int k = l.c; 24 | 25 | fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); 26 | 27 | for(i = 0; i < l.batch; ++i){ 28 | float *a = l.weights_gpu; 29 | float *b = net.input_gpu + i*l.c*l.h*l.w; 30 | float *c = net.workspace; 31 | 32 | gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n); 33 | 34 | col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs); 35 | } 36 | if (l.batch_normalize) { 37 | forward_batchnorm_layer_gpu(l, net); 38 | } else { 39 | add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); 40 | } 41 | activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation); 42 | } 43 | 44 | extern "C" void backward_deconvolutional_layer_gpu(layer l, network net) 45 | { 46 | int i; 47 | 48 | //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); 49 | gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); 50 | 51 | if(l.batch_normalize){ 52 | backward_batchnorm_layer_gpu(l, net); 53 | } else { 54 | backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); 55 | } 56 | 57 | //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float)); 58 | 59 | for(i = 0; i < l.batch; ++i){ 60 | int m = l.c; 61 | int n = l.size*l.size*l.n; 62 | int k = l.h*l.w; 63 | 64 | float *a = net.input_gpu + i*m*k; 65 | float *b = net.workspace; 66 | float *c = l.weight_updates_gpu; 67 | 68 | im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w, 69 | l.size, l.stride, l.pad, b); 70 | gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); 71 | 72 | if(net.delta_gpu){ 73 | int m = l.c; 74 | int n = l.h*l.w; 75 | int k = l.size*l.size*l.n; 76 | 77 | float *a = l.weights_gpu; 78 | float *b = net.workspace; 79 | float *c = net.delta_gpu + i*n*m; 80 | 81 | gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); 82 | } 83 | } 84 | } 85 | 86 | extern "C" void pull_deconvolutional_layer(layer l) 87 | { 88 | cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); 89 | cuda_pull_array(l.biases_gpu, l.biases, l.n); 90 | cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); 91 | cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); 92 | if (l.batch_normalize){ 93 | cuda_pull_array(l.scales_gpu, l.scales, l.n); 94 | cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); 95 | cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); 96 | } 97 | } 98 | 99 | extern "C" void push_deconvolutional_layer(layer l) 100 | { 101 | cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); 102 | cuda_push_array(l.biases_gpu, l.biases, l.n); 103 | 
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); 104 | cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); 105 | if (l.batch_normalize){ 106 | cuda_push_array(l.scales_gpu, l.scales, l.n); 107 | cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); 108 | cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); 109 | } 110 | } 111 | 112 | void update_deconvolutional_layer_gpu(layer l, update_args a) 113 | { 114 | float learning_rate = a.learning_rate*l.learning_rate_scale; 115 | float momentum = a.momentum; 116 | float decay = a.decay; 117 | int batch = a.batch; 118 | 119 | if(a.adam){ 120 | adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); 121 | adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); 122 | if(l.scales_gpu){ 123 | adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); 124 | } 125 | }else{ 126 | axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); 127 | axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); 128 | scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); 129 | 130 | axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); 131 | scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); 132 | 133 | if(l.scales_gpu){ 134 | axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); 135 | scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); 136 | } 137 | } 138 | } 139 | 140 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/examples/voxel.c: -------------------------------------------------------------------------------- 1 | #include "darknet.h" 2 | 3 | void extract_voxel(char *lfile, char *rfile, char *prefix) 4 | { 5 | #ifdef OPENCV 6 | int w = 1920; 7 | int h = 1080; 8 | int shift = 0; 9 | int count = 0; 10 | CvCapture *lcap = cvCaptureFromFile(lfile); 11 | CvCapture *rcap = cvCaptureFromFile(rfile); 12 | while(1){ 13 | image l = get_image_from_stream(lcap); 14 | image r = get_image_from_stream(rcap); 15 | if(!l.w || !r.w) break; 16 | if(count%100 == 0) { 17 | shift = best_3d_shift_r(l, r, -l.h/100, l.h/100); 18 | printf("%d\n", shift); 19 | } 20 | image ls = crop_image(l, (l.w - w)/2, (l.h - h)/2, w, h); 21 | image rs = crop_image(r, 105 + (r.w - w)/2, (r.h - h)/2 + shift, w, h); 22 | char buff[256]; 23 | sprintf(buff, "%s_%05d_l", prefix, count); 24 | save_image(ls, buff); 25 | sprintf(buff, "%s_%05d_r", prefix, count); 26 | save_image(rs, buff); 27 | free_image(l); 28 | free_image(r); 29 | free_image(ls); 30 | free_image(rs); 31 | ++count; 32 | } 33 | 34 | #else 35 | printf("need OpenCV for extraction\n"); 36 | #endif 37 | } 38 | 39 | void train_voxel(char *cfgfile, char *weightfile) 40 | { 41 | char *train_images = "/data/imagenet/imagenet1k.train.list"; 42 | char *backup_directory = "/home/pjreddie/backup/"; 43 | srand(time(0)); 44 | char *base = basecfg(cfgfile); 45 | printf("%s\n", base); 46 | float avg_loss = -1; 47 | network net = parse_network_cfg(cfgfile); 48 | if(weightfile){ 49 | load_weights(&net, weightfile); 50 | } 51 | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay); 52 | int imgs = net.batch*net.subdivisions; 53 | int i = *net.seen/imgs; 54 | data train, 
buffer; 55 | 56 | 57 | list *plist = get_paths(train_images); 58 | //int N = plist->size; 59 | char **paths = (char **)list_to_array(plist); 60 | 61 | load_args args = {0}; 62 | args.w = net.w; 63 | args.h = net.h; 64 | args.scale = 4; 65 | args.paths = paths; 66 | args.n = imgs; 67 | args.m = plist->size; 68 | args.d = &buffer; 69 | args.type = SUPER_DATA; 70 | 71 | pthread_t load_thread = load_data_in_thread(args); 72 | clock_t time; 73 | //while(i*imgs < N*120){ 74 | while(get_current_batch(net) < net.max_batches){ 75 | i += 1; 76 | time=clock(); 77 | pthread_join(load_thread, 0); 78 | train = buffer; 79 | load_thread = load_data_in_thread(args); 80 | 81 | printf("Loaded: %lf seconds\n", sec(clock()-time)); 82 | 83 | time=clock(); 84 | float loss = train_network(net, train); 85 | if (avg_loss < 0) avg_loss = loss; 86 | avg_loss = avg_loss*.9 + loss*.1; 87 | 88 | printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs); 89 | if(i%1000==0){ 90 | char buff[256]; 91 | sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); 92 | save_weights(net, buff); 93 | } 94 | if(i%100==0){ 95 | char buff[256]; 96 | sprintf(buff, "%s/%s.backup", backup_directory, base); 97 | save_weights(net, buff); 98 | } 99 | free_data(train); 100 | } 101 | char buff[256]; 102 | sprintf(buff, "%s/%s_final.weights", backup_directory, base); 103 | save_weights(net, buff); 104 | } 105 | 106 | void test_voxel(char *cfgfile, char *weightfile, char *filename) 107 | { 108 | network net = parse_network_cfg(cfgfile); 109 | if(weightfile){ 110 | load_weights(&net, weightfile); 111 | } 112 | set_batch_network(&net, 1); 113 | srand(2222222); 114 | 115 | clock_t time; 116 | char buff[256]; 117 | char *input = buff; 118 | while(1){ 119 | if(filename){ 120 | strncpy(input, filename, 256); 121 | }else{ 122 | printf("Enter Image Path: "); 123 | fflush(stdout); 124 | input = fgets(input, 256, stdin); 125 | if(!input) return; 126 | strtok(input, "\n"); 127 | } 128 | image im = load_image_color(input, 0, 0); 129 | resize_network(&net, im.w, im.h); 130 | printf("%d %d\n", im.w, im.h); 131 | 132 | float *X = im.data; 133 | time=clock(); 134 | network_predict(net, X); 135 | image out = get_network_image(net); 136 | printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time)); 137 | save_image(out, "out"); 138 | 139 | free_image(im); 140 | if (filename) break; 141 | } 142 | } 143 | 144 | 145 | void run_voxel(int argc, char **argv) 146 | { 147 | if(argc < 4){ 148 | fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); 149 | return; 150 | } 151 | 152 | char *cfg = argv[3]; 153 | char *weights = (argc > 4) ? argv[4] : 0; 154 | char *filename = (argc > 5) ? 
argv[5] : 0; 155 | if(0==strcmp(argv[2], "train")) train_voxel(cfg, weights); 156 | else if(0==strcmp(argv[2], "test")) test_voxel(cfg, weights, filename); 157 | else if(0==strcmp(argv[2], "extract")) extract_voxel(argv[3], argv[4], argv[5]); 158 | /* 159 | else if(0==strcmp(argv[2], "valid")) validate_voxel(cfg, weights); 160 | */ 161 | } 162 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/reorg_layer.c: -------------------------------------------------------------------------------- 1 | #include "reorg_layer.h" 2 | #include "cuda.h" 3 | #include "blas.h" 4 | 5 | #include <stdio.h> 6 | 7 | 8 | layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra) 9 | { 10 | layer l = {0}; 11 | l.type = REORG; 12 | l.batch = batch; 13 | l.stride = stride; 14 | l.extra = extra; 15 | l.h = h; 16 | l.w = w; 17 | l.c = c; 18 | l.flatten = flatten; 19 | if(reverse){ 20 | l.out_w = w*stride; 21 | l.out_h = h*stride; 22 | l.out_c = c/(stride*stride); 23 | }else{ 24 | l.out_w = w/stride; 25 | l.out_h = h/stride; 26 | l.out_c = c*(stride*stride); 27 | } 28 | l.reverse = reverse; 29 | 30 | l.outputs = l.out_h * l.out_w * l.out_c; 31 | l.inputs = h*w*c; 32 | if(l.extra){ 33 | l.out_w = l.out_h = l.out_c = 0; 34 | l.outputs = l.inputs + l.extra; 35 | } 36 | 37 | if(extra){ 38 | fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs); 39 | } else { 40 | fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); 41 | } 42 | int output_size = l.outputs * batch; 43 | l.output = calloc(output_size, sizeof(float)); 44 | l.delta = calloc(output_size, sizeof(float)); 45 | 46 | l.forward = forward_reorg_layer; 47 | l.backward = backward_reorg_layer; 48 | #ifdef GPU 49 | l.forward_gpu = forward_reorg_layer_gpu; 50 | l.backward_gpu = backward_reorg_layer_gpu; 51 | 52 | l.output_gpu = cuda_make_array(l.output, output_size); 53 | l.delta_gpu = cuda_make_array(l.delta, output_size); 54 | #endif 55 | return l; 56 | } 57 | 58 | void resize_reorg_layer(layer *l, int w, int h) 59 | { 60 | int stride = l->stride; 61 | int c = l->c; 62 | 63 | l->h = h; 64 | l->w = w; 65 | 66 | if(l->reverse){ 67 | l->out_w = w*stride; 68 | l->out_h = h*stride; 69 | l->out_c = c/(stride*stride); 70 | }else{ 71 | l->out_w = w/stride; 72 | l->out_h = h/stride; 73 | l->out_c = c*(stride*stride); 74 | } 75 | 76 | l->outputs = l->out_h * l->out_w * l->out_c; 77 | l->inputs = l->outputs; 78 | int output_size = l->outputs * l->batch; 79 | 80 | l->output = realloc(l->output, output_size * sizeof(float)); 81 | l->delta = realloc(l->delta, output_size * sizeof(float)); 82 | 83 | #ifdef GPU 84 | cuda_free(l->output_gpu); 85 | cuda_free(l->delta_gpu); 86 | l->output_gpu = cuda_make_array(l->output, output_size); 87 | l->delta_gpu = cuda_make_array(l->delta, output_size); 88 | #endif 89 | } 90 | 91 | void forward_reorg_layer(const layer l, network net) 92 | { 93 | int i; 94 | if(l.flatten){ 95 | memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float)); 96 | if(l.reverse){ 97 | flatten(l.output, l.w*l.h, l.c, l.batch, 0); 98 | }else{ 99 | flatten(l.output, l.w*l.h, l.c, l.batch, 1); 100 | } 101 | } else if (l.extra) { 102 | for(i = 0; i < l.batch; ++i){ 103 | copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1); 104 | } 105 | } else if (l.reverse){ 106 | reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output); 107 | } else { 108 | reorg_cpu(net.input, l.w, l.h, l.c,
l.batch, l.stride, 0, l.output); 109 | } 110 | } 111 | 112 | void backward_reorg_layer(const layer l, network net) 113 | { 114 | int i; 115 | if(l.flatten){ 116 | memcpy(net.delta, l.delta, l.outputs*l.batch*sizeof(float)); 117 | if(l.reverse){ 118 | flatten(net.delta, l.w*l.h, l.c, l.batch, 1); 119 | }else{ 120 | flatten(net.delta, l.w*l.h, l.c, l.batch, 0); 121 | } 122 | } else if(l.reverse){ 123 | reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta); 124 | } else if (l.extra) { 125 | for(i = 0; i < l.batch; ++i){ 126 | copy_cpu(l.inputs, l.delta + i*l.outputs, 1, net.delta + i*l.inputs, 1); 127 | } 128 | }else{ 129 | reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta); 130 | } 131 | } 132 | 133 | #ifdef GPU 134 | void forward_reorg_layer_gpu(layer l, network net) 135 | { 136 | int i; 137 | if(l.flatten){ 138 | if(l.reverse){ 139 | flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 0, l.output_gpu); 140 | }else{ 141 | flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 1, l.output_gpu); 142 | } 143 | } else if (l.extra) { 144 | for(i = 0; i < l.batch; ++i){ 145 | copy_gpu(l.inputs, net.input_gpu + i*l.inputs, 1, l.output_gpu + i*l.outputs, 1); 146 | } 147 | } else if (l.reverse) { 148 | reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu); 149 | }else { 150 | reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.output_gpu); 151 | } 152 | } 153 | 154 | void backward_reorg_layer_gpu(layer l, network net) 155 | { 156 | if(l.flatten){ 157 | if(l.reverse){ 158 | flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 1, net.delta_gpu); 159 | }else{ 160 | flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 0, net.delta_gpu); 161 | } 162 | } else if (l.extra) { 163 | int i; 164 | for(i = 0; i < l.batch; ++i){ 165 | copy_gpu(l.inputs, l.delta_gpu + i*l.outputs, 1, net.delta_gpu + i*l.inputs, 1); 166 | } 167 | } else if(l.reverse){ 168 | reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta_gpu); 169 | } else { 170 | reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta_gpu); 171 | } 172 | } 173 | #endif 174 | -------------------------------------------------------------------------------- /visual/iris/YOLOWrapper/darknet/src/cost_layer.c: -------------------------------------------------------------------------------- 1 | #include "cost_layer.h" 2 | #include "utils.h" 3 | #include "cuda.h" 4 | #include "blas.h" 5 | #include <math.h> 6 | #include <string.h> 7 | #include <stdlib.h> 8 | #include <stdio.h> 9 | 10 | COST_TYPE get_cost_type(char *s) 11 | { 12 | if (strcmp(s, "seg")==0) return SEG; 13 | if (strcmp(s, "sse")==0) return SSE; 14 | if (strcmp(s, "masked")==0) return MASKED; 15 | if (strcmp(s, "smooth")==0) return SMOOTH; 16 | if (strcmp(s, "L1")==0) return L1; 17 | if (strcmp(s, "wgan")==0) return WGAN; 18 | fprintf(stderr, "Couldn't find cost type %s, going with SSE\n", s); 19 | return SSE; 20 | } 21 | 22 | char *get_cost_string(COST_TYPE a) 23 | { 24 | switch(a){ 25 | case SEG: 26 | return "seg"; 27 | case SSE: 28 | return "sse"; 29 | case MASKED: 30 | return "masked"; 31 | case SMOOTH: 32 | return "smooth"; 33 | case L1: 34 | return "L1"; 35 | case WGAN: 36 | return "wgan"; 37 | } 38 | return "sse"; 39 | } 40 | 41 | cost_layer make_cost_layer(int batch, int inputs, COST_TYPE cost_type, float scale) 42 | { 43 | fprintf(stderr, "cost %4d\n", inputs); 44 | cost_layer l = {0}; 45 | l.type = COST; 46 | 47 | l.scale = scale; 48 | l.batch = batch; 49 | l.inputs = inputs; 50 | l.outputs = inputs; 51 | l.cost_type = cost_type; 52 | l.delta =
calloc(inputs*batch, sizeof(float)); 53 | l.output = calloc(inputs*batch, sizeof(float)); 54 | l.cost = calloc(1, sizeof(float)); 55 | 56 | l.forward = forward_cost_layer; 57 | l.backward = backward_cost_layer; 58 | #ifdef GPU 59 | l.forward_gpu = forward_cost_layer_gpu; 60 | l.backward_gpu = backward_cost_layer_gpu; 61 | 62 | l.delta_gpu = cuda_make_array(l.output, inputs*batch); 63 | l.output_gpu = cuda_make_array(l.delta, inputs*batch); 64 | #endif 65 | return l; 66 | } 67 | 68 | void resize_cost_layer(cost_layer *l, int inputs) 69 | { 70 | l->inputs = inputs; 71 | l->outputs = inputs; 72 | l->delta = realloc(l->delta, inputs*l->batch*sizeof(float)); 73 | l->output = realloc(l->output, inputs*l->batch*sizeof(float)); 74 | #ifdef GPU 75 | cuda_free(l->delta_gpu); 76 | cuda_free(l->output_gpu); 77 | l->delta_gpu = cuda_make_array(l->delta, inputs*l->batch); 78 | l->output_gpu = cuda_make_array(l->output, inputs*l->batch); 79 | #endif 80 | } 81 | 82 | void forward_cost_layer(cost_layer l, network net) 83 | { 84 | if (!net.truth) return; 85 | if(l.cost_type == MASKED){ 86 | int i; 87 | for(i = 0; i < l.batch*l.inputs; ++i){ 88 | if(net.truth[i] == SECRET_NUM) net.input[i] = SECRET_NUM; 89 | } 90 | } 91 | if(l.cost_type == SMOOTH){ 92 | smooth_l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output); 93 | }else if(l.cost_type == L1){ 94 | l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output); 95 | } else { 96 | l2_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output); 97 | } 98 | l.cost[0] = sum_array(l.output, l.batch*l.inputs); 99 | } 100 | 101 | void backward_cost_layer(const cost_layer l, network net) 102 | { 103 | axpy_cpu(l.batch*l.inputs, l.scale, l.delta, 1, net.delta, 1); 104 | } 105 | 106 | #ifdef GPU 107 | 108 | void pull_cost_layer(cost_layer l) 109 | { 110 | cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs); 111 | } 112 | 113 | void push_cost_layer(cost_layer l) 114 | { 115 | cuda_push_array(l.delta_gpu, l.delta, l.batch*l.inputs); 116 | } 117 | 118 | int float_abs_compare (const void * a, const void * b) 119 | { 120 | float fa = *(const float*) a; 121 | if(fa < 0) fa = -fa; 122 | float fb = *(const float*) b; 123 | if(fb < 0) fb = -fb; 124 | return (fa > fb) - (fa < fb); 125 | } 126 | 127 | void forward_cost_layer_gpu(cost_layer l, network net) 128 | { 129 | if (!net.truth) return; 130 | if(l.smooth){ 131 | scal_gpu(l.batch*l.inputs, (1-l.smooth), net.truth_gpu, 1); 132 | add_gpu(l.batch*l.inputs, l.smooth * 1./l.inputs, net.truth_gpu, 1); 133 | } 134 | 135 | if(l.cost_type == SMOOTH){ 136 | smooth_l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu); 137 | } else if (l.cost_type == L1){ 138 | l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu); 139 | } else if (l.cost_type == WGAN){ 140 | wgan_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu); 141 | } else { 142 | l2_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu); 143 | } 144 | 145 | if (l.cost_type == SEG && l.noobject_scale != 1) { 146 | scale_mask_gpu(l.batch*l.inputs, l.delta_gpu, 0, net.truth_gpu, l.noobject_scale); 147 | scale_mask_gpu(l.batch*l.inputs, l.output_gpu, 0, net.truth_gpu, l.noobject_scale); 148 | } 149 | if (l.cost_type == MASKED) { 150 | mask_gpu(l.batch*l.inputs, net.delta_gpu, SECRET_NUM, net.truth_gpu, 0); 151 | } 152 | 153 | if(l.ratio){ 154 | cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs); 155 | qsort(l.delta, l.batch*l.inputs, 
sizeof(float), float_abs_compare); 156 | int n = (1-l.ratio) * l.batch*l.inputs; 157 | float thresh = l.delta[n]; 158 | thresh = 0; 159 | printf("%f\n", thresh); 160 | supp_gpu(l.batch*l.inputs, thresh, l.delta_gpu, 1); 161 | } 162 | 163 | if(l.thresh){ 164 | supp_gpu(l.batch*l.inputs, l.thresh*1./l.inputs, l.delta_gpu, 1); 165 | } 166 | 167 | cuda_pull_array(l.output_gpu, l.output, l.batch*l.inputs); 168 | l.cost[0] = sum_array(l.output, l.batch*l.inputs); 169 | } 170 | 171 | void backward_cost_layer_gpu(const cost_layer l, network net) 172 | { 173 | axpy_gpu(l.batch*l.inputs, l.scale, l.delta_gpu, 1, net.delta_gpu, 1); 174 | } 175 | #endif 176 | 177 | --------------------------------------------------------------------------------
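A note on the GPU memory helpers in src/cuda.c above: cuda_make_array, cuda_push_array, cuda_pull_array and cuda_free wrap the usual cudaMalloc / cudaMemcpy / cudaFree round trip, and the layer files in this tree (reorg_layer.c, cost_layer.c, deconvolutional_kernels.cu, ...) rely on that pattern for their *_gpu buffers. The snippet below is an illustrative sketch only, not part of the repository: it assumes darknet has been built with GPU=1 (so these symbols are compiled in) and that the file is compiled and linked against the darknet sources shown above, with a header (src/cuda.h or darknet.h in this tree) that declares the cuda_* helpers.

    /* cuda_roundtrip.c - illustrative sketch, not part of the repo.
     * Assumes darknet built with GPU=1 and linked in; "cuda.h" is assumed
     * to declare the cuda_* helpers defined in src/cuda.c above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "cuda.h"

    int main(void)
    {
        size_t n = 1024;
        float *host = calloc(n, sizeof(float));
        size_t i;
        for(i = 0; i < n; ++i) host[i] = (float)i;

        cuda_set_device(0);                    /* select GPU 0, stored in gpu_index by cuda.c */
        float *dev = cuda_make_array(host, n); /* cudaMalloc + host->device copy */

        /* ... launch kernels that read or write dev here ... */

        cuda_pull_array(dev, host, n);         /* device->host copy */
        printf("host[10] = %f\n", host[10]);

        cuda_free(dev);                        /* cudaFree */
        free(host);
        return 0;
    }

The same allocate/free pair is what resize_reorg_layer and resize_cost_layer use when they rebuild their output and delta buffers, and pull_cost_layer/push_cost_layer use the pull/push helpers to mirror l.delta between host and device.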