├── README.md └── caffe-master ├── examples ├── GetYoloTraindata │ └── convert.sh ├── yolov1 │ ├── detection │ │ └── yolodeploy.prototxt │ └── train │ │ ├── gnet_solver.prototxt │ │ └── gnet_train.prototxt └── yolov2 │ └── detection │ └── yolov2deploy.prototxt ├── include └── caffe │ ├── blob.hpp │ ├── caffe.hpp │ ├── common.hpp │ ├── data_reader.hpp │ ├── data_transformer.hpp │ ├── filler.hpp │ ├── internal_thread.hpp │ ├── layer.hpp │ ├── layer_factory.hpp │ ├── layers │ ├── absval_layer.hpp │ ├── accuracy_layer.hpp │ ├── argmax_layer.hpp │ ├── base_conv_layer.hpp │ ├── base_data_layer.hpp │ ├── batch_norm_layer.hpp │ ├── batch_reindex_layer.hpp │ ├── bias_layer.hpp │ ├── bnll_layer.hpp │ ├── box_annotator_ohem_layer.hpp │ ├── box_data_layer.hpp │ ├── concat_layer.hpp │ ├── contrastive_loss_layer.hpp │ ├── conv_layer.hpp │ ├── crop_layer.hpp │ ├── cudnn_conv_layer.hpp │ ├── cudnn_lcn_layer.hpp │ ├── cudnn_lrn_layer.hpp │ ├── cudnn_pooling_layer.hpp │ ├── cudnn_relu_layer.hpp │ ├── cudnn_sigmoid_layer.hpp │ ├── cudnn_softmax_layer.hpp │ ├── cudnn_tanh_layer.hpp │ ├── data_layer.hpp │ ├── deconv_layer.hpp │ ├── detection_evaluate_layer.hpp │ ├── detection_loss_layer.hpp │ ├── detection_output_layer.hpp │ ├── dropout_layer.hpp │ ├── dummy_data_layer.hpp │ ├── eltwise_layer.hpp │ ├── elu_layer.hpp │ ├── embed_layer.hpp │ ├── euclidean_loss_layer.hpp │ ├── eval_detection_layer.hpp │ ├── exp_layer.hpp │ ├── filter_layer.hpp │ ├── flatten_layer.hpp │ ├── hdf5_data_layer.hpp │ ├── hdf5_output_layer.hpp │ ├── hinge_loss_layer.hpp │ ├── im2col_layer.hpp │ ├── image_data_layer.hpp │ ├── infogain_loss_layer.hpp │ ├── inner_product_layer.hpp │ ├── input_layer.hpp │ ├── loss_layer.hpp │ ├── lrn_layer.hpp │ ├── lstm_layer.hpp │ ├── memory_data_layer.hpp │ ├── multinomial_logistic_loss_layer.hpp │ ├── mvn_layer.hpp │ ├── neuron_layer.hpp │ ├── parameter_layer.hpp │ ├── pooling_layer.hpp │ ├── power_layer.hpp │ ├── prelu_layer.hpp │ ├── psroi_pooling_layer.hpp │ ├── python_layer.hpp │ ├── recurrent_layer.hpp │ ├── reduction_layer.hpp │ ├── region_loss_layer.hpp │ ├── relu_layer.hpp │ ├── reorg_layer.hpp │ ├── reshape_layer.hpp │ ├── rnn_layer.hpp │ ├── roi_pooling_layer.hpp │ ├── scale_layer.hpp │ ├── sigmoid_cross_entropy_loss_layer.hpp │ ├── sigmoid_layer.hpp │ ├── silence_layer.hpp │ ├── slice_layer.hpp │ ├── smooth_l1_loss_layer.hpp │ ├── smooth_l1_loss_ohem_layer.hpp │ ├── softmax_layer.hpp │ ├── softmax_loss_layer.hpp │ ├── softmax_loss_ohem_layer.hpp │ ├── split_layer.hpp │ ├── spp_layer.hpp │ ├── tanh_layer.hpp │ ├── threshold_layer.hpp │ ├── tile_layer.hpp │ ├── window_data_layer.hpp │ ├── yolov3DetectResult_layer.hpp │ ├── yolov3Detection_layer.hpp │ └── yolov3_loss_layer.hpp │ ├── net.hpp │ ├── parallel.hpp │ ├── sgd_solvers.hpp │ ├── solver.hpp │ ├── solver_factory.hpp │ ├── syncedmem.hpp │ ├── test │ ├── test_caffe_main.hpp │ └── test_gradient_check_util.hpp │ └── util │ ├── bbox_util.hpp │ ├── benchmark.hpp │ ├── blocking_queue.hpp │ ├── cudnn.hpp │ ├── db.hpp │ ├── db_leveldb.hpp │ ├── db_lmdb.hpp │ ├── device_alternate.hpp │ ├── format.hpp │ ├── gpu_util.cuh │ ├── hdf5.hpp │ ├── im2col.hpp │ ├── insert_splits.hpp │ ├── io.hpp │ ├── math_functions.hpp │ ├── mkl_alternate.hpp │ ├── rng.hpp │ ├── signal_handler.h │ ├── tree.hpp │ └── upgrade_proto.hpp ├── python ├── convert │ ├── convert_weights_to_caffemodel.py │ ├── create_yolo_caffemodel.py │ ├── create_yolo_prototxt.py │ └── create_yolov3_prototxt.py └── yolo_detection │ └── test_yolo_v2.py ├── src ├── caffe │ ├── CMakeLists.txt │ ├── 
blob.cpp │ ├── common.cpp │ ├── data_reader.cpp │ ├── data_transformer.cpp │ ├── internal_thread.cpp │ ├── layer.cpp │ ├── layer_factory.cpp │ ├── layers │ │ ├── absval_layer.cpp │ │ ├── absval_layer.cu │ │ ├── accuracy_layer.cpp │ │ ├── argmax_layer.cpp │ │ ├── base_conv_layer.cpp │ │ ├── base_data_layer.cpp │ │ ├── base_data_layer.cu │ │ ├── batch_norm_layer.cpp │ │ ├── batch_norm_layer.cu │ │ ├── batch_reindex_layer.cpp │ │ ├── batch_reindex_layer.cu │ │ ├── bias_layer.cpp │ │ ├── bias_layer.cu │ │ ├── bnll_layer.cpp │ │ ├── bnll_layer.cu │ │ ├── box_annotator_ohem_layer.cpp │ │ ├── box_annotator_ohem_layer.cu │ │ ├── box_data_layer.cpp │ │ ├── concat_layer.cpp │ │ ├── concat_layer.cu │ │ ├── contrastive_loss_layer.cpp │ │ ├── contrastive_loss_layer.cu │ │ ├── conv_layer.cpp │ │ ├── conv_layer.cu │ │ ├── crop_layer.cpp │ │ ├── crop_layer.cu │ │ ├── cudnn_conv_layer.cpp │ │ ├── cudnn_conv_layer.cu │ │ ├── cudnn_lcn_layer.cpp │ │ ├── cudnn_lcn_layer.cu │ │ ├── cudnn_lrn_layer.cpp │ │ ├── cudnn_lrn_layer.cu │ │ ├── cudnn_pooling_layer.cpp │ │ ├── cudnn_pooling_layer.cu │ │ ├── cudnn_relu_layer.cpp │ │ ├── cudnn_relu_layer.cu │ │ ├── cudnn_sigmoid_layer.cpp │ │ ├── cudnn_sigmoid_layer.cu │ │ ├── cudnn_softmax_layer.cpp │ │ ├── cudnn_softmax_layer.cu │ │ ├── cudnn_tanh_layer.cpp │ │ ├── cudnn_tanh_layer.cu │ │ ├── data_layer.cpp │ │ ├── deconv_layer.cpp │ │ ├── deconv_layer.cu │ │ ├── detection_evaluate_layer.cpp │ │ ├── detection_loss_layer.cpp │ │ ├── detection_loss_layer_back.cpp │ │ ├── detection_output_layer.cpp │ │ ├── dropout_layer.cpp │ │ ├── dropout_layer.cu │ │ ├── dummy_data_layer.cpp │ │ ├── eltwise_layer.cpp │ │ ├── eltwise_layer.cu │ │ ├── elu_layer.cpp │ │ ├── elu_layer.cu │ │ ├── embed_layer.cpp │ │ ├── embed_layer.cu │ │ ├── euclidean_loss_layer.cpp │ │ ├── euclidean_loss_layer.cu │ │ ├── eval_detection_layer.cpp │ │ ├── exp_layer.cpp │ │ ├── exp_layer.cu │ │ ├── filter_layer.cpp │ │ ├── filter_layer.cu │ │ ├── flatten_layer.cpp │ │ ├── hdf5_data_layer.cpp │ │ ├── hdf5_data_layer.cu │ │ ├── hdf5_output_layer.cpp │ │ ├── hdf5_output_layer.cu │ │ ├── hinge_loss_layer.cpp │ │ ├── im2col_layer.cpp │ │ ├── im2col_layer.cu │ │ ├── image_data_layer.cpp │ │ ├── infogain_loss_layer.cpp │ │ ├── inner_product_layer.cpp │ │ ├── inner_product_layer.cu │ │ ├── input_layer.cpp │ │ ├── loss_layer.cpp │ │ ├── lrn_layer.cpp │ │ ├── lrn_layer.cu │ │ ├── lstm_layer.cpp │ │ ├── lstm_unit_layer.cpp │ │ ├── lstm_unit_layer.cu │ │ ├── memory_data_layer.cpp │ │ ├── multinomial_logistic_loss_layer.cpp │ │ ├── mvn_layer.cpp │ │ ├── mvn_layer.cu │ │ ├── neuron_layer.cpp │ │ ├── parameter_layer.cpp │ │ ├── pooling_layer.cpp │ │ ├── pooling_layer.cu │ │ ├── power_layer.cpp │ │ ├── power_layer.cu │ │ ├── prelu_layer.cpp │ │ ├── prelu_layer.cu │ │ ├── psroi_pooling_layer.cpp │ │ ├── psroi_pooling_layer.cu │ │ ├── recurrent_layer.cpp │ │ ├── recurrent_layer.cu │ │ ├── reduction_layer.cpp │ │ ├── reduction_layer.cu │ │ ├── region_loss_layer.cpp │ │ ├── region_loss_layer.cpp.tree │ │ ├── region_loss_layer.cpp.v2 │ │ ├── region_loss_layer_back.cpp │ │ ├── relu_layer.cpp │ │ ├── relu_layer.cu │ │ ├── reorg_layer.cpp │ │ ├── reorg_layer.cu │ │ ├── reshape_layer.cpp │ │ ├── rnn_layer.cpp │ │ ├── roi_pooling_layer.cpp │ │ ├── roi_pooling_layer.cu │ │ ├── scale_layer.cpp │ │ ├── scale_layer.cu │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ ├── sigmoid_layer.cpp │ │ ├── sigmoid_layer.cu │ │ ├── silence_layer.cpp │ │ ├── silence_layer.cu │ │ ├── slice_layer.cpp │ │ ├── 
slice_layer.cu │ │ ├── smooth_L1_loss_ohem_layer.cpp │ │ ├── smooth_L1_loss_ohem_layer.cu │ │ ├── smooth_l1_loss_layer.cpp │ │ ├── smooth_l1_loss_layer.cu │ │ ├── softmax_layer.cpp │ │ ├── softmax_layer.cu │ │ ├── softmax_loss_layer.cpp │ │ ├── softmax_loss_layer.cu │ │ ├── softmax_loss_ohem_layer.cpp │ │ ├── softmax_loss_ohem_layer.cu │ │ ├── split_layer.cpp │ │ ├── split_layer.cu │ │ ├── spp_layer.cpp │ │ ├── tanh_layer.cpp │ │ ├── tanh_layer.cu │ │ ├── threshold_layer.cpp │ │ ├── threshold_layer.cu │ │ ├── tile_layer.cpp │ │ ├── tile_layer.cu │ │ ├── window_data_layer.cpp │ │ ├── yolov3DetectResult_layer.cpp │ │ ├── yolov3Detection_layer.cpp │ │ └── yolov3_loss_layer.cpp │ ├── net.cpp │ ├── parallel.cpp │ ├── proto │ │ └── caffe.proto │ ├── solver.cpp │ ├── solvers │ │ ├── adadelta_solver.cpp │ │ ├── adadelta_solver.cu │ │ ├── adagrad_solver.cpp │ │ ├── adagrad_solver.cu │ │ ├── adam_solver.cpp │ │ ├── adam_solver.cu │ │ ├── nesterov_solver.cpp │ │ ├── nesterov_solver.cu │ │ ├── rmsprop_solver.cpp │ │ ├── rmsprop_solver.cu │ │ ├── sgd_solver.cpp │ │ └── sgd_solver.cu │ ├── syncedmem.cpp │ ├── test │ │ ├── CMakeLists.txt │ │ ├── test_accuracy_layer.cpp │ │ ├── test_argmax_layer.cpp │ │ ├── test_batch_norm_layer.cpp │ │ ├── test_batch_reindex_layer.cpp │ │ ├── test_benchmark.cpp │ │ ├── test_bias_layer.cpp │ │ ├── test_blob.cpp │ │ ├── test_caffe_main.cpp │ │ ├── test_common.cpp │ │ ├── test_concat_layer.cpp │ │ ├── test_contrastive_loss_layer.cpp │ │ ├── test_convolution_layer.cpp │ │ ├── test_crop_layer.cpp │ │ ├── test_data │ │ │ ├── generate_sample_data.py │ │ │ ├── sample_data.h5 │ │ │ ├── sample_data_2_gzip.h5 │ │ │ ├── sample_data_list.txt │ │ │ ├── solver_data.h5 │ │ │ └── solver_data_list.txt │ │ ├── test_data_layer.cpp │ │ ├── test_data_transformer.cpp │ │ ├── test_db.cpp │ │ ├── test_deconvolution_layer.cpp │ │ ├── test_dummy_data_layer.cpp │ │ ├── test_eltwise_layer.cpp │ │ ├── test_embed_layer.cpp │ │ ├── test_euclidean_loss_layer.cpp │ │ ├── test_filler.cpp │ │ ├── test_filter_layer.cpp │ │ ├── test_flatten_layer.cpp │ │ ├── test_gradient_based_solver.cpp │ │ ├── test_hdf5_output_layer.cpp │ │ ├── test_hdf5data_layer.cpp │ │ ├── test_hinge_loss_layer.cpp │ │ ├── test_im2col_kernel.cu │ │ ├── test_im2col_layer.cpp │ │ ├── test_image_data_layer.cpp │ │ ├── test_infogain_loss_layer.cpp │ │ ├── test_inner_product_layer.cpp │ │ ├── test_internal_thread.cpp │ │ ├── test_io.cpp │ │ ├── test_layer_factory.cpp │ │ ├── test_lrn_layer.cpp │ │ ├── test_lstm_layer.cpp │ │ ├── test_math_functions.cpp │ │ ├── test_maxpool_dropout_layers.cpp │ │ ├── test_memory_data_layer.cpp │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ ├── test_mvn_layer.cpp │ │ ├── test_net.cpp │ │ ├── test_neuron_layer.cpp │ │ ├── test_platform.cpp │ │ ├── test_pooling_layer.cpp │ │ ├── test_power_layer.cpp │ │ ├── test_protobuf.cpp │ │ ├── test_random_number_generator.cpp │ │ ├── test_reduction_layer.cpp │ │ ├── test_reshape_layer.cpp │ │ ├── test_rnn_layer.cpp │ │ ├── test_roi_pooling_layer.cpp │ │ ├── test_scale_layer.cpp │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ ├── test_slice_layer.cpp │ │ ├── test_smooth_l1_loss_layer.cpp │ │ ├── test_softmax_layer.cpp │ │ ├── test_softmax_with_loss_layer.cpp │ │ ├── test_solver.cpp │ │ ├── test_solver_factory.cpp │ │ ├── test_split_layer.cpp │ │ ├── test_spp_layer.cpp │ │ ├── test_stochastic_pooling.cpp │ │ ├── test_syncedmem.cpp │ │ ├── test_tanh_layer.cpp │ │ ├── test_threshold_layer.cpp │ │ ├── test_tile_layer.cpp │ │ ├── test_upgrade_proto.cpp │ │ └── 
test_util_blas.cpp │ └── util │ │ ├── bbox_util.cpp │ │ ├── benchmark.cpp │ │ ├── blocking_queue.cpp │ │ ├── cudnn.cpp │ │ ├── db.cpp │ │ ├── db_leveldb.cpp │ │ ├── db_lmdb.cpp │ │ ├── hdf5.cpp │ │ ├── im2col.cpp │ │ ├── im2col.cu │ │ ├── insert_splits.cpp │ │ ├── io.cpp │ │ ├── math_functions.cpp │ │ ├── math_functions.cu │ │ ├── signal_handler.cpp │ │ ├── tree.cpp │ │ └── upgrade_proto.cpp └── gtest │ ├── CMakeLists.txt │ ├── gtest-all.cpp │ ├── gtest.h │ └── gtest_main.cc ├── tools ├── CMakeLists.txt ├── caffe.cpp ├── compute_image_mean.cpp ├── convert_box_data.cpp ├── convert_imageset.cpp ├── device_query.cpp ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.py │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py.example │ ├── resize_and_crop_images.py │ └── summarize.py ├── extract_features.cpp ├── finetune_net.cpp ├── net_speed_benchmark.cpp ├── test_detection.cpp ├── test_net.cpp ├── test_yolo_v2.cpp ├── train_net.cpp ├── upgrade_net_proto_binary.cpp ├── upgrade_net_proto_text.cpp └── upgrade_solver_proto_text.cpp └── windows ├── Caffe.sln ├── CommonSettings.props.example ├── CommonSettings.targets ├── caffe.managed ├── AssemblyInfo.cpp ├── Stdafx.cpp ├── Stdafx.h ├── _CaffeModel.cpp ├── _CaffeModel.h ├── caffe.managed.vcxproj ├── caffe.managed.vcxproj.filters ├── caffelib.cpp └── packages.config ├── caffe ├── caffe.vcxproj └── packages.config ├── classification ├── classification.vcxproj └── packages.config ├── compute_image_mean ├── compute_image_mean.vcxproj └── packages.config ├── convert_box_data ├── convert_box_data.cpp ├── convert_box_data.vcxproj └── packages.config ├── convert_cifar_data ├── convert_cifar_data.vcxproj └── packages.config ├── convert_imageset ├── convert_imageset.vcxproj └── packages.config ├── convert_mnist_data ├── convert_mnist_data.vcxproj └── packages.config ├── convert_mnist_siamese_data ├── convert_mnist_siamese_data.vcxproj └── packages.config ├── copyback └── CommonSettings.props.example ├── extract_features ├── extract_features.vcxproj └── packages.config ├── libcaffe ├── libcaffe.vcxproj ├── libcaffe.vcxproj.filters └── packages.config ├── matcaffe ├── matcaffe.def ├── matcaffe.vcxproj └── packages.config ├── nuget.config ├── pycaffe ├── packages.config └── pycaffe.vcxproj ├── scripts ├── BinplaceCudaDependencies.cmd ├── FixGFlagsNaming.cmd ├── MatlabPostBuild.cmd ├── MatlabPreBuild.cmd ├── ProtoCompile.cmd ├── PythonPostBuild.cmd └── PythonPreBuild.cmd ├── test_MTCNN ├── mtcnn.cpp ├── packages.config └── test_mtcnn.vcxproj ├── test_all ├── packages.config ├── test_all.vcxproj └── test_all.vcxproj.filters ├── test_yolo ├── My Inspector XE Results - test_yolo │ └── My Inspector XE Results - test_yolo.inspxeproj ├── packages.config ├── test_yolo.cpp ├── test_yolo.vcxproj ├── test_yolo_a1.cpp └── yolotestresult.jpg ├── test_yolo_v2 ├── packages.config └── test_yolo_v2.vcxproj ├── upgrade_net_proto_binary ├── packages.config └── upgrade_net_proto_binary.vcxproj ├── upgrade_net_proto_text ├── packages.config └── upgrade_net_proto_text.vcxproj └── upgrade_solver_proto_text ├── packages.config └── upgrade_solver_proto_text.vcxproj
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Yolo_on_Caffe
YOLO (including YOLOv1, YOLOv2 and YOLOv3) running on Caffe for Windows.
Anyone who is not familiar with Linux can use this project to learn Caffe development.
--------------------------------------------------------------------------------
/caffe-master/examples/GetYoloTraindata/convert.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh

CAFFE_ROOT=D:/Code_local/caffe/caffe-master/caffe-master
CAFFEDATA_ROOT=D:/Code_local/caffe/caffe_yolo
ROOT_DIR=D:/VOCdevkit/VOC2012/
LABEL_FILE=$CAFFEDATA_ROOT/label_map.txt

# 2007 + 2012 trainval
LIST_FILE=$CAFFEDATA_ROOT/trainval.txt
LMDB_DIR=./lmdb/trainval_lmdb
SHUFFLE=true

# 2007 test
#TESTLIST_FILE=$CAFFEDATA_ROOT/test_2007.txt
#TESTLMDB_DIR=./lmdb/test2007_lmdb
#SHUFFLE=false

RESIZE_W=448
RESIZE_H=448

$CAFFE_ROOT/Build/x64/Debug/convert_box_data.exe --resize_width=$RESIZE_W --resize_height=$RESIZE_H \
  --label_file=$LABEL_FILE $ROOT_DIR $LIST_FILE $LMDB_DIR --encoded=true --encode_type=jpg --shuffle=$SHUFFLE

--------------------------------------------------------------------------------
/caffe-master/examples/yolov1/train/gnet_solver.prototxt:
--------------------------------------------------------------------------------
# The train/test net protocol buffer definition
net: "D:/Code_local/caffe/caffe_yolo/gnet_train.prototxt"
# test_iter specifies how many forward passes the test should carry out.
test_iter: 4952
# Carry out testing every 32000 training iterations.
test_interval: 32000
test_initialization: false
display: 20
average_loss: 100
# The multifixed policy changes the learning rate at fixed iteration stages
# (stageiter); within each stage the learning rate (stagelr) is held constant.
lr_policy: "multifixed"
stagelr: 0.001
stagelr: 0.01
stagelr: 0.001
stagelr: 0.0001
stageiter: 520
stageiter: 16000
stageiter: 24000
stageiter: 32000
max_iter: 32000
# Momentum and weight decay of the network.
momentum: 0.9
weight_decay: 0.0005
snapshot: 2000
snapshot_prefix: "D:/Code_local/caffe/caffe_yolo/models/gnet_yolo"
solver_mode: GPU
--------------------------------------------------------------------------------
/caffe-master/include/caffe/caffe.hpp:
--------------------------------------------------------------------------------
// caffe.hpp is the header file that you need to include in your code. It wraps
// all the internal caffe header files into one for simpler inclusion.
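//
// A minimal usage sketch (file names are illustrative; this assumes a deploy
// prototxt and a .caffemodel converted from Darknet weights, e.g. with the
// scripts under python/convert, are available):
//
//   caffe::Caffe::set_mode(caffe::Caffe::GPU);
//   caffe::Net<float> net("yolodeploy.prototxt", caffe::TEST);
//   net.CopyTrainedLayersFrom("yolo.caffemodel");
//   const std::vector<caffe::Blob<float>*>& result = net.Forward();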

#ifndef CAFFE_CAFFE_HPP_
#define CAFFE_CAFFE_HPP_

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/solver.hpp"
#include "caffe/solver_factory.hpp"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"

#endif  // CAFFE_CAFFE_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/internal_thread.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_INTERNAL_THREAD_HPP_
#define CAFFE_INTERNAL_THREAD_HPP_

#include "caffe/common.hpp"

/**
 Forward declare boost::thread instead of including boost/thread.hpp
 to avoid boost/NVCC issues (#1009, #1010) on OSX.
 */
namespace boost { class thread; }

namespace caffe {

/**
 * Virtual class encapsulating boost::thread for use in derived classes.
 * A child class acquires the ability to run a single thread by
 * reimplementing the virtual function InternalThreadEntry.
 */
class InternalThread {
 public:
  InternalThread() : thread_() {}
  virtual ~InternalThread();

  /**
   * Caffe's thread-local state will be initialized using the current
   * thread values, e.g. device id, solver index etc. The random seed
   * is initialized using caffe_rng_rand.
   */
  void StartInternalThread();

  /** Will not return until the internal thread has exited. */
  void StopInternalThread();

  bool is_started() const;

 protected:
  /* Implement this method in your subclass
     with the code you want your thread to run. */
  virtual void InternalThreadEntry() {}

  /* Should be tested when running loops to exit when requested. */
  bool must_stop();

 private:
  void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count,
      bool root_solver);

  shared_ptr<boost::thread> thread_;
};

}  // namespace caffe

#endif  // CAFFE_INTERNAL_THREAD_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/bias_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_BIAS_LAYER_HPP_
#define CAFFE_BIAS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Computes a sum of two input Blobs, with the shape of the
 *        latter Blob "broadcast" to match the shape of the former.
 *        Equivalent to tiling the latter Blob, then computing the
 *        elementwise sum.
 *
 * The second input may be omitted, in which case it's learned as a parameter
 * of the layer.
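 *
 * For example, with bottom[0] of shape (N, C, H, W) and a bias input (or
 * learned bias parameter) of shape (C) at axis == 1, each of the C bias
 * values is broadcast and added at every (n, h, w) position.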
 */
template <typename Dtype>
class BiasLayer : public Layer<Dtype> {
 public:
  explicit BiasLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Bias"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int MaxBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

 private:
  Blob<Dtype> bias_multiplier_;
  int outer_dim_, bias_dim_, inner_dim_, dim_;
};

}  // namespace caffe

#endif  // CAFFE_BIAS_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/box_annotator_ohem_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_BOX_ANNOTATOR_OHEM_LAYER_HPP_
#define CAFFE_BOX_ANNOTATOR_OHEM_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/loss_layer.hpp"

namespace caffe {

/**
 * @brief BoxAnnotatorOHEMLayer: annotates box labels for Online Hard Example
 *        Mining (OHEM) training, as used in R-FCN.
 *        Written by Yi Li.
 */
template <typename Dtype>
class BoxAnnotatorOHEMLayer : public Layer<Dtype> {
 public:
  explicit BoxAnnotatorOHEMLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "BoxAnnotatorOHEM"; }

  virtual inline int ExactNumBottomBlobs() const { return 4; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int num_;
  int height_;
  int width_;
  int spatial_dim_;
  int bbox_channels_;

  int roi_per_img_;
  int ignore_label_;
};

}  // namespace caffe

#endif  // CAFFE_BOX_ANNOTATOR_OHEM_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/box_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_BOX_DATA_LAYER_HPP_
#define CAFFE_BOX_DATA_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_reader.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/util/db.hpp" 14 | 15 | namespace caffe { 16 | 17 | template 18 | class BoxDataLayer : public BasePrefetchingDataLayer { 19 | public: 20 | explicit BoxDataLayer(const LayerParameter& param); 21 | virtual ~BoxDataLayer(); 22 | virtual void DataLayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | // DataLayer uses DataReader instead for sharing for parallelism 25 | virtual inline bool ShareInParallel() const { return false; } 26 | virtual inline const char* type() const { return "BoxData"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 0; } 28 | virtual inline int MinTopBlobs() const { return 1; } 29 | virtual inline int MaxTopBlobs() const { return 100; } 30 | 31 | void transform_label(int count, Dtype* top_label, const vector& box_labels, int side, int ver); 32 | 33 | protected: 34 | virtual void load_batch(Batch* batch); 35 | 36 | DataReader reader_; 37 | vector sides_; 38 | int yolo_version_flag_; 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_DATA_LAYER_HPP_ 44 | -------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/cudnn_lcn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LCN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LCN_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | #include "caffe/layers/power_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | template 17 | class CuDNNLCNLayer : public LRNLayer { 18 | public: 19 | explicit CuDNNLCNLayer(const LayerParameter& param) 20 | : LRNLayer(param), handles_setup_(false), tempDataSize(0), 21 | tempData1(NULL), tempData2(NULL) {} 22 | virtual void LayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | virtual void Reshape(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual ~CuDNNLCNLayer(); 27 | 28 | protected: 29 | virtual void Forward_gpu(const vector*>& bottom, 30 | const vector*>& top); 31 | virtual void Backward_gpu(const vector*>& top, 32 | const vector& propagate_down, const vector*>& bottom); 33 | 34 | bool handles_setup_; 35 | cudnnHandle_t handle_; 36 | cudnnLRNDescriptor_t norm_desc_; 37 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 38 | 39 | int size_, pre_pad_; 40 | Dtype alpha_, beta_, k_; 41 | 42 | size_t tempDataSize; 43 | void *tempData1, *tempData2; 44 | }; 45 | #endif 46 | 47 | } // namespace caffe 48 | 49 | #endif // CAFFE_CUDNN_LCN_LAYER_HPP_ 50 | -------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/cudnn_lrn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LRN_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | template 16 | class CuDNNLRNLayer : public LRNLayer { 17 | public: 18 | explicit CuDNNLRNLayer(const LayerParameter& param) 19 | : LRNLayer(param), handles_setup_(false) {} 20 | virtual void LayerSetUp(const vector*>& bottom, 21 | const vector*>& top); 22 | virtual void Reshape(const vector*>& bottom, 23 | const 
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNLRNLayer();

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnLRNDescriptor_t norm_desc_;
  cudnnTensorDescriptor_t bottom_desc_, top_desc_;

  int size_;
  Dtype alpha_, beta_, k_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_LRN_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/cudnn_pooling_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_
#define CAFFE_CUDNN_POOLING_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/pooling_layer.hpp"

namespace caffe {

#ifdef USE_CUDNN
/*
 * @brief cuDNN implementation of PoolingLayer.
 *        Fallback to PoolingLayer for CPU mode.
 */
template <typename Dtype>
class CuDNNPoolingLayer : public PoolingLayer<Dtype> {
 public:
  explicit CuDNNPoolingLayer(const LayerParameter& param)
      : PoolingLayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNPoolingLayer();
  // Currently, cuDNN does not support the extra top blob.
  virtual inline int MinTopBlobs() const { return -1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnTensorDescriptor_t bottom_desc_, top_desc_;
  cudnnPoolingDescriptor_t pooling_desc_;
  cudnnPoolingMode_t mode_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_POOLING_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/cudnn_relu_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_CUDNN_RELU_LAYER_HPP_
#define CAFFE_CUDNN_RELU_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/relu_layer.hpp"

namespace caffe {

#ifdef USE_CUDNN
/**
 * @brief CuDNN acceleration of ReLULayer.
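 *
 * Note (upstream BVLC behavior): when relu_param.negative_slope != 0 --
 * the leaky ReLU used throughout the YOLO networks -- the layer falls back
 * to the standard ReLULayer kernels, since cuDNN's ReLU activation only
 * computes max(x, 0).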
 */
template <typename Dtype>
class CuDNNReLULayer : public ReLULayer<Dtype> {
 public:
  explicit CuDNNReLULayer(const LayerParameter& param)
      : ReLULayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNReLULayer();

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
  cudnnActivationDescriptor_t activ_desc_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_RELU_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/cudnn_sigmoid_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_
#define CAFFE_CUDNN_SIGMOID_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/sigmoid_layer.hpp"

namespace caffe {

#ifdef USE_CUDNN
/**
 * @brief CuDNN acceleration of SigmoidLayer.
 */
template <typename Dtype>
class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
 public:
  explicit CuDNNSigmoidLayer(const LayerParameter& param)
      : SigmoidLayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNSigmoidLayer();

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
  cudnnActivationDescriptor_t activ_desc_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_SIGMOID_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/cudnn_softmax_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_
#define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/softmax_layer.hpp"

namespace caffe {

#ifdef USE_CUDNN
/**
 * @brief cuDNN implementation of SoftmaxLayer.
 *        Fallback to SoftmaxLayer for CPU mode.
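 *        The softmax is taken along the canonical axis
 *        (softmax_param.axis, by default 1, i.e. the channels).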
 */
template <typename Dtype>
class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> {
 public:
  explicit CuDNNSoftmaxLayer(const LayerParameter& param)
      : SoftmaxLayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNSoftmaxLayer();

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/cudnn_tanh_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_CUDNN_TANH_LAYER_HPP_
#define CAFFE_CUDNN_TANH_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/tanh_layer.hpp"

namespace caffe {

#ifdef USE_CUDNN
/**
 * @brief CuDNN acceleration of TanHLayer.
 */
template <typename Dtype>
class CuDNNTanHLayer : public TanHLayer<Dtype> {
 public:
  explicit CuDNNTanHLayer(const LayerParameter& param)
      : TanHLayer<Dtype>(param), handles_setup_(false) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual ~CuDNNTanHLayer();

 protected:
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  bool handles_setup_;
  cudnnHandle_t handle_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
  cudnnActivationDescriptor_t activ_desc_;
};
#endif

}  // namespace caffe

#endif  // CAFFE_CUDNN_TANH_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_DATA_LAYER_HPP_
#define CAFFE_DATA_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_reader.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"

namespace caffe {

template <typename Dtype>
class DataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit DataLayer(const LayerParameter& param);
  virtual ~DataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // DataLayer uses DataReader instead for sharing for parallelism
  virtual inline bool ShareInParallel() const { return false; }
  virtual inline const char* type() const { return "Data"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 2; }

 protected:
  virtual void load_batch(Batch<Dtype>* batch);

  DataReader reader_;
};

}  // namespace caffe

#endif  // CAFFE_DATA_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/detection_loss_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_DETECTION_LOSS_LAYER_HPP_
#define CAFFE_DETECTION_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
Dtype Overlap(Dtype x1, Dtype w1, Dtype x2, Dtype w2);
template <typename Dtype>
Dtype Calc_iou(const vector<Dtype>& box, const vector<Dtype>& truth);
template <typename Dtype>
Dtype Calc_rmse(const vector<Dtype>& box, const vector<Dtype>& truth);

template <typename Dtype>
class DetectionLossLayer : public LossLayer<Dtype> {
 public:
  explicit DetectionLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), diff_() {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "DetectionLoss"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
#ifdef CPU_ONLY
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
#endif

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
#ifdef CPU_ONLY
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
#endif

  int side_;
  int num_class_;
  int num_object_;
  float object_scale_;
  float class_scale_;
  float noobject_scale_;
  float coord_scale_;
  bool sqrt_;
  bool constriant_;

  Blob<Dtype> diff_;
};

}  // namespace caffe

#endif  // CAFFE_DETECTION_LOSS_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/dummy_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_DUMMY_DATA_LAYER_HPP_
#define CAFFE_DUMMY_DATA_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net generated by a Filler.
 *
 * TODO(dox): thorough documentation for Forward and proto params.
 */
template <typename Dtype>
class DummyDataLayer : public Layer<Dtype> {
 public:
  explicit DummyDataLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
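  // (Per upstream behavior: constant fillers are run once at setup, while
  // non-constant fillers are re-run on every Forward pass; see refill_.)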
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "DummyData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}

  vector<shared_ptr<Filler<Dtype> > > fillers_;
  vector<bool> refill_;
};

}  // namespace caffe

#endif  // CAFFE_DUMMY_DATA_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/eltwise_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_ELTWISE_LAYER_HPP_
#define CAFFE_ELTWISE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Compute elementwise operations, such as product and sum,
 *        along multiple input Blobs.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class EltwiseLayer : public Layer<Dtype> {
 public:
  explicit EltwiseLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Eltwise"; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  EltwiseParameter_EltwiseOp op_;
  vector<Dtype> coeffs_;
  Blob<int> max_idx_;

  bool stable_prod_grad_;
};

}  // namespace caffe

#endif  // CAFFE_ELTWISE_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/embed_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_EMBED_LAYER_HPP_
#define CAFFE_EMBED_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief A layer for learning "embeddings" of one-hot vector input.
 *        Equivalent to an InnerProductLayer with one-hot vectors as input, but
 *        for efficiency the input is the "hot" index of each column itself.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
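 *
 * For example, with K = input_dim (the vocabulary size) and N = num_output,
 * an input value i in [0, K) selects row i of the learned K-by-N weight
 * matrix as the output embedding.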
 */
template <typename Dtype>
class EmbedLayer : public Layer<Dtype> {
 public:
  explicit EmbedLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Embed"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int M_;
  int K_;
  int N_;
  bool bias_term_;
  Blob<Dtype> bias_multiplier_;
};

}  // namespace caffe

#endif  // CAFFE_EMBED_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/eval_detection_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_EVAL_DETECTION_LAYER_HPP_
#define CAFFE_EVAL_DETECTION_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype>
class EvalDetectionLayer : public Layer<Dtype> {
 public:
  explicit EvalDetectionLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "EvalDetection"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    for (int i = 0; i < propagate_down.size(); ++i) {
      if (propagate_down[i]) { NOT_IMPLEMENTED; }
    }
  }

  int side_;
  int num_class_;
  int num_object_;
  float threshold_;
  bool sqrt_;
  bool constriant_;
  int score_type_;
  float nms_;
};

}  // namespace caffe

#endif  // CAFFE_EVAL_DETECTION_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/image_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_IMAGE_DATA_LAYER_HPP_
#define CAFFE_IMAGE_DATA_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net from image files.
 *
 * TODO(dox): thorough documentation for Forward and proto params.
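 *
 * The source file is a plain-text list with one "path/to/image.jpg label"
 * pair per line.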
 */
template <typename Dtype>
class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit ImageDataLayer(const LayerParameter& param)
      : BasePrefetchingDataLayer<Dtype>(param) {}
  virtual ~ImageDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "ImageData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

 protected:
  shared_ptr<Caffe::RNG> prefetch_rng_;
  virtual void ShuffleImages();
  virtual void load_batch(Batch<Dtype>* batch);

  vector<std::pair<std::string, int> > lines_;
  int lines_id_;
};


}  // namespace caffe

#endif  // CAFFE_IMAGE_DATA_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/inner_product_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_INNER_PRODUCT_LAYER_HPP_
#define CAFFE_INNER_PRODUCT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Also known as a "fully-connected" layer, computes an inner product
 *        with a set of learned weights, and (optionally) adds biases.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class InnerProductLayer : public Layer<Dtype> {
 public:
  explicit InnerProductLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "InnerProduct"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int M_;
  int K_;
  int N_;
  bool bias_term_;
  Blob<Dtype> bias_multiplier_;
  bool transpose_;  ///< if true, assume transposed weights
};

}  // namespace caffe

#endif  // CAFFE_INNER_PRODUCT_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/input_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_INPUT_LAYER_HPP_
#define CAFFE_INPUT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net by assigning tops directly.
 *
 * This data layer is a container that merely holds the data assigned to it;
 * forward, backward, and reshape are all no-ops.
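 *
 * Typical deploy-prototxt usage (448x448 is the YOLOv1 input size):
 *   layer {
 *     name: "data"
 *     type: "Input"
 *     top: "data"
 *     input_param { shape: { dim: 1 dim: 3 dim: 448 dim: 448 } }
 *   }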
 */
template <typename Dtype>
class InputLayer : public Layer<Dtype> {
 public:
  explicit InputLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "Input"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
};

}  // namespace caffe

#endif  // CAFFE_INPUT_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/loss_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_LOSS_LAYER_HPP_
#define CAFFE_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

const float kLOG_THRESHOLD = 1e-20;

/**
 * @brief An interface for Layer%s that take two Blob%s as input -- usually
 *        (1) predictions and (2) ground-truth labels -- and output a
 *        singleton Blob representing the loss.
 *
 * LossLayers are typically only capable of backpropagating to their first
 * input -- the predictions.
 */
template <typename Dtype>
class LossLayer : public Layer<Dtype> {
 public:
  explicit LossLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
  virtual void Reshape(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);

  virtual inline int ExactNumBottomBlobs() const { return 2; }

  /**
   * @brief For convenience and backwards compatibility, instruct the Net to
   *        automatically allocate a single top Blob for LossLayers, into which
   *        they output their singleton loss, (even if the user didn't specify
   *        one in the prototxt, etc.).
   */
  virtual inline bool AutoTopBlobs() const { return true; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
  /**
   * We usually cannot backpropagate to the labels; ignore force_backward for
   * these inputs.
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return bottom_index != 1;
  }
};

}  // namespace caffe

#endif  // CAFFE_LOSS_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/memory_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_MEMORY_DATA_LAYER_HPP_
#define CAFFE_MEMORY_DATA_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/base_data_layer.hpp"

namespace caffe {

/**
 * @brief Provides data to the Net from memory.
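 *        Inputs are handed over either via Reset(), whose raw pointers must
 *        stay valid while the net uses them, or copied in via
 *        AddDatumVector()/AddMatVector().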
 *
 * TODO(dox): thorough documentation for Forward and proto params.
 */
template <typename Dtype>
class MemoryDataLayer : public BaseDataLayer<Dtype> {
 public:
  explicit MemoryDataLayer(const LayerParameter& param)
      : BaseDataLayer<Dtype>(param), has_new_data_(false) {}
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "MemoryData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

  virtual void AddDatumVector(const vector<Datum>& datum_vector);
#ifdef USE_OPENCV
  virtual void AddMatVector(const vector<cv::Mat>& mat_vector,
      const vector<int>& labels);
#endif  // USE_OPENCV

  // Reset should accept const pointers, but can't, because the memory
  // will be given to Blob, which is mutable
  void Reset(Dtype* data, Dtype* label, int n);
  void set_batch_size(int new_size);

  int batch_size() { return batch_size_; }
  int channels() { return channels_; }
  int height() { return height_; }
  int width() { return width_; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  int batch_size_, channels_, height_, width_, size_;
  Dtype* data_;
  Dtype* labels_;
  int n_;
  size_t pos_;
  Blob<Dtype> added_data_;
  Blob<Dtype> added_label_;
  bool has_new_data_;
};

}  // namespace caffe

#endif  // CAFFE_MEMORY_DATA_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/mvn_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_MVN_LAYER_HPP_
#define CAFFE_MVN_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Normalizes the input to have 0-mean and/or unit (1) variance.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
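 *
 * With normalize_variance, computes y = (x - mean(x)) / (sqrt(var(x)) + eps)
 * per sample (statistics taken across channels or per channel, according to
 * across_channels); otherwise only the mean is subtracted.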
 */
template <typename Dtype>
class MVNLayer : public Layer<Dtype> {
 public:
  explicit MVNLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "MVN"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  Blob<Dtype> mean_, variance_, temp_;

  /// sum_multiplier is used to carry out sum using BLAS
  Blob<Dtype> sum_multiplier_;
  Dtype eps_;
};

}  // namespace caffe

#endif  // CAFFE_MVN_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/neuron_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_NEURON_LAYER_HPP_
#define CAFFE_NEURON_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief An interface for layers that take one blob as input (@f$ x @f$)
 *        and produce one equally-sized blob as output (@f$ y @f$), where
 *        each element of the output depends only on the corresponding input
 *        element.
 */
template <typename Dtype>
class NeuronLayer : public Layer<Dtype> {
 public:
  explicit NeuronLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
};

}  // namespace caffe

#endif  // CAFFE_NEURON_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe-master/include/caffe/layers/parameter_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_PARAMETER_LAYER_HPP_
#define CAFFE_PARAMETER_LAYER_HPP_

#include <vector>

#include "caffe/layer.hpp"

namespace caffe {

template <typename Dtype>
class ParameterLayer : public Layer<Dtype> {
 public:
  explicit ParameterLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    if (this->blobs_.size() > 0) {
      LOG(INFO) << "Skipping parameter initialization";
    } else {
      this->blobs_.resize(1);
      this->blobs_[0].reset(new Blob<Dtype>());
      this->blobs_[0]->Reshape(this->layer_param_.parameter_param().shape());
    }
    top[0]->Reshape(this->layer_param_.parameter_param().shape());
  }
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) { }
  virtual inline const char* type() const { return "Parameter"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
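    // Share rather than copy: the top blob aliases the layer's single
    // parameter blob, so gradients written to the top's diff land directly
    // in the parameter's diff for the solver update.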
    top[0]->ShareData(*(this->blobs_[0]));
    top[0]->ShareDiff(*(this->blobs_[0]));
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
  { }
};

}  // namespace caffe

#endif
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/python_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_PYTHON_LAYER_HPP_
#define CAFFE_PYTHON_LAYER_HPP_

#include <boost/python.hpp>
#include <vector>

#include "caffe/layer.hpp"

namespace bp = boost::python;

namespace caffe {

template <typename Dtype>
class PythonLayer : public Layer<Dtype> {
 public:
  PythonLayer(PyObject* self, const LayerParameter& param)
      : Layer<Dtype>(param), self_(bp::handle<>(bp::borrowed(self))) { }

  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // Disallow PythonLayer in MultiGPU training stage, due to GIL issues
    // Details: https://github.com/BVLC/caffe/issues/2936
    if (this->phase_ == TRAIN && Caffe::solver_count() > 1
        && !ShareInParallel()) {
      LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training";
    }
    self_.attr("param_str") = bp::str(
        this->layer_param_.python_param().param_str());
    self_.attr("phase") = static_cast<int>(this->phase_);
    self_.attr("setup")(bottom, top);
  }
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("reshape")(bottom, top);
  }

  virtual inline bool ShareInParallel() const {
    return this->layer_param_.python_param().share_in_parallel();
  }

  virtual inline const char* type() const { return "Python"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("forward")(bottom, top);
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    self_.attr("backward")(top, propagate_down, bottom);
  }

 private:
  bp::object self_;
};

}  // namespace caffe

#endif
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/reshape_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_RESHAPE_LAYER_HPP_
#define CAFFE_RESHAPE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/*
 * @brief Reshapes the input Blob into an arbitrary-sized output Blob.
 *
 * Note: similarly to FlattenLayer, this layer does not change the input values
 * (see FlattenLayer, Blob::ShareData and Blob::ShareDiff).
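 *
 * An illustrative prototxt sketch (dim: 0 copies that axis from the bottom
 * blob; dim: -1 is inferred from the remaining element count):
 *   layer {
 *     type: "Reshape"
 *     bottom: "input"  top: "output"
 *     reshape_param { shape { dim: 0  dim: -1 } }
 *   }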
 */
template <typename Dtype>
class ReshapeLayer : public Layer<Dtype> {
 public:
  explicit ReshapeLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Reshape"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}

  /// @brief vector of axes indices whose dimensions we'll copy from the bottom
  vector<int> copy_axes_;
  /// @brief the index of the axis whose dimension we infer, or -1 if none
  int inferred_axis_;
  /// @brief the product of the "constant" output dimensions
  int constant_count_;
};

}  // namespace caffe

#endif  // CAFFE_RESHAPE_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/rnn_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_RNN_LAYER_HPP_
#define CAFFE_RNN_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/recurrent_layer.hpp"
#include "caffe/net.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype> class RecurrentLayer;

/**
 * @brief Processes time-varying inputs using a simple recurrent neural network
 *        (RNN). Implemented as a network unrolling the RNN computation in time.
 *
 * Given time-varying inputs @f$ x_t @f$, computes hidden state @f$
 *     h_t := \tanh[ W_{hh} h_{t-1} + W_{xh} x_t + b_h ]
 * @f$, and outputs @f$
 *     o_t := \tanh[ W_{ho} h_t + b_o ]
 * @f$.
 */
template <typename Dtype>
class RNNLayer : public RecurrentLayer<Dtype> {
 public:
  explicit RNNLayer(const LayerParameter& param)
      : RecurrentLayer<Dtype>(param) {}

  virtual inline const char* type() const { return "RNN"; }

 protected:
  virtual void FillUnrolledNet(NetParameter* net_param) const;
  virtual void RecurrentInputBlobNames(vector<string>* names) const;
  virtual void RecurrentOutputBlobNames(vector<string>* names) const;
  virtual void RecurrentInputShapes(vector<BlobShape>* shapes) const;
  virtual void OutputBlobNames(vector<string>* names) const;
};

}  // namespace caffe

#endif  // CAFFE_RNN_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/silence_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_SILENCE_LAYER_HPP_
#define CAFFE_SILENCE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Ignores bottom blobs while producing no top blobs. (This is useful
 *        to suppress outputs during testing.)
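 *
 * For example, feeding an otherwise-unconsumed blob into a Silence layer
 * keeps the Net from treating it as an output to be returned and logged.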
 */
template <typename Dtype>
class SilenceLayer : public Layer<Dtype> {
 public:
  explicit SilenceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "Silence"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 0; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  // We can't define Forward_gpu here, since STUB_GPU will provide
  // its own definition for CPU_ONLY mode.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};

}  // namespace caffe

#endif  // CAFFE_SILENCE_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/slice_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_SLICE_LAYER_HPP_
#define CAFFE_SLICE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Takes a Blob and slices it along either the num or channel dimension,
 *        outputting multiple sliced Blob results.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class SliceLayer : public Layer<Dtype> {
 public:
  explicit SliceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Slice"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int count_;
  int num_slices_;
  int slice_size_;
  int slice_axis_;
  vector<int> slice_point_;
};

}  // namespace caffe

#endif  // CAFFE_SLICE_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/softmax_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_SOFTMAX_LAYER_HPP_
#define CAFFE_SOFTMAX_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Computes the softmax function.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
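 *
 * Along softmax_axis this computes, in the numerically stable form,
 * y_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)), so every slice of the
 * output sums to one.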
 */
template <typename Dtype>
class SoftmaxLayer : public Layer<Dtype> {
 public:
  explicit SoftmaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Softmax"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int outer_num_;
  int inner_num_;
  int softmax_axis_;
  /// sum_multiplier is used to carry out sum using BLAS
  Blob<Dtype> sum_multiplier_;
  /// scale is an intermediate Blob to hold temporary results.
  Blob<Dtype> scale_;
};

}  // namespace caffe

#endif  // CAFFE_SOFTMAX_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/split_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_SPLIT_LAYER_HPP_
#define CAFFE_SPLIT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Creates a "split" path in the network by copying the bottom Blob
 *        into multiple top Blob%s to be used by multiple consuming layers.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class SplitLayer : public Layer<Dtype> {
 public:
  explicit SplitLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Split"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int count_;
};

}  // namespace caffe

#endif  // CAFFE_SPLIT_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/tile_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_TILE_LAYER_HPP_
#define CAFFE_TILE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Copy a Blob along specified dimensions.
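 *
 * E.g. with tile_param { axis: 1 tiles: 2 }, a 2 x 3 bottom blob becomes a
 * 2 x 6 top blob holding two back-to-back copies of the input channels.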
 */
template <typename Dtype>
class TileLayer : public Layer<Dtype> {
 public:
  explicit TileLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Tile"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  unsigned int axis_, tiles_, outer_dim_, inner_dim_;
};

}  // namespace caffe

#endif  // CAFFE_TILE_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/window_data_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_WINDOW_DATA_LAYER_HPP_
#define CAFFE_WINDOW_DATA_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net from windows of image files, specified
 *        by a window data file.
 *
 * TODO(dox): thorough documentation for Forward and proto params.
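 *
 * Roughly: each window in the file carries the fields of the WindowField
 * enum below (image index, class label, ground-truth overlap, and x1 y1 x2
 * y2 corners), and windows are binned into the foreground/background sets
 * by an overlap threshold before a batch is sampled.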
 */
template <typename Dtype>
class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit WindowDataLayer(const LayerParameter& param)
      : BasePrefetchingDataLayer<Dtype>(param) {}
  virtual ~WindowDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "WindowData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

 protected:
  virtual unsigned int PrefetchRand();
  virtual void load_batch(Batch<Dtype>* batch);

  shared_ptr<Caffe::RNG> prefetch_rng_;
  vector<std::pair<std::string, vector<int> > > image_database_;
  enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM };
  vector<vector<float> > fg_windows_;
  vector<vector<float> > bg_windows_;
  Blob<Dtype> data_mean_;
  vector<Dtype> mean_values_;
  bool has_mean_file_;
  bool has_mean_values_;
  bool cache_images_;
  vector<std::pair<std::string, Datum> > image_database_cache_;
};

}  // namespace caffe

#endif  // CAFFE_WINDOW_DATA_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/yolov3DetectResult_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_YOLOV3_DETECTRESULT_LAYER_HPP_
#define CAFFE_YOLOV3_DETECTRESULT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype>
class Yolov3DetectResultLayer : public Layer<Dtype> {
 public:
  explicit Yolov3DetectResultLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "EvalDetection"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    for (int i = 0; i < propagate_down.size(); ++i) {
      if (propagate_down[i]) { NOT_IMPLEMENTED; }
    }
  }
  int side_;
  int num_class_;
  int num_object_;
  int coords_;
  float obj_threshold_;
  float nms_threshold_;
  int score_type_;
  float nms_;

  int max_boxes_;
  vector<Dtype> biases_;
  vector<int> masks_;

  Blob<Dtype> diff_;
  Blob<Dtype> real_diff_;

  string class_map_;
  map<int, int> cls_map_;
};

}  // namespace caffe

#endif  // CAFFE_YOLOV3_DETECTRESULT_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/layers/yolov3Detection_layer.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_YOLOV3_DETECTION_LAYER_HPP_
#define CAFFE_YOLOV3_DETECTION_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype>
class Yolov3DetectLayer : public Layer<Dtype> {
 public:
  explicit Yolov3DetectLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "EvalDetection"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    for (int i = 0; i < propagate_down.size(); ++i) {
      if (propagate_down[i]) { NOT_IMPLEMENTED; }
    }
  }
  int side_;
  int num_class_;
  int num_object_;
  int coords_;
  float obj_threshold_;
  float nms_threshold_;
  int score_type_;
  float nms_;

  int max_boxes_;
  vector<Dtype> biases_;
  vector<int> masks_;

  Blob<Dtype> diff_;
  Blob<Dtype> real_diff_;

  string class_map_;
  map<int, int> cls_map_;
};

}  // namespace caffe

#endif  // CAFFE_YOLOV3_DETECTION_LAYER_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/benchmark.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_BENCHMARK_H_
#define CAFFE_UTIL_BENCHMARK_H_

#include <boost/date_time/posix_time/posix_time.hpp>

#include "caffe/util/device_alternate.hpp"

namespace caffe {

class Timer {
 public:
  Timer();
  virtual ~Timer();
  virtual void Start();
  virtual void Stop();
  virtual float MilliSeconds();
  virtual float MicroSeconds();
  virtual float Seconds();

  inline bool initted() { return initted_; }
  inline bool running() { return running_; }
  inline bool has_run_at_least_once() { return has_run_at_least_once_; }

 protected:
  void Init();

  bool initted_;
  bool running_;
  bool has_run_at_least_once_;
#ifndef CPU_ONLY
  cudaEvent_t start_gpu_;
  cudaEvent_t stop_gpu_;
#endif
  boost::posix_time::ptime start_cpu_;
  boost::posix_time::ptime stop_cpu_;
  float elapsed_milliseconds_;
  float elapsed_microseconds_;
};

class CPUTimer : public Timer {
 public:
  explicit CPUTimer();
  virtual ~CPUTimer() {}
  virtual void Start();
  virtual void Stop();
  virtual float MilliSeconds();
  virtual float MicroSeconds();
};

}  // namespace caffe

#endif  // CAFFE_UTIL_BENCHMARK_H_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/blocking_queue.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_
#define CAFFE_UTIL_BLOCKING_QUEUE_HPP_

#include <queue>
#include <string>

namespace caffe {

template<typename T>
class BlockingQueue {
 public:
  explicit BlockingQueue();

  void push(const T& t);

  bool try_pop(T* t);

  // This logs a message if the thread needs to be blocked,
  // useful for detecting e.g. when data feeding is too slow
  T pop(const string& log_on_wait = "");

  bool try_peek(T* t);

  // Return element without removing it
  T peek();

  size_t size() const;

 protected:
  /**
   Move synchronization fields out instead of including boost/thread.hpp
   to avoid boost/NVCC issues (#1009, #1010) on OSX. Also fails on
   Linux CUDA 7.0.18.
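
   (The sync class is the pimpl holding the mutex and condition variable;
   it is defined only in blocking_queue.cpp.)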
   */
  class sync;

  std::queue<T> queue_;
  shared_ptr<sync> sync_;

  DISABLE_COPY_AND_ASSIGN(BlockingQueue);
};

}  // namespace caffe

#endif
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/db.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_DB_HPP
#define CAFFE_UTIL_DB_HPP

#include <string>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe { namespace db {

enum Mode { READ, WRITE, NEW };

class Cursor {
 public:
  Cursor() { }
  virtual ~Cursor() { }
  virtual void SeekToFirst() = 0;
  virtual void Next() = 0;
  virtual string key() = 0;
  virtual string value() = 0;
  virtual bool valid() = 0;

  DISABLE_COPY_AND_ASSIGN(Cursor);
};

class Transaction {
 public:
  Transaction() { }
  virtual ~Transaction() { }
  virtual void Put(const string& key, const string& value) = 0;
  virtual void Commit() = 0;

  DISABLE_COPY_AND_ASSIGN(Transaction);
};

class DB {
 public:
  DB() { }
  virtual ~DB() { }
  virtual void Open(const string& source, Mode mode) = 0;
  virtual void Close() = 0;
  virtual Cursor* NewCursor() = 0;
  virtual Transaction* NewTransaction() = 0;

  DISABLE_COPY_AND_ASSIGN(DB);
};

DB* GetDB(DataParameter::DB backend);
DB* GetDB(const string& backend);

}  // namespace db
}  // namespace caffe

#endif  // CAFFE_UTIL_DB_HPP
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/format.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_FORMAT_H_
#define CAFFE_UTIL_FORMAT_H_

#include <iomanip>  // NOLINT(readability/streams)
#include <sstream>  // NOLINT(readability/streams)
#include <string>

namespace caffe {

inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) {
  std::ostringstream s;
  s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n;
  return s.str();
}

}

#endif  // CAFFE_UTIL_FORMAT_H_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/gpu_util.cuh: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_GPU_UTIL_H_
#define CAFFE_UTIL_GPU_UTIL_H_

namespace caffe {

template <typename Dtype>
inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address);

template <>
inline __device__
float caffe_gpu_atomic_add(const float val, float* address) {
  return atomicAdd(address, val);
}

// double atomicAdd implementation taken from:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG
template <>
inline __device__
double caffe_gpu_atomic_add(const double val, double* address) {
  unsigned long long int* address_as_ull =  // NOLINT(runtime/int)
      // NOLINT_NEXT_LINE(runtime/int)
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull;  // NOLINT(runtime/int)
  unsigned long long int assumed;  // NOLINT(runtime/int)
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}

}  // namespace caffe

#endif  // CAFFE_UTIL_GPU_UTIL_H_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/hdf5.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_HDF5_H_
#define CAFFE_UTIL_HDF5_H_

#include <string>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/blob.hpp"

namespace caffe {

template <typename Dtype>
void hdf5_load_nd_dataset_helper(
    hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
    Blob<Dtype>* blob);

template <typename Dtype>
void hdf5_load_nd_dataset(
    hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
    Blob<Dtype>* blob);

template <typename Dtype>
void hdf5_save_nd_dataset(
    const hid_t file_id, const string& dataset_name, const Blob<Dtype>& blob,
    bool write_diff = false);

int hdf5_load_int(hid_t loc_id, const string& dataset_name);
void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i);
string hdf5_load_string(hid_t loc_id, const string& dataset_name);
void hdf5_save_string(hid_t loc_id, const string& dataset_name,
                      const string& s);

int hdf5_get_num_links(hid_t loc_id);
string hdf5_get_name_by_idx(hid_t loc_id, int idx);

}  // namespace caffe

#endif  // CAFFE_UTIL_HDF5_H_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/insert_splits.hpp: --------------------------------------------------------------------------------
#ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_
#define _CAFFE_UTIL_INSERT_SPLITS_HPP_

#include <string>

#include "caffe/proto/caffe.pb.h"

namespace caffe {

// Copy NetParameters with SplitLayers added to replace any shared bottom
// blobs with unique bottom blobs provided by the SplitLayer.
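// (E.g. a blob consumed by two layers is rerouted through a SplitLayer whose
// two tops feed the two consumers, so their gradients are accumulated.)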
void InsertSplits(const NetParameter& param, NetParameter* param_split);

void ConfigureSplitLayer(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_count, const float loss_weight,
    LayerParameter* split_layer_param);

string SplitLayerName(const string& layer_name, const string& blob_name,
    const int blob_idx);

string SplitBlobName(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_idx);

}  // namespace caffe

#endif  // CAFFE_UTIL_INSERT_SPLITS_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/rng.hpp: --------------------------------------------------------------------------------
#ifndef CAFFE_RNG_CPP_HPP_
#define CAFFE_RNG_CPP_HPP_

#include <algorithm>
#include <iterator>

#include "boost/random/mersenne_twister.hpp"
#include "boost/random/uniform_int.hpp"

#include "caffe/common.hpp"

namespace caffe {

typedef boost::mt19937 rng_t;

inline rng_t* caffe_rng() {
  return static_cast<caffe::rng_t*>(Caffe::rng_stream().generator());
}

// Fisher–Yates algorithm
template <class RandomAccessIterator, class RandomGenerator>
inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
                    RandomGenerator* gen) {
  typedef typename std::iterator_traits<RandomAccessIterator>::difference_type
      difference_type;
  typedef typename boost::uniform_int<difference_type> dist_type;

  difference_type length = std::distance(begin, end);
  if (length <= 0) return;

  for (difference_type i = length - 1; i > 0; --i) {
    dist_type dist(0, i);
    std::iter_swap(begin + i, begin + dist(*gen));
  }
}

template <class RandomAccessIterator>
inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) {
  shuffle(begin, end, caffe_rng());
}
}  // namespace caffe

#endif  // CAFFE_RNG_CPP_HPP_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/signal_handler.h: --------------------------------------------------------------------------------
#ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_
#define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_

#include "caffe/proto/caffe.pb.h"
#include "caffe/solver.hpp"

namespace caffe {

class SignalHandler {
 public:
  // Constructor. Specify what action to take when a signal is received.
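  // Typical use (sketch): construct one with the desired actions, then hand
  // GetActionFunction() to the solver so a caught signal can snapshot or stop.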
  SignalHandler(SolverAction::Enum SIGINT_action,
                SolverAction::Enum SIGHUP_action);
  ~SignalHandler();
  ActionCallback GetActionFunction();
 private:
  SolverAction::Enum CheckForSignals() const;
  SolverAction::Enum SIGINT_action_;
  SolverAction::Enum SIGHUP_action_;
};

}  // namespace caffe

#endif  // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_
-------------------------------------------------------------------------------- /caffe-master/include/caffe/util/tree.hpp: --------------------------------------------------------------------------------
#ifndef TREE_H
#define TREE_H
#include <string>
#include <vector>

using std::vector;
using std::string;

class tree
{
 public:
  tree();
  tree(string filename);

  vector<int> leaf;
  int n;
  vector<int> parent;
  vector<int> child;
  vector<int> group;
  vector<string> name;

  int groups;
  vector<int> group_size;
  vector<int> group_offset;
};


#endif  // TREE_H
-------------------------------------------------------------------------------- /caffe-master/src/caffe/CMakeLists.txt: --------------------------------------------------------------------------------
# generate protobuf sources
file(GLOB proto_files proto/*.proto)
caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files})

# include python files either to force generation
add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python})
set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend!
caffe_default_properties(proto)

# --[ Caffe library

# creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists
caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR})

if(HAVE_CUDA)
  caffe_cuda_compile(cuda_objs ${cuda})
  list(APPEND srcs ${cuda_objs} ${cuda})
endif()

add_library(caffe ${srcs})
target_link_libraries(caffe proto ${Caffe_LINKER_LIBS})
caffe_default_properties(caffe)
set_target_properties(caffe PROPERTIES
    VERSION   ${CAFFE_TARGET_VERSION}
    SOVERSION ${CAFFE_TARGET_SOVERSION}
    )

# ---[ Tests
add_subdirectory(test)

# ---[ Install
install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include)
install(FILES ${proto_hdrs} DESTINATION include/caffe/proto)
install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib)

file(WRITE ${PROJECT_BINARY_DIR}/__init__.py)
list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py)
install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto)


-------------------------------------------------------------------------------- /caffe-master/src/caffe/internal_thread.cpp: --------------------------------------------------------------------------------
#include <boost/thread.hpp>
#include <exception>

#include "caffe/internal_thread.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

InternalThread::~InternalThread() {
  StopInternalThread();
}

bool InternalThread::is_started() const {
  return thread_ && thread_->joinable();
}

bool InternalThread::must_stop() {
  return thread_ && thread_->interruption_requested();
}

void InternalThread::StartInternalThread() {
  CHECK(!is_started()) << "Threads should persist and not be restarted.";

  int device = 0;
#ifndef CPU_ONLY
  CUDA_CHECK(cudaGetDevice(&device));
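  // (The worker thread created below binds itself to this same device.)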
#endif
  Caffe::Brew mode = Caffe::mode();
  int rand_seed = caffe_rng_rand();
  int solver_count = Caffe::solver_count();
  bool root_solver = Caffe::root_solver();

  try {
    thread_.reset(new boost::thread(&InternalThread::entry, this, device, mode,
          rand_seed, solver_count, root_solver));
  } catch (std::exception& e) {
    LOG(FATAL) << "Thread exception: " << e.what();
  }
}

void InternalThread::entry(int device, Caffe::Brew mode, int rand_seed,
    int solver_count, bool root_solver) {
#ifndef CPU_ONLY
  CUDA_CHECK(cudaSetDevice(device));
#endif
  Caffe::set_mode(mode);
  Caffe::set_random_seed(rand_seed);
  Caffe::set_solver_count(solver_count);
  Caffe::set_root_solver(root_solver);

  InternalThreadEntry();
}

void InternalThread::StopInternalThread() {
  if (is_started()) {
    thread_->interrupt();
    try {
      thread_->join();
    } catch (boost::thread_interrupted&) {
    } catch (std::exception& e) {
      LOG(FATAL) << "Thread exception: " << e.what();
    }
  }
}

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layer.cpp: --------------------------------------------------------------------------------
#include <boost/thread.hpp>
#include "caffe/layer.hpp"

namespace caffe {

template <typename Dtype>
void Layer<Dtype>::InitMutex() {
  forward_mutex_.reset(new boost::mutex());
}

template <typename Dtype>
void Layer<Dtype>::Lock() {
  if (IsShared()) {
    forward_mutex_->lock();
  }
}

template <typename Dtype>
void Layer<Dtype>::Unlock() {
  if (IsShared()) {
    forward_mutex_->unlock();
  }
}

INSTANTIATE_CLASS(Layer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/absval_layer.cpp: --------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/absval_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void AbsValLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
    "allow in-place computation.";
}

template <typename Dtype>
void AbsValLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_abs(count, bottom[0]->cpu_data(), top_data);
}

template <typename Dtype>
void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->cpu_diff();
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_cpu_sign(count, bottom_data, bottom_diff);
    caffe_mul(count, bottom_diff, top_diff, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(AbsValLayer);
#endif

INSTANTIATE_CLASS(AbsValLayer);
REGISTER_LAYER_CLASS(AbsVal);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/absval_layer.cu: --------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/absval_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void AbsValLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_gpu_data();
  caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
}

template <typename Dtype>
void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->gpu_diff();
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_sign(count, bottom_data, bottom_diff);
    caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer);


}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/base_data_layer.cu: --------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/base_data_layer.hpp"

namespace caffe {

template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  // Reshape to loaded data.
  top[0]->ReshapeLike(batch->data_);
  // Copy the data
  caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
      top[0]->mutable_gpu_data());
  if (this->output_labels_) {
    // Reshape to loaded labels.
    top[1]->ReshapeLike(batch->label_);
    // Copy the labels.
    caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
        top[1]->mutable_gpu_data());
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in meanwhile.
  CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
  prefetch_free_.push(batch);
}

INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/bnll_layer.cpp: --------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/bnll_layer.hpp"

namespace caffe {

const float kBNLL_THRESHOLD = 50.;

template <typename Dtype>
void BNLLLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = bottom_data[i] > 0 ?
        bottom_data[i] + log(1. + exp(-bottom_data[i])) :
        log(1. + exp(bottom_data[i]));
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype expval;
    for (int i = 0; i < count; ++i) {
      expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
      bottom_diff[i] = top_diff[i] * expval / (expval + 1.);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(BNLLLayer);
#endif

INSTANTIATE_CLASS(BNLLLayer);
REGISTER_LAYER_CLASS(BNLL);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/bnll_layer.cu: --------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/bnll_layer.hpp"

namespace caffe {

__constant__ float kBNLL_THRESHOLD = 50.;

template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ?
        in[index] + log(1. + exp(-in[index])) :
        log(1. + exp(in[index]));
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
    out_diff[index] = in_diff[index] * expval / (expval + 1.);
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);


}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_lcn_layer.cu: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_lcn_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNLCNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();

  CUDNN_CHECK(cudnnDivisiveNormalizationForward(
        handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        NULL,  // srcMeansData
        this->tempData1, this->tempData2,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNLCNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

  CUDNN_CHECK(cudnnDivisiveNormalizationBackward(
        handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        NULL, top_diff,  // NULL - srcMeansData
        this->tempData1, this->tempData2,
        cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_diff,
        NULL));
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLCNLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_lrn_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_lrn_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  LRNLayer<Dtype>::LayerSetUp(bottom, top);

  CUDNN_CHECK(cudnnCreate(&handle_));
  CUDNN_CHECK(cudnnCreateLRNDescriptor(&norm_desc_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);

  // create a LRN handle
  handles_setup_ = true;

  size_ = this->layer_param().lrn_param().local_size();
  alpha_ = this->layer_param().lrn_param().alpha();
  beta_ = this->layer_param().lrn_param().beta();
  k_ = this->layer_param().lrn_param().k();
}

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  LRNLayer<Dtype>::Reshape(bottom, top);
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, bottom[0]->num(),
      this->channels_, this->height_, this->width_);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, bottom[0]->num(),
      this->channels_, this->height_, this->width_);
  CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_));
}

template <typename Dtype>
CuDNNLRNLayer<Dtype>::~CuDNNLRNLayer() {
  // Check that handles have been setup before destroying.
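  // (If LayerSetUp never ran, the descriptors below were never created and
  // destroying them would touch uninitialized handles.)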
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(bottom_desc_);
  cudnnDestroyTensorDescriptor(top_desc_);

  // destroy LRN handle
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNLRNLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_lrn_layer.cu: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_lrn_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();

  CUDNN_CHECK(cudnnLRNCrossChannelForward(
        handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

  CUDNN_CHECK(cudnnLRNCrossChannelBackward(
        handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1,
        cudnn::dataType<Dtype>::one,
        top_desc_, top_data,
        top_desc_, top_diff,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_diff));
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLRNLayer);

}  // namespace caffe

#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_pooling_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_pooling_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  PoolingLayer<Dtype>::LayerSetUp(bottom, top);
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createPoolingDesc<Dtype>(&pooling_desc_,
      this->layer_param_.pooling_param().pool(), &mode_,
      this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_,
      this->stride_h_, this->stride_w_);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  PoolingLayer<Dtype>::Reshape(bottom, top);
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, bottom[0]->num(),
      this->channels_, this->height_, this->width_);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, bottom[0]->num(),
      this->channels_, this->pooled_height_, this->pooled_width_);
}

template <typename Dtype>
CuDNNPoolingLayer<Dtype>::~CuDNNPoolingLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(bottom_desc_);
  cudnnDestroyTensorDescriptor(top_desc_);
  cudnnDestroyPoolingDescriptor(pooling_desc_);
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNPoolingLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_pooling_layer.cu: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_pooling_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_,
        cudnn::dataType<Dtype>::one,
        top_desc_, top_data, top_desc_, top_diff,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_diff));
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_relu_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_relu_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  ReLULayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_RELU);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  ReLULayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNReLULayer<Dtype>::~CuDNNReLULayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNReLULayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_sigmoid_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_sigmoid_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_,
      CUDNN_ACTIVATION_SIGMOID);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SigmoidLayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNSigmoidLayer<Dtype>::~CuDNNSigmoidLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNSigmoidLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_softmax_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layers/cudnn_softmax_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::LayerSetUp(bottom, top);
  // Initialize CUDNN.
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::Reshape(bottom, top);
  int N = this->outer_num_;
  int K = bottom[0]->shape(this->softmax_axis_);
  int H = this->inner_num_;
  int W = 1;
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNSoftmaxLayer<Dtype>::~CuDNNSoftmaxLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(bottom_desc_);
  cudnnDestroyTensorDescriptor(top_desc_);
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_softmax_layer.cu: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layers/cudnn_softmax_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
        CUDNN_SOFTMAX_MODE_CHANNEL,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

    CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
          CUDNN_SOFTMAX_MODE_CHANNEL,
          cudnn::dataType<Dtype>::one,
          top_desc_, top_data, top_desc_, top_diff,
          cudnn::dataType<Dtype>::zero,
          bottom_desc_, bottom_diff));
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif
-------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/cudnn_tanh_layer.cpp: --------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_tanh_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_TANH);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNTanHLayer<Dtype>::~CuDNNTanHLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNTanHLayer);

}  // namespace caffe
#endif

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/elu_layer.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/elu_layer.hpp"

namespace caffe {

template <typename Dtype>
void ELULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + alpha * (exp(std::min(bottom_data[i], Dtype(0))) - Dtype(1));
  }
}

template <typename Dtype>
void ELULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype alpha = this->layer_param_.elu_param().alpha();
    for (int i = 0; i < count; ++i) {
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + (alpha + top_data[i]) * (bottom_data[i] <= 0));
    }
  }
}


#ifdef CPU_ONLY
STUB_GPU(ELULayer);
#endif

INSTANTIATE_CLASS(ELULayer);
REGISTER_LAYER_CLASS(ELU);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/euclidean_loss_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.cpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_cpu_diff());  // b
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(EuclideanLossLayer);
#endif

INSTANTIATE_CLASS(EuclideanLossLayer);
REGISTER_LAYER_CLASS(EuclideanLoss);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/euclidean_loss_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),
      bottom[1]->gpu_data(),
      diff_.mutable_gpu_data());
  Dtype dot;
  caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_gpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.gpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // b
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/exp_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/exp_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void ExpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (inner_scale_ == Dtype(1)) {
    caffe_gpu_exp(count, bottom_data, top_data);
  } else {
    caffe_gpu_scale(count, inner_scale_, bottom_data, top_data);
    caffe_gpu_exp(count, top_data, top_data);
  }
  if (outer_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, outer_scale_, top_data);
  }
}

template <typename Dtype>
void ExpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  caffe_gpu_mul(count, top_data, top_diff, bottom_diff);
  if (inner_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, inner_scale_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer);


}  // namespace caffe
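For reference, the gradient logic shared by the CPU and GPU Euclidean loss implementations above reduces to a few lines of scalar arithmetic. The sketch below is not repository code; it is a plain-C++ paraphrase over hypothetical raw float arrays (the function names are invented for illustration), with num standing in for bottom[0]->num() and loss_weight for top[0]->cpu_diff()[0]:

#include <cstddef>

// Mirrors Forward_cpu: loss = ||a - b||^2 / (2 * num); diff caches a - b.
float euclidean_loss(const float* a, const float* b, float* diff,
                     std::size_t count, int num) {
  float dot = 0.f;
  for (std::size_t i = 0; i < count; ++i) {
    diff[i] = a[i] - b[i];     // caffe_sub
    dot += diff[i] * diff[i];  // caffe_cpu_dot
  }
  return dot / num / 2.f;
}

// Mirrors Backward_cpu: each input's gradient is sign * loss_weight / num
// times the cached diff (caffe_cpu_axpby with beta = 0).
void euclidean_loss_grad(const float* diff, float* grad, std::size_t count,
                         int num, float loss_weight, float sign) {
  const float alpha = sign * loss_weight / num;
  for (std::size_t i = 0; i < count; ++i) {
    grad[i] = alpha * diff[i];
  }
}

The 1/(2*num) factor in the forward pass is what makes the backward scale come out to exactly +/-diff/num when the loss weight is 1: the 2 from differentiating the square cancels the 1/2.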
--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/flatten_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/flatten_layer.hpp"

namespace caffe {

template <typename Dtype>
void FlattenLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
      "allow in-place computation.";
  const int start_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().axis());
  const int end_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().end_axis());
  vector<int> top_shape;
  for (int i = 0; i < start_axis; ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1);
  top_shape.push_back(flattened_dim);
  for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  top[0]->Reshape(top_shape);
  CHECK_EQ(top[0]->count(), bottom[0]->count());
}

template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ShareData(*bottom[0]);
}

template <typename Dtype>
void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  bottom[0]->ShareDiff(*top[0]);
}

INSTANTIATE_CLASS(FlattenLayer);
REGISTER_LAYER_CLASS(Flatten);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/hdf5_data_layer.cu:
--------------------------------------------------------------------------------
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/

#include <stdint.h>
#include <vector>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/layers/hdf5_data_layer.hpp"

namespace caffe {

template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
  for (int i = 0; i < batch_size; ++i, ++current_row_) {
    if (current_row_ == hdf_blobs_[0]->shape(0)) {
      if (num_files_ > 1) {
        current_file_ += 1;
        if (current_file_ == num_files_) {
          current_file_ = 0;
          if (this->layer_param_.hdf5_data_param().shuffle()) {
            std::random_shuffle(file_permutation_.begin(),
                                file_permutation_.end());
          }
          DLOG(INFO) << "Looping around to first file.";
        }
        LoadHDF5FileData(
            hdf_filenames_[file_permutation_[current_file_]].c_str());
      }
      current_row_ = 0;
      if (this->layer_param_.hdf5_data_param().shuffle())
        std::random_shuffle(data_permutation_.begin(), data_permutation_.end());
    }
    for (int j = 0; j < this->layer_param_.top_size(); ++j) {
      int data_dim = top[j]->count() / top[j]->shape(0);
      caffe_copy(data_dim,
          &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
            * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/hdf5_output_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "hdf5.h"
"hdf5_hl.h" 5 | 6 | #include "caffe/layers/hdf5_output_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | CHECK_GE(bottom.size(), 2); 14 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()); 15 | data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), 16 | bottom[0]->height(), bottom[0]->width()); 17 | label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), 18 | bottom[1]->height(), bottom[1]->width()); 19 | const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); 20 | const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); 21 | 22 | for (int i = 0; i < bottom[0]->num(); ++i) { 23 | caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], 24 | &data_blob_.mutable_cpu_data()[i * data_datum_dim]); 25 | caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], 26 | &label_blob_.mutable_cpu_data()[i * label_datum_dim]); 27 | } 28 | SaveBlobs(); 29 | } 30 | 31 | template 32 | void HDF5OutputLayer::Backward_gpu(const vector*>& top, 33 | const vector& propagate_down, const vector*>& bottom) { 34 | return; 35 | } 36 | 37 | INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer); 38 | 39 | } // namespace caffe 40 | -------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/input_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/input_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void InputLayer::LayerSetUp(const vector*>& bottom, 9 | const vector*>& top) { 10 | const int num_top = top.size(); 11 | const InputParameter& param = this->layer_param_.input_param(); 12 | const int num_shape = param.shape_size(); 13 | CHECK(num_shape == 0 || num_shape == 1 || num_shape == num_top) 14 | << "Must specify 'shape' once, once per top blob, or not at all: " 15 | << num_top << " tops vs. " << num_shape << " shapes."; 16 | if (num_shape > 0) { 17 | for (int i = 0; i < num_top; ++i) { 18 | const int shape_index = (param.shape_size() == 1) ? 0 : i; 19 | top[i]->Reshape(param.shape(shape_index)); 20 | } 21 | } 22 | } 23 | 24 | INSTANTIATE_CLASS(InputLayer); 25 | REGISTER_LAYER_CLASS(Input); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe-master/src/caffe/layers/loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/loss_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void LossLayer::LayerSetUp( 9 | const vector*>& bottom, const vector*>& top) { 10 | // LossLayers have a non-zero (1) loss by default. 11 | if (this->layer_param_.loss_weight_size() == 0) { 12 | this->layer_param_.add_loss_weight(Dtype(1)); 13 | } 14 | } 15 | 16 | template 17 | void LossLayer::Reshape( 18 | const vector*>& bottom, const vector*>& top) { 19 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()) 20 | << "The data and label should have the same number."; 21 | vector loss_shape(0); // Loss layers output a scalar; 0 axes. 
  top[0]->Reshape(loss_shape);
}

INSTANTIATE_CLASS(LossLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/neuron_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

template <typename Dtype>
void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
}

INSTANTIATE_CLASS(NeuronLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/parameter_layer.cpp:
--------------------------------------------------------------------------------
#include "caffe/layers/parameter_layer.hpp"

namespace caffe {

INSTANTIATE_CLASS(ParameterLayer);
REGISTER_LAYER_CLASS(Parameter);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/recurrent_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/recurrent_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void RecurrentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Hacky fix for test time... reshare all the shared blobs.
  // TODO: somehow make this work non-hackily.
  if (this->phase_ == TEST) {
    unrolled_net_->ShareWeights();
  }

  DCHECK_EQ(recur_input_blobs_.size(), recur_output_blobs_.size());
  if (!expose_hidden_) {
    for (int i = 0; i < recur_input_blobs_.size(); ++i) {
      const int count = recur_input_blobs_[i]->count();
      DCHECK_EQ(count, recur_output_blobs_[i]->count());
      const Dtype* timestep_T_data = recur_output_blobs_[i]->gpu_data();
      Dtype* timestep_0_data = recur_input_blobs_[i]->mutable_gpu_data();
      caffe_copy(count, timestep_T_data, timestep_0_data);
    }
  }

  unrolled_net_->ForwardTo(last_layer_index_);

  if (expose_hidden_) {
    const int top_offset = output_blobs_.size();
    for (int i = top_offset, j = 0; i < top.size(); ++i, ++j) {
      top[i]->ShareData(*recur_output_blobs_[j]);
    }
  }
}

INSTANTIATE_LAYER_GPU_FORWARD(RecurrentLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/relu_layer.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/relu_layer.hpp"

namespace caffe {

template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + negative_slope * std::min(bottom_data[i], Dtype(0));
  }
}

template <typename Dtype>
void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    for (int i = 0; i < count; ++i) {
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + negative_slope * (bottom_data[i] <= 0));
    }
  }
}


#ifdef CPU_ONLY
STUB_GPU(ReLULayer);
#endif

INSTANTIATE_CLASS(ReLULayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Scale down gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_gpu_scal(count, loss_weight / num, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer);


}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/sigmoid_layer.cpp:
--------------------------------------------------------------------------------
#include <cmath>
#include <vector>

#include "caffe/layers/sigmoid_layer.hpp"

namespace caffe {

template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
  return 1. / (1. + exp(-x));
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = sigmoid(bottom_data[i]);
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    for (int i = 0; i < count; ++i) {
      const Dtype sigmoid_x = top_data[i];
      bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SigmoidLayer);
#endif

INSTANTIATE_CLASS(SigmoidLayer);


}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/silence_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_set(bottom[i]->count(), Dtype(0),
                bottom[i]->mutable_cpu_diff());
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SilenceLayer);
#endif

INSTANTIATE_CLASS(SilenceLayer);
REGISTER_LAYER_CLASS(Silence);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/silence_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Do nothing.
}

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_gpu_set(bottom[i]->count(), Dtype(0),
                    bottom[i]->mutable_gpu_diff());
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/split_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/split_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  count_ = bottom[0]->count();
  for (int i = 0; i < top.size(); ++i) {
    // Do not allow in-place computation in the SplitLayer. Instead, share data
    // by reference in the forward pass, and keep separate diff allocations in
    // the backward pass. (Technically, it should be possible to share the diff
    // blob of the first split output with the input, but this seems to cause
    // some strange effects in practice...)
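    // Concretely: every top blob will alias bottom[0]'s data via ShareData()
    // in Forward, while each top keeps its own diff buffer; Backward then
    // accumulates those diffs into bottom[0] by summation.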
    CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not "
        "allow in-place computation.";
    top[i]->ReshapeLike(*bottom[0]);
    CHECK_EQ(count_, top[i]->count());
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
    return;
  }
  caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(),
            bottom[0]->mutable_cpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}


#ifdef CPU_ONLY
STUB_GPU(SplitLayer);
#endif

INSTANTIATE_CLASS(SplitLayer);
REGISTER_LAYER_CLASS(Split);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/split_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/split_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
    return;
  }
  caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
                bottom[0]->mutable_gpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}


INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/tanh_layer.cpp:
--------------------------------------------------------------------------------
// TanH neuron activation function layer.
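// Since d/dx tanh(x) = 1 - tanh(x)^2, the backward pass below can reuse the
// cached top data instead of recomputing tanh.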
// Adapted from ReLU layer code written by Yangqing Jia

#include <vector>

#include "caffe/layers/tanh_layer.hpp"

namespace caffe {

template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = tanh(bottom_data[i]);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype tanhx;
    for (int i = 0; i < count; ++i) {
      tanhx = top_data[i];
      bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(TanHLayer);
#endif

INSTANTIATE_CLASS(TanHLayer);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/tanh_layer.cu:
--------------------------------------------------------------------------------
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <vector>

#include "caffe/layers/tanh_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = tanh(in[index]);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype tanhx = out_data[index];
    out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);


}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/threshold_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
  }
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(ThresholdLayer, Forward);
#endif

INSTANTIATE_CLASS(ThresholdLayer);
REGISTER_LAYER_CLASS(Threshold);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/threshold_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > threshold ? 1 : 0;
  }
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}


INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);


}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/yolov3DetectResult_layer.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/layers/yolov3DetectResult_layer.cpp

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/yolov3Detection_layer.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/layers/yolov3Detection_layer.cpp

--------------------------------------------------------------------------------
/caffe-master/src/caffe/layers/yolov3_loss_layer.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/layers/yolov3_loss_layer.cpp

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/adadelta_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
    Dtype momentum, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
    gi = gi * sqrt((h2[i] + delta) / (hi + delta));
    h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
    g[i] = local_rate * gi;
  }
}
template <typename Dtype>
void
adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
    Dtype delta, Dtype local_rate) {
  AdaDeltaUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, h2, momentum, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
    float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
    double, double, double);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/adagrad_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = h[i] + gi*gi;
    g[i] = local_rate * gi / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  AdaGradUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adagrad_update_gpu<float>(int, float*, float*, float, float);
template void adagrad_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/adam_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v,
    Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float mi = m[i] = m[i]*beta1 + gi*(1-beta1);
    float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2);
    g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat);
  }
}
template <typename Dtype>
void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1,
    Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  AdamUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adam_update_gpu<float>(int, float*, float*, float*,
    float, float, float, float);
template void adam_update_gpu<double>(int, double*, double*, double*,
    double, double, double, double);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/nesterov_solver.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/sgd_solvers.hpp"

namespace caffe {

#ifndef CPU_ONLY
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate);
#endif

template <typename Dtype>
void NesterovSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
  CHECK(Caffe::root_solver());
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  const vector<float>& net_params_lr = this->net_->params_lr();
  Dtype momentum = this->param_.momentum();
  Dtype local_rate = rate * net_params_lr[param_id];
  switch (Caffe::mode())
  {
  case Caffe::CPU: {
    // save history momentum for stepping back
    caffe_copy(net_params[param_id]->count(),
        this->history_[param_id]->cpu_data(),
        this->update_[param_id]->mutable_cpu_data());

    // update history
    caffe_cpu_axpby(net_params[param_id]->count(), local_rate,
        net_params[param_id]->cpu_diff(), momentum,
        this->history_[param_id]->mutable_cpu_data());

    // compute update: step back then over step
    caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum,
        this->history_[param_id]->cpu_data(), -momentum,
        this->update_[param_id]->mutable_cpu_data());

    // copy
    caffe_copy(net_params[param_id]->count(),
        this->update_[param_id]->cpu_data(),
        net_params[param_id]->mutable_cpu_diff());
    break;
  }
  case Caffe::GPU: {
#ifndef CPU_ONLY
    nesterov_update_gpu(net_params[param_id]->count(),
        net_params[param_id]->mutable_gpu_diff(),
        this->history_[param_id]->mutable_gpu_data(),
        momentum, local_rate);
#else
    NO_GPU;
#endif
    break;
  }
  default:
    LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
  }
}

INSTANTIATE_CLASS(NesterovSolver);
REGISTER_SOLVER_CLASS(Nesterov);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/nesterov_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float hi = h[i];
    float hi_new = h[i] = momentum * hi + local_rate * g[i];
    g[i] = (1+momentum) * hi_new - momentum * hi;
  }
}
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  NesterovUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float*, float*, float, float);
template void nesterov_update_gpu<double>(int, double*, double*, double,
    double);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/rmsprop_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
    Dtype rms_decay, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
    g[i] = local_rate * g[i] / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
    Dtype delta, Dtype local_rate) {
  RMSPropUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, rms_decay, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
    float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
    double);

}  // namespace caffe
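All of the solver kernels above share one contract: they rewrite the gradient buffer g in place, and the generic solver step then applies g to the weights. A CPU paraphrase of two of the kernels makes the pattern explicit; this is an illustrative sketch only (the *_cpu names are invented here), not code from this repository:

#include <cmath>
#include <cstddef>

// CPU paraphrase of NesterovUpdate: step back along the old momentum, then
// over-step along the new one, i.e. g = (1+m)*h_new - m*h_old.
void nesterov_update_cpu(std::size_t n, float* g, float* h,
                         float momentum, float local_rate) {
  for (std::size_t i = 0; i < n; ++i) {
    const float hi = h[i];
    const float hi_new = h[i] = momentum * hi + local_rate * g[i];
    g[i] = (1.f + momentum) * hi_new - momentum * hi;
  }
}

// CPU paraphrase of RMSPropUpdate: h tracks a decayed mean of squared
// gradients, which normalizes the step size per parameter.
void rmsprop_update_cpu(std::size_t n, float* g, float* h,
                        float rms_decay, float delta, float local_rate) {
  for (std::size_t i = 0; i < n; ++i) {
    const float gi = g[i];
    h[i] = rms_decay * h[i] + (1.f - rms_decay) * gi * gi;
    g[i] = local_rate * gi / (std::sqrt(h[i]) + delta);
  }
}

Writing the update back into g is a deliberate design choice: every solver can then reuse the same generic blob update (data -= diff), regardless of which rule produced the step.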
--------------------------------------------------------------------------------
/caffe-master/src/caffe/solvers/sgd_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    g[i] = h[i] = momentum*h[i] + local_rate*g[i];
  }
}
template <typename Dtype>
void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  SGDUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void sgd_update_gpu<float>(int, float*, float*, float, float);
template void sgd_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/CMakeLists.txt:
--------------------------------------------------------------------------------
# This option allows building only selected test files and excluding all
# the others.
# Usage example:
#   cmake -DBUILD_only_tests="common,net,blob,im2col_kernel"
set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extension")
caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests})
caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests})

# For the 'make runtest' target we don't need to embed test data paths into
# the source files, because the test target is executed in the source
# directory. That's why the lines below are commented out. TODO: remove them

# definition needed to include CMake generated files
#add_definitions(-DCMAKE_BUILD)

# generates test_data/sample_data_list.txt.gen.cmake
#caffe_configure_testdatafile(test_data/sample_data_list.txt)

set(the_target test.testbin)
set(test_args --gtest_shuffle)

if(HAVE_CUDA)
  caffe_cuda_compile(test_cuda_objs ${test_cuda})
  list(APPEND test_srcs ${test_cuda_objs} ${test_cuda})
else()
  list(APPEND test_args --gtest_filter="-*GPU*")
endif()

# ---[ Adding test target
add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs})
target_link_libraries(${the_target} gtest ${Caffe_LINK})
caffe_default_properties(${the_target})
caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test")

# ---[ Adding runtest
add_custom_target(runtest COMMAND ${the_target} ${test_args}
    WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_caffe_main.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"
#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
#ifndef CPU_ONLY
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
#endif
}

#ifndef CPU_ONLY
using caffe::CAFFE_TEST_CUDA_PROP;
#endif

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  caffe::GlobalInit(&argc, &argv);
#ifndef CPU_ONLY
  // Before starting testing, let's first print out a few CUDA device details.
  int device;
  cudaGetDeviceCount(&device);
  cout << "Cuda number of devices: " << device << endl;
  if (argc > 1) {
    // Use the given device
    device = atoi(argv[1]);
    cudaSetDevice(device);
    cout << "Setting to use device " << device << endl;
  } else if (CUDA_TEST_DEVICE >= 0) {
    // Use the device assigned in build configuration; but with a lower priority
    device = CUDA_TEST_DEVICE;
  }
  cudaGetDevice(&device);
  cout << "Current device id: " << device << endl;
  cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device);
  cout << "Current device name: " << CAFFE_TEST_CUDA_PROP.name << endl;
#endif
  // invoke the test.
  return RUN_ALL_TESTS();
}

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_common.cpp:
--------------------------------------------------------------------------------
#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class CommonTest : public ::testing::Test {};

#ifndef CPU_ONLY  // GPU Caffe singleton test.

TEST_F(CommonTest, TestCublasHandlerGPU) {
  int cuda_device_id;
  CUDA_CHECK(cudaGetDevice(&cuda_device_id));
  EXPECT_TRUE(Caffe::cublas_handle());
}

#endif

TEST_F(CommonTest, TestBrewMode) {
  Caffe::set_mode(Caffe::CPU);
  EXPECT_EQ(Caffe::mode(), Caffe::CPU);
  Caffe::set_mode(Caffe::GPU);
  EXPECT_EQ(Caffe::mode(), Caffe::GPU);
}

TEST_F(CommonTest, TestRandSeedCPU) {
  SyncedMemory data_a(10 * sizeof(int));
  SyncedMemory data_b(10 * sizeof(int));
  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_a.mutable_cpu_data()));

  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_b.mutable_cpu_data()));

  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(static_cast<const int*>(data_a.cpu_data())[i],
              static_cast<const int*>(data_b.cpu_data())[i]);
  }
}

#ifndef CPU_ONLY  // GPU Caffe singleton test.

TEST_F(CommonTest, TestRandSeedGPU) {
  SyncedMemory data_a(10 * sizeof(unsigned int));
  SyncedMemory data_b(10 * sizeof(unsigned int));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
              ((const unsigned int*)(data_b.cpu_data()))[i]);
  }
}

#endif

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_data/sample_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/test/test_data/sample_data.h5

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_data/sample_data_2_gzip.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/test/test_data/sample_data_2_gzip.h5

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_data/sample_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/sample_data.h5
src/caffe/test/test_data/sample_data_2_gzip.h5

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_data/solver_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/src/caffe/test/test_data/solver_data.h5

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_data/solver_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/solver_data.h5

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_internal_thread.cpp:
--------------------------------------------------------------------------------
#include "glog/logging.h"
#include "gtest/gtest.h"

#include "caffe/internal_thread.hpp"
#include "caffe/util/math_functions.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {


class InternalThreadTest : public ::testing::Test {};

TEST_F(InternalThreadTest, TestStartAndExit) {
  InternalThread thread;
  EXPECT_FALSE(thread.is_started());
  thread.StartInternalThread();
  EXPECT_TRUE(thread.is_started());
  thread.StopInternalThread();
  EXPECT_FALSE(thread.is_started());
}

class TestThreadA : public InternalThread {
  void InternalThreadEntry() {
    EXPECT_EQ(4244559767, caffe_rng_rand());
  }
};

class TestThreadB : public InternalThread {
  void InternalThreadEntry() {
    EXPECT_EQ(1726478280, caffe_rng_rand());
  }
};

TEST_F(InternalThreadTest, TestRandomSeed) {
  TestThreadA t1;
  Caffe::set_random_seed(9658361);
  t1.StartInternalThread();
  t1.StopInternalThread();

  TestThreadA t2;
  Caffe::set_random_seed(9658361);
  t2.StartInternalThread();
  t2.StopInternalThread();

  TestThreadB t3;
  Caffe::set_random_seed(3435563);
  t3.StartInternalThread();
  t3.StopInternalThread();
}

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_layer_factory.cpp:
--------------------------------------------------------------------------------
#include <map>
#include <string>

#include "boost/scoped_ptr.hpp"
#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/util/db.hpp"
#include "caffe/util/io.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

template <typename TypeParam>
class LayerFactoryTest : public MultiDeviceTest<TypeParam> {};

TYPED_TEST_CASE(LayerFactoryTest, TestDtypesAndDevices);

TYPED_TEST(LayerFactoryTest, TestCreateLayer) {
  typedef typename TypeParam::Dtype Dtype;
  typename LayerRegistry<Dtype>::CreatorRegistry& registry =
      LayerRegistry<Dtype>::Registry();
  shared_ptr<Layer<Dtype> > layer;
  for (typename LayerRegistry<Dtype>::CreatorRegistry::iterator iter =
       registry.begin(); iter != registry.end(); ++iter) {
    // Special case: PythonLayer is checked by pytest
    if (iter->first == "Python") { continue; }
    LayerParameter layer_param;
    // Data layers expect a DB
    if (iter->first == "Data") {
#ifdef USE_LEVELDB
      string tmp;
      MakeTempDir(&tmp);
      boost::scoped_ptr<db::DB> db(db::GetDB(DataParameter_DB_LEVELDB));
      db->Open(tmp, db::NEW);
      db->Close();
      layer_param.mutable_data_param()->set_source(tmp);
#else
      continue;
#endif  // USE_LEVELDB
    }
    layer_param.set_type(iter->first);
    layer = LayerRegistry<Dtype>::CreateLayer(layer_param);
    EXPECT_EQ(iter->first, layer->type());
  }
}

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_multinomial_logistic_loss_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/multinomial_logistic_loss_layer.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename Dtype>
class MultinomialLogisticLossLayerTest : public CPUDeviceTest<Dtype> {
 protected:
  MultinomialLogisticLossLayerTest()
      : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)),
        blob_top_loss_(new Blob<Dtype>()) {
    Caffe::set_random_seed(1701);
    // fill the values
    FillerParameter filler_param;
    PositiveUnitballFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_data_);
    for (int i = 0; i < blob_bottom_label_->count(); ++i) {
      blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
    }
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_loss_);
  }
  virtual ~MultinomialLogisticLossLayerTest() {
    delete blob_bottom_data_;
    delete blob_bottom_label_;
    delete blob_top_loss_;
  }
  Blob<Dtype>* const blob_bottom_data_;
  Blob<Dtype>* const blob_bottom_label_;
  Blob<Dtype>* const blob_top_loss_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes);


TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
  LayerParameter layer_param;
  MultinomialLogisticLossLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  GradientChecker<TypeParam> checker(1e-2, 2*1e-2, 1701, 0, 0.05);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_, 0);
}

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_protobuf.cpp:
--------------------------------------------------------------------------------
// This is simply a script that tries serializing a protocol buffer in text
// format. Nothing special here and no actual code is being tested.
#include <string>

#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/proto/caffe.pb.h"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class ProtoTest : public ::testing::Test {};

TEST_F(ProtoTest, TestSerialization) {
  LayerParameter param;
  param.set_name("test");
  param.set_type("Test");
  std::cout << "Printing in binary format." << std::endl;
  std::cout << param.SerializeAsString() << std::endl;
  std::cout << "Printing in text format." << std::endl;
  std::string str;
  google::protobuf::TextFormat::PrintToString(param, &str);
  std::cout << str << std::endl;
  EXPECT_TRUE(true);
}

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/test/test_solver_factory.cpp:
--------------------------------------------------------------------------------
#include <map>
#include <string>

#include "boost/scoped_ptr.hpp"
#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/solver.hpp"
#include "caffe/solver_factory.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

template <typename TypeParam>
class SolverFactoryTest : public MultiDeviceTest<TypeParam> {
 protected:
  SolverParameter simple_solver_param() {
    const string solver_proto =
        "train_net_param { "
        "  layer { "
        "    name: 'data' type: 'DummyData' top: 'data' "
        "    dummy_data_param { shape { dim: 1 } } "
        "  } "
        "} ";
    SolverParameter solver_param;
    CHECK(google::protobuf::TextFormat::ParseFromString(
        solver_proto, &solver_param));
    return solver_param;
  }
};

TYPED_TEST_CASE(SolverFactoryTest, TestDtypesAndDevices);

TYPED_TEST(SolverFactoryTest, TestCreateSolver) {
  typedef typename TypeParam::Dtype Dtype;
  typename SolverRegistry<Dtype>::CreatorRegistry& registry =
      SolverRegistry<Dtype>::Registry();
  shared_ptr<Solver<Dtype> > solver;
  SolverParameter solver_param = this->simple_solver_param();
  for (typename SolverRegistry<Dtype>::CreatorRegistry::iterator iter =
       registry.begin(); iter != registry.end(); ++iter) {
    solver_param.set_type(iter->first);
    solver.reset(SolverRegistry<Dtype>::CreateSolver(solver_param));
    EXPECT_EQ(iter->first, solver->type());
  }
}

}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/util/cudnn.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include "caffe/util/cudnn.hpp"

namespace caffe {
namespace cudnn {

float dataType<float>::oneval = 1.0;
float dataType<float>::zeroval = 0.0;
const void* dataType<float>::one =
    static_cast<void *>(&dataType<float>::oneval);
const void* dataType<float>::zero =
    static_cast<void *>(&dataType<float>::zeroval);

double dataType<double>::oneval = 1.0;
double dataType<double>::zeroval = 0.0;
const void* dataType<double>::one =
    static_cast<void *>(&dataType<double>::oneval);
const void* dataType<double>::zero =
    static_cast<void *>(&dataType<double>::zeroval);

}  // namespace cudnn
}  // namespace caffe
#endif

--------------------------------------------------------------------------------
/caffe-master/src/caffe/util/db.cpp:
--------------------------------------------------------------------------------
#include "caffe/util/db.hpp"
#include "caffe/util/db_leveldb.hpp"
#include "caffe/util/db_lmdb.hpp"

#include <string>

namespace caffe { namespace db {

DB* GetDB(DataParameter::DB backend) {
  switch (backend) {
#ifdef USE_LEVELDB
  case DataParameter_DB_LEVELDB:
    return new LevelDB();
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  case DataParameter_DB_LMDB:
    return new LMDB();
#endif  // USE_LMDB
  default:
    LOG(FATAL) << "Unknown database backend";
    return NULL;
  }
}

DB* GetDB(const string& backend) {
#ifdef USE_LEVELDB
  if (backend == "leveldb") {
    return new LevelDB();
  }
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  if (backend == "lmdb") {
    return new LMDB();
  }
#endif  // USE_LMDB
  LOG(FATAL) << "Unknown database backend";
  return NULL;
}

}  // namespace db
}  // namespace caffe

--------------------------------------------------------------------------------
/caffe-master/src/caffe/util/db_leveldb.cpp:
--------------------------------------------------------------------------------
#ifdef USE_LEVELDB
#include "caffe/util/db_leveldb.hpp"

#include <string>

namespace caffe { namespace db {

void LevelDB::Open(const string& source, Mode mode) {
  leveldb::Options options;
  options.block_size = 65536;
  options.write_buffer_size = 268435456;
  options.max_open_files = 100;
  options.error_if_exists = mode == NEW;
  options.create_if_missing = mode != READ;
  leveldb::Status status = leveldb::DB::Open(options, source, &db_);
  CHECK(status.ok()) << "Failed to open leveldb " << source
                     << std::endl << status.ToString();
  LOG(INFO) << "Opened leveldb " << source;
}

}  // namespace db
}  // namespace caffe
#endif  // USE_LEVELDB

--------------------------------------------------------------------------------
/caffe-master/src/caffe/util/tree.cpp:
--------------------------------------------------------------------------------
#include "caffe/util/tree.hpp"
#include <fstream>
#include <iostream>
#include <string>

using namespace std;

tree::tree()
{

}

tree::tree(string filename)
{
    //char file[20] = filename.c_str();
    //std::cout<< "filename: " << filename << std::endl;
    std::ifstream fin(filename.c_str());
    string id;
    int parent = -1;
    int last_parent = -1;
    int group_size = 0;
    int groups = 0;
    int n = 0;
    while (fin >> id >> parent)
    {
        //parent = std::atoi(line.substr(10).c_str());
        //std::cout<< line << " p:" << parent << std::endl;
        this->parent.push_back(parent);
this->child.push_back(-1); 41 | this->name.push_back(id); 42 | 43 | if (parent != last_parent) 44 | { 45 | ++groups; 46 | this->group_offset.push_back(n - group_size); 47 | this->group_size.push_back(group_size); 48 | group_size = 0; 49 | last_parent = parent; 50 | } 51 | this->group.push_back(groups); 52 | if (parent >= 0) 53 | { 54 | this->child[parent] = groups; 55 | } 56 | ++n; 57 | ++group_size; 58 | } 59 | ++groups; 60 | this->group_offset.push_back(n-group_size); 61 | this->group_size.push_back(group_size); 62 | this->n = n; 63 | this->groups = groups; 64 | 65 | for (int i = 0; i < n; ++ i) 66 | this->leaf.push_back(1); 67 | for (int i = 0; i < n; ++ i) 68 | if (this->parent[i] >= 0) 69 | this->leaf[this->parent[i]] = 0; 70 | 71 | fin.close(); 72 | //std::cout<< "groups: "<< groups << "\t" << this->groups << std::endl; 73 | } 74 | -------------------------------------------------------------------------------- /caffe-master/src/gtest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) 2 | caffe_default_properties(gtest) 3 | 4 | #add_library(gtest_main gtest_main.cc) 5 | #target_link_libraries(gtest_main gtest) 6 | -------------------------------------------------------------------------------- /caffe-master/src/gtest/gtest_main.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2006, Google Inc. 2 | // All rights reserved. 3 | // 4 | // Redistribution and use in source and binary forms, with or without 5 | // modification, are permitted provided that the following conditions are 6 | // met: 7 | // 8 | // * Redistributions of source code must retain the above copyright 9 | // notice, this list of conditions and the following disclaimer. 10 | // * Redistributions in binary form must reproduce the above 11 | // copyright notice, this list of conditions and the following disclaimer 12 | // in the documentation and/or other materials provided with the 13 | // distribution. 14 | // * Neither the name of Google Inc. nor the names of its 15 | // contributors may be used to endorse or promote products derived from 16 | // this software without specific prior written permission. 17 | // 18 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | 30 | #include <iostream> 31 | 32 | #include "gtest/gtest.h" 33 | 34 | GTEST_API_ int main(int argc, char **argv) { 35 | std::cout << "Running main() from gtest_main.cc\n"; 36 | 37 | testing::InitGoogleTest(&argc, argv); 38 | return RUN_ALL_TESTS(); 39 | } 40 | -------------------------------------------------------------------------------- /caffe-master/tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Collect source files 2 | file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 3 | 4 | # Build each source file independently 5 | foreach(source ${srcs}) 6 | get_filename_component(name ${source} NAME_WE) 7 | 8 | # caffe target already exists 9 | if(name MATCHES "caffe") 10 | set(name ${name}.bin) 11 | endif() 12 | 13 | # target 14 | add_executable(${name} ${source}) 15 | target_link_libraries(${name} ${Caffe_LINK}) 16 | caffe_default_properties(${name}) 17 | 18 | # set back RUNTIME_OUTPUT_DIRECTORY 19 | caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools") 20 | caffe_set_solution_folder(${name} tools) 21 | 22 | # restore output name without suffix 23 | if(name MATCHES "caffe.bin") 24 | set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe) 25 | endif() 26 | 27 | # Install 28 | install(TARGETS ${name} DESTINATION bin) 29 | endforeach(source) 30 | -------------------------------------------------------------------------------- /caffe-master/tools/caffe.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/tools/caffe.cpp -------------------------------------------------------------------------------- /caffe-master/tools/device_query.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/common.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe device_query " 5 | "[--device_id=0] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-master/tools/extra/launch_resize_and_crop_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer 3 | 4 | # If you encounter an error that the address is already in use, kill the process. 5 | # 11235 is the port of the server process 6 | # https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py 7 | # sudo netstat -ap | grep 11235 8 | # The last column of the output is PID/Program name 9 | # kill -9 PID 10 | # Second solution: 11 | # nmap localhost 12 | # fuser -k 11235/tcp 13 | # Or just wait a few seconds. 14 | 15 | ## Launch your Mapreduce locally 16 | # num_clients: number of processes 17 | # image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV.
18 | # input: the file listing one image path per line, relative to input_folder 19 | # input_folder: where the original images are 20 | # output_folder: where to save the resized and cropped images 21 | ./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 22 | 23 | ## Launch your Mapreduce with MPI 24 | # mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 25 | -------------------------------------------------------------------------------- /caffe-master/tools/extra/parse_log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: parse_log.sh caffe.log 3 | # It creates the following two text files, each containing a table: 4 | # caffe.log.test (columns: '#Iters Seconds TestAccuracy TestLoss') 5 | # caffe.log.train (columns: '#Iters Seconds TrainingLoss LearningRate') 6 | 7 | 8 | # get the dirname of the script 9 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 10 | 11 | if [ "$#" -lt 1 ] 12 | then 13 | echo "Usage: parse_log.sh /path/to/your.log" 14 | exit 15 | fi 16 | LOG=`basename $1` 17 | sed -n '/Iteration .* Testing net/,/Iteration *. loss/p' $1 > aux.txt 18 | sed -i '/Waiting for data/d' aux.txt 19 | sed -i '/prefetch queue empty/d' aux.txt 20 | sed -i '/Iteration .* loss/d' aux.txt 21 | sed -i '/Iteration .* lr/d' aux.txt 22 | sed -i '/Train net/d' aux.txt 23 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt 24 | grep 'Test net output #0' aux.txt | awk '{print $11}' > aux1.txt 25 | grep 'Test net output #1' aux.txt | awk '{print $11}' > aux2.txt 26 | 27 | # Extracting elapsed seconds 28 | # For extraction of time since this line contains the start time 29 | grep '] Solving ' $1 > aux3.txt 30 | grep 'Testing net' $1 >> aux3.txt 31 | $DIR/extract_seconds.py aux3.txt aux4.txt 32 | 33 | # Generating the test table 34 | echo '#Iters Seconds TestAccuracy TestLoss'> $LOG.test 35 | paste aux0.txt aux4.txt aux1.txt aux2.txt | column -t >> $LOG.test 36 | rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt 37 | 38 | # For extraction of time since this line contains the start time 39 | grep '] Solving ' $1 > aux.txt 40 | grep ', loss = ' $1 >> aux.txt 41 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt 42 | grep ', loss = ' $1 | awk '{print $9}' > aux1.txt 43 | grep ', lr = ' $1 | awk '{print $9}' > aux2.txt 44 | 45 | # Extracting elapsed seconds 46 | $DIR/extract_seconds.py aux.txt aux3.txt 47 | 48 | # Generating the train table 49 | echo '#Iters Seconds TrainingLoss LearningRate'> $LOG.train 50 | paste aux0.txt aux3.txt aux1.txt aux2.txt | column -t >> $LOG.train 51 | rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt 52 | -------------------------------------------------------------------------------- /caffe-master/tools/finetune_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--weights=...]
instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-master/tools/net_speed_benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-master/tools/test_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-master/tools/train_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--snapshot=...] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-master/tools/upgrade_net_proto_binary.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 2 | // Usage: 3 | // upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out 4 | 5 | #include 6 | #include // NOLINT(readability/streams) 7 | #include // NOLINT(readability/streams) 8 | #include 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: " 23 | << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | NetParameter net_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromBinaryFile(input_filename, &net_param)) { 30 | LOG(ERROR) << "Failed to parse input binary file as NetParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = NetNeedsUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | WriteProtoToBinaryFile(net_param, argv[2]); 47 | 48 | LOG(INFO) << "Wrote upgraded NetParameter binary proto to " << argv[2]; 49 | return !success; 50 | } 51 | -------------------------------------------------------------------------------- /caffe-master/tools/upgrade_net_proto_text.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 
2 | // Usage: 3 | // upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: " 23 | << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | NetParameter net_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromTextFile(input_filename, &net_param)) { 30 | LOG(ERROR) << "Failed to parse input text file as NetParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = NetNeedsUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | // Save new format prototxt. 47 | WriteProtoToTextFile(net_param, argv[2]); 48 | 49 | LOG(INFO) << "Wrote upgraded NetParameter text proto to " << argv[2]; 50 | return !success; 51 | } 52 | -------------------------------------------------------------------------------- /caffe-master/tools/upgrade_solver_proto_text.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade old solver prototxts to the new format. 2 | // Usage: 3 | // upgrade_solver_proto_text old_solver_proto_file_in solver_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: upgrade_solver_proto_text " 23 | << "old_solver_proto_file_in solver_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | SolverParameter solver_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromTextFile(input_filename, &solver_param)) { 30 | LOG(ERROR) << "Failed to parse input text file as SolverParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = SolverNeedsTypeUpgrade(solver_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeSolverAsNeeded(input_filename, &solver_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | // Save new format prototxt.
47 | WriteProtoToTextFile(solver_param, argv[2]); 48 | 49 | LOG(INFO) << "Wrote upgraded SolverParameter text proto to " << argv[2]; 50 | return !success; 51 | } 52 | -------------------------------------------------------------------------------- /caffe-master/windows/CommonSettings.targets: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 10 | 11 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/AssemblyInfo.cpp: -------------------------------------------------------------------------------- 1 | #include "stdafx.h" 2 | 3 | using namespace System; 4 | using namespace System::Reflection; 5 | using namespace System::Runtime::CompilerServices; 6 | using namespace System::Runtime::InteropServices; 7 | using namespace System::Security::Permissions; 8 | 9 | // 10 | // General Information about an assembly is controlled through the following 11 | // set of attributes. Change these attribute values to modify the information 12 | // associated with an assembly. 13 | // 14 | [assembly:AssemblyTitleAttribute(L"CaffeLib")]; 15 | [assembly:AssemblyDescriptionAttribute(L"")]; 16 | [assembly:AssemblyConfigurationAttribute(L"")]; 17 | [assembly:AssemblyCompanyAttribute(L"")]; 18 | [assembly:AssemblyProductAttribute(L"CaffeLib")]; 19 | [assembly:AssemblyCopyrightAttribute(L"Copyright (c) 2015")]; 20 | [assembly:AssemblyTrademarkAttribute(L"")]; 21 | [assembly:AssemblyCultureAttribute(L"")]; 22 | 23 | // 24 | // Version information for an assembly consists of the following four values: 25 | // 26 | // Major Version 27 | // Minor Version 28 | // Build Number 29 | // Revision 30 | // 31 | // You can specify all the values or you can default the Revision and Build Numbers 32 | // by using the '*' as shown below: 33 | 34 | [assembly:AssemblyVersionAttribute("1.0.*")]; 35 | 36 | [assembly:ComVisible(false)]; 37 | 38 | [assembly:CLSCompliantAttribute(true)]; -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/Stdafx.cpp: -------------------------------------------------------------------------------- 1 | // stdafx.cpp : source file that includes just the standard includes 2 | // caffeLibMC.pch will be the pre-compiled header 3 | // stdafx.obj will contain the pre-compiled type information 4 | 5 | #include "stdafx.h" 6 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/Stdafx.h: -------------------------------------------------------------------------------- 1 | // stdafx.h : include file for standard system include files, 2 | // or project specific include files that are used frequently, 3 | // but are changed infrequently 4 | 5 | #pragma once 6 | #include <string> 7 | #ifndef CPU_ONLY 8 | #include <cuda_runtime.h> 9 | #endif 10 | #include "_CaffeModel.h" 11 | 12 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/_CaffeModel.h: -------------------------------------------------------------------------------- 1 | // Due to a bug caused by C++/CLI and boost (used indirectly via caffe headers, not this one), 2 | // we have to separate code related to boost from the CLI compiling environment. 3 | // This wrapper class serves this purpose.
4 | // See: http://stackoverflow.com/questions/8144630/mixed-mode-c-cli-dll-throws-exception-on-exit 5 | // and http://article.gmane.org/gmane.comp.lib.boost.user/44515/match=string+binding+invalid+mixed 6 | 7 | #pragma once 8 | 9 | #include <string> 10 | #include <vector> 11 | 12 | //Declare an abstract Net class instead of including caffe headers, which include boost headers. 13 | //Net itself is defined in the cpp code, which does include caffe header files. 14 | namespace caffe 15 | { 16 | template <typename Dtype> 17 | class Net; 18 | } 19 | 20 | struct FloatArray 21 | { 22 | const float* Data; 23 | int Size; 24 | FloatArray(const float* data, int size); 25 | }; 26 | 27 | typedef std::vector<float> FloatVec; 28 | 29 | class _CaffeModel 30 | { 31 | caffe::Net<float>* _net; 32 | 33 | public: 34 | static void SetDevice(int device_id); //Use a negative number for CPU only 35 | 36 | _CaffeModel(const std::string &netFile, const std::string &modelFile); 37 | ~_CaffeModel(); 38 | 39 | int GetInputImageWidth(); 40 | int GetInputImageHeight(); 41 | int GetInputImageChannels(); 42 | 43 | //REVIEW ktran: these APIs only make sense for images 44 | FloatArray ExtractOutputs(const std::string &imageFile, int interpolation, const std::string &layerName); 45 | std::vector<FloatArray> ExtractOutputs(const std::string &imageFile, int interpolation, const std::vector<std::string> &layerNames); 46 | 47 | // imageData needs to be of size channel*height*width as required by the "data" blob. 48 | // The C++/CLI caller can use GetInputImageWidth()/Height/Channels to get the desired dimension. 49 | FloatArray ExtractBitmapOutputs(const std::string &imageData, int interpolation, const std::string &layerName); 50 | std::vector<FloatArray> ExtractBitmapOutputs(const std::string &imageData, int interpolation, const std::vector<std::string> &layerNames); 51 | 52 | }; 53 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/caffe.managed.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;hm;inl;inc;xsd 11 | 12 | 13 | 14 | 15 | Header Files 16 | 17 | 18 | Header Files 19 | 20 | 21 | 22 | 23 | Source Files 24 | 25 | 26 | Source Files 27 | 28 | 29 | Source Files 30 | 31 | 32 | Source Files 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe.managed/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/caffe/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/classification/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/compute_image_mean/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 |
7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/convert_box_data/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/convert_cifar_data/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/convert_imageset/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/convert_mnist_data/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/convert_mnist_siamese_data/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/extract_features/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/libcaffe/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /caffe-master/windows/matcaffe/matcaffe.def: -------------------------------------------------------------------------------- 1 | LIBRARY "caffe_.mexw64" 2 | EXPORTS mexFunction 3 | -------------------------------------------------------------------------------- /caffe-master/windows/matcaffe/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /caffe-master/windows/nuget.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /caffe-master/windows/pycaffe/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /caffe-master/windows/scripts/BinplaceCudaDependencies.cmd: -------------------------------------------------------------------------------- 1 | set CUDA_TOOLKIT_BIN_DIR=%~1% 2 | set CUDNN_PATH=%~2% 3 | set IS_CPU_ONLY_BUILD=%3% 4 | set 
USE_CUDNN=%4% 5 | set OUTPUT_DIR=%~5% 6 | 7 | if %IS_CPU_ONLY_BUILD% == true ( 8 | echo BinplaceCudaDependencies : CPU only build, don't copy cuda dependencies. 9 | ) else ( 10 | echo BinplaceCudaDependencies : Copy cudart*.dll, cublas*.dll, curand*.dll to output. 11 | 12 | copy /y "%CUDA_TOOLKIT_BIN_DIR%\cudart*.dll" "%OUTPUT_DIR%" 13 | copy /y "%CUDA_TOOLKIT_BIN_DIR%\cublas*.dll" "%OUTPUT_DIR%" 14 | copy /y "%CUDA_TOOLKIT_BIN_DIR%\curand*.dll" "%OUTPUT_DIR%" 15 | 16 | if %USE_CUDNN% == true ( 17 | echo BinplaceCudaDependencies : Copy cudnn*.dll to output. 18 | 19 | if "%CUDNN_PATH%" == "" ( 20 | copy /y "%CUDA_TOOLKIT_BIN_DIR%\cudnn*.dll" "%OUTPUT_DIR%" 21 | ) else ( 22 | copy /y "%CUDNN_PATH%\cuda\bin\cudnn*.dll" "%OUTPUT_DIR%" 23 | ) 24 | ) else ( 25 | echo BinplaceCudaDependencies : cuDNN isn't enabled. 26 | ) 27 | ) -------------------------------------------------------------------------------- /caffe-master/windows/scripts/FixGFlagsNaming.cmd: -------------------------------------------------------------------------------- 1 | :: The GLog nuget package has a dependency on the GFlags nuget package, 2 | :: and Caffe also has a direct dependency on GFlags. 3 | :: Unfortunately, the GLog nuget package declares its GFlags dll dependency with the wrong name: 4 | :: Caffe needs gflags.dll/gflagsd.dll in release/debug, 5 | :: while GLog needs libgflags.dll/libgflags-debug.dll in release/debug. 6 | :: This script works around the issue by hard-linking the expected name to the existing dll. 7 | 8 | set OUTPUT_DIR=%~1% 9 | set BUILD_CONFIG=%2% 10 | 11 | if %BUILD_CONFIG% == Release ( 12 | set originalDllName=gflags.dll 13 | set newDllName=libgflags.dll 14 | ) else ( 15 | set originalDllName=gflagsd.dll 16 | set newDllName=libgflags-debug.dll 17 | ) 18 | 19 | if exist "%OUTPUT_DIR%\%newDllName%" ( 20 | echo FixGFlagsNaming.cmd : "%newDllName%" already exists 21 | ) else ( 22 | echo FixGFlagsNaming.cmd : mklink /H "%OUTPUT_DIR%\%newDllName%" "%OUTPUT_DIR%\%originalDllName%" 23 | mklink /H "%OUTPUT_DIR%\%newDllName%" "%OUTPUT_DIR%\%originalDllName%" 24 | ) -------------------------------------------------------------------------------- /caffe-master/windows/scripts/MatlabPostBuild.cmd: -------------------------------------------------------------------------------- 1 | set SOLUTION_DIR=%~1% 2 | set OUTPUT_DIR=%~2% 3 | 4 | echo MatlabPostBuild.cmd : copy matlab generated scripts to output. 5 | 6 | @echo run_tests.m > "%temp%\excludelist.txt" 7 | xcopy /y "%SOLUTION_DIR%..\matlab\+caffe\*.m" "%OUTPUT_DIR%matcaffe\+caffe" /exclude:%temp%\excludelist.txt 8 | copy /y "%SOLUTION_DIR%..\matlab\+caffe\private\*.m" "%OUTPUT_DIR%matcaffe\+caffe\private" 9 | move /y "%OUTPUT_DIR%caffe_.*" "%OUTPUT_DIR%matcaffe\+caffe\private" 10 | -------------------------------------------------------------------------------- /caffe-master/windows/scripts/MatlabPreBuild.cmd: -------------------------------------------------------------------------------- 1 | set SOLUTION_DIR=%~1% 2 | set OUTPUT_DIR=%~2% 3 | 4 | echo MatlabPreBuild.cmd : Create output directories for matlab scripts.
5 | 6 | if not exist "%OUTPUT_DIR%\matcaffe" mkdir "%OUTPUT_DIR%\matcaffe" 7 | if not exist "%OUTPUT_DIR%\matcaffe\+caffe" mkdir "%OUTPUT_DIR%\matcaffe\+caffe" 8 | if not exist "%OUTPUT_DIR%\matcaffe\+caffe\private" mkdir "%OUTPUT_DIR%\matcaffe\+caffe\private" 9 | -------------------------------------------------------------------------------- /caffe-master/windows/scripts/ProtoCompile.cmd: -------------------------------------------------------------------------------- 1 | set SOLUTION_DIR=%~1% 2 | set PROTO_DIR=%~2% 3 | 4 | set INCLUDE_PROTO_DIR=%SOLUTION_DIR%..\include\caffe\proto 5 | SET SRC_PROTO_DIR=%SOLUTION_DIR%..\src\caffe\proto 6 | set PROTO_TEMP_DIR=%SRC_PROTO_DIR%\temp 7 | 8 | echo ProtoCompile.cmd : Create proto temp directory "%PROTO_TEMP_DIR%" 9 | mkdir "%PROTO_TEMP_DIR%" 10 | 11 | echo ProtoCompile.cmd : Generating "%PROTO_TEMP_DIR%\caffe.pb.h" and "%PROTO_TEMP_DIR%\caffe.pb.cc" 12 | "%PROTO_DIR%protoc" --proto_path="%SRC_PROTO_DIR%" --cpp_out="%PROTO_TEMP_DIR%" "%SRC_PROTO_DIR%\caffe.proto" 13 | 14 | echo ProtoCompile.cmd : Create proto include directory 15 | mkdir "%INCLUDE_PROTO_DIR%" 16 | 17 | echo ProtoCompile.cmd : Compare newly compiled caffe.pb.h with existing one 18 | fc /b "%PROTO_TEMP_DIR%\caffe.pb.h" "%INCLUDE_PROTO_DIR%\caffe.pb.h" > NUL 19 | 20 | if errorlevel 1 ( 21 | echo ProtoCompile.cmd : Move newly generated caffe.pb.h to "%INCLUDE_PROTO_DIR%\caffe.pb.h" 22 | echo ProtoCompile.cmd : and caffe.pb.cc to "%SRC_PROTO_DIR%\caffe.pb.cc" 23 | move /y "%PROTO_TEMP_DIR%\caffe.pb.h" "%INCLUDE_PROTO_DIR%\caffe.pb.h" 24 | move /y "%PROTO_TEMP_DIR%\caffe.pb.cc" "%SRC_PROTO_DIR%\caffe.pb.cc" 25 | ) 26 | 27 | rmdir /S /Q "%PROTO_TEMP_DIR%" -------------------------------------------------------------------------------- /caffe-master/windows/scripts/PythonPostBuild.cmd: -------------------------------------------------------------------------------- 1 | set SOLUTION_DIR=%~1% 2 | set OUTPUT_DIR=%~2% 3 | 4 | echo PythonPostBuild.cmd : copy python generated scripts to output. 5 | 6 | copy /y "%SOLUTION_DIR%..\python\caffe\*.py" "%OUTPUT_DIR%pycaffe\caffe" 7 | copy /y "%SOLUTION_DIR%..\python\*.py" "%OUTPUT_DIR%pycaffe" 8 | move /y "%OUTPUT_DIR%_caffe.*" "%OUTPUT_DIR%pycaffe\caffe" 9 | copy /y "%OUTPUT_DIR%\*.dll" "%OUTPUT_DIR%pycaffe\caffe" -------------------------------------------------------------------------------- /caffe-master/windows/scripts/PythonPreBuild.cmd: -------------------------------------------------------------------------------- 1 | set SOLUTION_DIR=%~1% 2 | set PROTO_COMPILER_DIR=%~2% 3 | set OUTPUT_DIR=%~3% 4 | 5 | echo PythonPreBuild.cmd : Create output directories for python scripts. 6 | 7 | if not exist "%OUTPUT_DIR%\pycaffe" mkdir "%OUTPUT_DIR%\pycaffe" 8 | if not exist "%OUTPUT_DIR%\pycaffe\caffe" mkdir "%OUTPUT_DIR%\pycaffe\caffe" 9 | if not exist "%OUTPUT_DIR%\pycaffe\caffe\proto" mkdir "%OUTPUT_DIR%\pycaffe\caffe\proto" 10 | 11 | echo PythonPreBuild.cmd : Create dummy __init__.py file 12 | rem. 
> "%OUTPUT_DIR%\pycaffe\caffe\proto\__init__.py" 13 | 14 | echo PythonPreBuild.cmd : Generating src\caffe\proto\caffe.pb.h with python bindings 15 | "%PROTO_COMPILER_DIR%\protoc" "%SOLUTION_DIR%\..\src\caffe\proto\caffe.proto" --proto_path="%SOLUTION_DIR%\..\src\caffe\proto" --python_out="%OUTPUT_DIR%\pycaffe\caffe\proto" -------------------------------------------------------------------------------- /caffe-master/windows/test_MTCNN/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/test_all/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/test_yolo/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/test_yolo/test_yolo_a1.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/windows/test_yolo/test_yolo_a1.cpp -------------------------------------------------------------------------------- /caffe-master/windows/test_yolo/yolotestresult.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChriswooTalent/Yolo_on_Caffe/00337827a55615cda1bfd5f871aea1c07de4bd26/caffe-master/windows/test_yolo/yolotestresult.jpg -------------------------------------------------------------------------------- /caffe-master/windows/test_yolo_v2/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/upgrade_net_proto_binary/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/upgrade_net_proto_text/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /caffe-master/windows/upgrade_solver_proto_text/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | --------------------------------------------------------------------------------