├── .Doxyfile ├── .gitignore ├── .travis.yml ├── CMakeLists.txt ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config.example ├── README.md ├── caffe.cloc ├── cmake ├── ConfigGen.cmake ├── Cuda.cmake ├── Dependencies.cmake ├── External │ ├── gflags.cmake │ └── glog.cmake ├── Misc.cmake ├── Modules │ ├── FindAtlas.cmake │ ├── FindGFlags.cmake │ ├── FindGlog.cmake │ ├── FindLAPACK.cmake │ ├── FindLMDB.cmake │ ├── FindLevelDB.cmake │ ├── FindMKL.cmake │ ├── FindMatlabMex.cmake │ ├── FindNCCL.cmake │ ├── FindNumPy.cmake │ ├── FindOpenBLAS.cmake │ ├── FindSnappy.cmake │ └── FindvecLib.cmake ├── ProtoBuf.cmake ├── Summary.cmake ├── Targets.cmake ├── Templates │ ├── CaffeConfig.cmake.in │ ├── CaffeConfigVersion.cmake.in │ └── caffe_config.h.in ├── Utils.cmake └── lint.cmake ├── examples ├── 00-classification.ipynb ├── 01-learning-lenet.ipynb ├── 02-fine-tuning.ipynb ├── CMakeLists.txt ├── FRCNN │ ├── calculate_f1.py-bad │ ├── calculate_recall.py │ ├── calculate_voc_ap.py │ ├── config │ │ ├── default_config.json │ │ ├── face_detection_config.json │ │ ├── imagenetvid_config.json │ │ ├── reference.json │ │ └── voc_config.json │ ├── convert_frcnn_model.sh │ ├── convert_model.py │ ├── convert_model.sh │ ├── dataset │ │ ├── voc2007.test │ │ └── voc2007.trainval │ ├── demo_frcnn.sh │ ├── demo_frcnn_api.cpp │ ├── fetch_imagenet_models.sh │ ├── fpn-res50 │ │ ├── generate_anchors.py │ │ ├── mix_config.json │ │ ├── solver.proto │ │ ├── test.sh │ │ ├── test_merged-dcn.proto │ │ ├── test_merged.proto │ │ ├── train.sh │ │ ├── train_val_merged-dcn-ohem.proto │ │ └── train_val_merged-ohem.proto │ ├── fpn │ │ ├── fpn-vgg16-test.proto │ │ ├── fpn-vgg16-train.proto │ │ └── solver.proto │ ├── generate_anchors.py │ ├── googlenet_v1 │ │ ├── solver.proto │ │ ├── test.proto │ │ └── train.proto │ ├── images │ │ ├── 000456.jpg │ │ ├── 000542.jpg │ │ ├── 001150.jpg │ │ ├── 001763.jpg │ │ └── 004545.jpg │ ├── light-cascade │ │ ├── test_merged-light-atrous.proto │ │ └── train_val_merged-light-ohem.proto │ ├── light-rfcn-dcn │ │ ├── test_merged-light-atrous.proto │ │ └── train_val_merged-light-ohem.proto │ ├── light-rfcn-res50 │ │ ├── mix_config.json │ │ ├── solver.proto │ │ ├── test.sh │ │ ├── test_merged-light-atrous.proto │ │ ├── test_merged-rfcn++-stack.proto │ │ ├── test_merged-rfcn++.proto │ │ ├── train.sh │ │ ├── train_val_merged-light-ohem.proto │ │ ├── train_val_merged-rfcn++-stack.proto │ │ └── train_val_merged-rfcn++.proto │ ├── loc_merge_frcnn.cpp │ ├── loc_test_frcnn.cpp │ ├── log │ │ └── .gitignore │ ├── merge_resnet.sh │ ├── merge_resnet_deploy.py │ ├── res101 │ │ ├── README.md │ │ ├── ResNet-101-deploy.prototxt │ │ ├── solver.proto │ │ ├── test.proto │ │ ├── test_frcnn.sh │ │ ├── train_frcnn.sh │ │ └── train_val.proto │ ├── res101_rpn │ │ ├── solver.proto │ │ ├── test.proto │ │ └── train.proto │ ├── res152 │ │ ├── ResNet-152-deploy.prototxt │ │ ├── solver.proto │ │ ├── test.proto │ │ └── train.proto │ ├── res152_rpn │ │ ├── solver.proto │ │ ├── test.proto │ │ └── train.proto │ ├── res50 │ │ ├── backup │ │ │ ├── ResNet-50-deploy.prototxt │ │ │ ├── solver.proto │ │ │ ├── test.proto │ │ │ ├── test_merged.pt │ │ │ ├── train_val.proto │ │ │ └── train_val_merged.pt │ │ ├── gen_merged_model.py │ │ ├── merge1.sh │ │ ├── restore_train.sh │ │ ├── test.proto │ │ ├── test_frcnn.sh │ │ ├── test_merged.proto │ │ ├── train_frcnn.sh │ │ ├── train_val.proto │ │ └── train_val_merged.proto │ ├── results │ │ └── .gitignore │ ├── rfcn-cascade │ │ ├── test_merged-atrous.proto │ │ └── 
train_val_merged-atrous-ohem.proto │ ├── rfcn-dcn │ │ ├── test_merged-atrous.proto │ │ └── train_val_merged-atrous-ohem.proto │ ├── rfcn-res50 │ │ ├── mix_config.json │ │ ├── solver.proto │ │ ├── test.sh │ │ ├── test_merged-atrous.proto │ │ ├── test_merged.proto │ │ ├── train.sh │ │ ├── train_val_merged-atrous-ohem.proto │ │ ├── train_val_merged-atrous.proto │ │ └── train_val_merged.proto │ ├── rfcn-resnext │ │ ├── solver.proto │ │ ├── test_resnext101-32x4d-merge.proto │ │ ├── test_resnext50-32x4d-merge.proto │ │ ├── test_resnext50-32x4d-rfcn++.proto │ │ ├── train_resnext101-32x4d-merge.proto │ │ ├── train_resnext50-32x4d-merge.proto │ │ └── train_resnext50-32x4d-rfcn++.proto │ ├── test_frcnn.cpp │ ├── test_frcnn_cascade.cpp │ ├── test_list.cpp │ ├── test_rpn.cpp │ ├── test_rpn.sh │ ├── train_rpn.sh │ ├── truth_test_frcnn.cpp │ ├── vgg16 │ │ ├── restore.sh │ │ ├── solver.proto │ │ ├── test.proto │ │ ├── test_frcnn.sh │ │ ├── train_frcnn.sh │ │ └── train_val.proto │ ├── vgg16_rpn │ │ ├── solver.prototxt │ │ ├── test.prototxt │ │ └── train_val.prototxt │ ├── vgg19 │ │ ├── solver.proto │ │ ├── test.proto │ │ └── train.proto │ ├── zf │ │ ├── solver.prototxt │ │ ├── test.prototxt │ │ ├── test_frcnn.sh │ │ ├── train_frcnn.sh │ │ ├── train_val.prototxt │ │ ├── vid_solver.prototxt │ │ └── vid_train.proto │ └── zf_rpn │ │ ├── solver.prototxt │ │ ├── test.prototxt │ │ └── train_val.prototxt ├── Makefile.config.centos ├── Makefile.config.my ├── YOLO │ ├── cfg.py │ ├── darknet2caffe.py │ ├── demo_yolov3.cpp │ ├── demo_yolov3.sh │ ├── prototxt.py │ └── shrink_bn_caffe.py ├── brewing-logreg.ipynb ├── cifar10 │ ├── cifar10_full.prototxt │ ├── cifar10_full_sigmoid_solver.prototxt │ ├── cifar10_full_sigmoid_solver_bn.prototxt │ ├── cifar10_full_sigmoid_train_test.prototxt │ ├── cifar10_full_sigmoid_train_test_bn.prototxt │ ├── cifar10_full_solver.prototxt │ ├── cifar10_full_solver_lr1.prototxt │ ├── cifar10_full_solver_lr2.prototxt │ ├── cifar10_full_train_test.prototxt │ ├── cifar10_quick.prototxt │ ├── cifar10_quick_solver.prototxt │ ├── cifar10_quick_solver_lr1.prototxt │ ├── cifar10_quick_train_test.prototxt │ ├── convert_cifar_data.cpp │ ├── create_cifar10.sh │ ├── readme.md │ ├── train_full.sh │ ├── train_full_sigmoid.sh │ ├── train_full_sigmoid_bn.sh │ └── train_quick.sh ├── cifar100 │ ├── PadCifar100.py │ ├── convert_cifar100_data.cpp │ └── create_cifar100.sh ├── cpp_classification │ ├── classification.cpp │ └── readme.md ├── detection.ipynb ├── feature_extraction │ ├── imagenet_val.prototxt │ └── readme.md ├── finetune_flickr_style │ ├── assemble_data.py │ ├── flickr_style.csv.gz │ ├── readme.md │ └── style_names.txt ├── finetune_pascal_detection │ ├── pascal_finetune_solver.prototxt │ └── pascal_finetune_trainval_test.prototxt ├── hdf5_classification │ ├── nonlinear_auto_test.prototxt │ ├── nonlinear_auto_train.prototxt │ ├── nonlinear_train_val.prototxt │ └── train_val.prototxt ├── imagenet │ ├── create_imagenet.sh │ ├── make_imagenet_mean.sh │ ├── readme.md │ ├── resume_training.sh │ └── train_caffenet.sh ├── images │ ├── cat gray.jpg │ ├── cat.jpg │ ├── cat_gray.jpg │ └── fish-bike.jpg ├── mnist │ ├── convert_mnist_data.cpp │ ├── create_mnist.sh │ ├── lenet.prototxt │ ├── lenet_adadelta_solver.prototxt │ ├── lenet_auto_solver.prototxt │ ├── lenet_consolidated_solver.prototxt │ ├── lenet_multistep_solver.prototxt │ ├── lenet_solver.prototxt │ ├── lenet_solver_adam.prototxt │ ├── lenet_solver_rmsprop.prototxt │ ├── lenet_train_test.prototxt │ ├── mnist_autoencoder.prototxt │ ├── 
mnist_autoencoder_solver.prototxt │ ├── mnist_autoencoder_solver_adadelta.prototxt │ ├── mnist_autoencoder_solver_adagrad.prototxt │ ├── mnist_autoencoder_solver_nesterov.prototxt │ ├── readme.md │ ├── train_lenet.sh │ ├── train_lenet_adam.sh │ ├── train_lenet_consolidated.sh │ ├── train_lenet_docker.sh │ ├── train_lenet_rmsprop.sh │ ├── train_mnist_autoencoder.sh │ ├── train_mnist_autoencoder_adadelta.sh │ ├── train_mnist_autoencoder_adagrad.sh │ └── train_mnist_autoencoder_nesterov.sh ├── net_surgery.ipynb ├── net_surgery │ ├── bvlc_caffenet_full_conv.prototxt │ └── conv.prototxt ├── pascal-multilabel-with-datalayer.ipynb ├── pycaffe │ ├── caffenet.py │ ├── layers │ │ ├── pascal_multilabel_datalayers.py │ │ └── pyloss.py │ ├── linreg.prototxt │ └── tools.py ├── siamese │ ├── convert_mnist_siamese_data.cpp │ ├── create_mnist_siamese.sh │ ├── mnist_siamese.ipynb │ ├── mnist_siamese.prototxt │ ├── mnist_siamese_solver.prototxt │ ├── mnist_siamese_train_test.prototxt │ ├── readme.md │ └── train_mnist_siamese.sh └── web_demo │ ├── app.py │ ├── exifutil.py │ ├── readme.md │ ├── requirements.txt │ └── templates │ └── index.html ├── include ├── api │ ├── FRCNN │ │ ├── frcnn_api.hpp │ │ └── rpn_api.hpp │ ├── api.hpp │ └── util │ │ └── blowfish.hpp ├── caffe │ ├── ACTION_REC │ │ └── video_data_layer.hpp │ ├── FRCNN │ │ ├── frcnn_anchor_target_layer.hpp │ │ ├── frcnn_proposal_layer.hpp │ │ ├── frcnn_proposal_target_layer.hpp │ │ ├── frcnn_roi_data_layer.hpp │ │ ├── frcnn_vid_data_layer.hpp │ │ ├── roi_mask_pooling_layers.hpp │ │ ├── roi_pooling_layers.hpp │ │ ├── smooth_L1_loss_layer.hpp │ │ └── util │ │ │ ├── frcnn_gpu_nms.hpp │ │ │ ├── frcnn_helper.hpp │ │ │ ├── frcnn_param.hpp │ │ │ ├── frcnn_utils.hpp │ │ │ └── frcnn_vis.hpp │ ├── blob.hpp │ ├── caffe.hpp │ ├── common.hpp │ ├── data_transformer.hpp │ ├── filler.hpp │ ├── internal_thread.hpp │ ├── layer.hpp │ ├── layer_factory.hpp │ ├── layers │ │ ├── absval_layer.hpp │ │ ├── accuracy_layer.hpp │ │ ├── argmax_layer.hpp │ │ ├── base_conv_layer.hpp │ │ ├── base_data_layer.hpp │ │ ├── batch_norm_layer.hpp │ │ ├── batch_reindex_layer.hpp │ │ ├── bias_layer.hpp │ │ ├── bnll_layer.hpp │ │ ├── concat_layer.hpp │ │ ├── contrastive_loss_layer.hpp │ │ ├── conv_layer.hpp │ │ ├── crop_layer.hpp │ │ ├── cudnn_conv_layer.hpp │ │ ├── cudnn_lcn_layer.hpp │ │ ├── cudnn_lrn_layer.hpp │ │ ├── cudnn_pooling_layer.hpp │ │ ├── cudnn_relu_layer.hpp │ │ ├── cudnn_sigmoid_layer.hpp │ │ ├── cudnn_softmax_layer.hpp │ │ ├── cudnn_tanh_layer.hpp │ │ ├── data_layer.hpp │ │ ├── deconv_layer.hpp │ │ ├── dropout_layer.hpp │ │ ├── dummy_data_layer.hpp │ │ ├── eltwise_layer.hpp │ │ ├── elu_layer.hpp │ │ ├── embed_layer.hpp │ │ ├── euclidean_loss_layer.hpp │ │ ├── exp_layer.hpp │ │ ├── filter_layer.hpp │ │ ├── flatten_layer.hpp │ │ ├── hdf5_data_layer.hpp │ │ ├── hdf5_output_layer.hpp │ │ ├── hinge_loss_layer.hpp │ │ ├── im2col_layer.hpp │ │ ├── image_data_layer.hpp │ │ ├── infogain_loss_layer.hpp │ │ ├── inner_product_layer.hpp │ │ ├── input_layer.hpp │ │ ├── log_layer.hpp │ │ ├── loss_layer.hpp │ │ ├── lrn_layer.hpp │ │ ├── lstm_layer.hpp │ │ ├── memory_data_layer.hpp │ │ ├── multinomial_logistic_loss_layer.hpp │ │ ├── mvn_layer.hpp │ │ ├── neuron_layer.hpp │ │ ├── parameter_layer.hpp │ │ ├── pooling_layer.hpp │ │ ├── power_layer.hpp │ │ ├── prelu_layer.hpp │ │ ├── python_layer.hpp │ │ ├── recurrent_layer.hpp │ │ ├── reduction_layer.hpp │ │ ├── relu_layer.hpp │ │ ├── reshape_layer.hpp │ │ ├── rnn_layer.hpp │ │ ├── scale_layer.hpp │ │ ├── sigmoid_cross_entropy_loss_layer.hpp │ │ 
├── sigmoid_layer.hpp │ │ ├── silence_layer.hpp │ │ ├── slice_layer.hpp │ │ ├── softmax_layer.hpp │ │ ├── softmax_loss_layer.hpp │ │ ├── split_layer.hpp │ │ ├── spp_layer.hpp │ │ ├── tanh_layer.hpp │ │ ├── threshold_layer.hpp │ │ ├── tile_layer.hpp │ │ └── window_data_layer.hpp │ ├── net.hpp │ ├── parallel.hpp │ ├── sgd_solvers.hpp │ ├── solver.hpp │ ├── solver_factory.hpp │ ├── syncedmem.hpp │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ └── util │ │ ├── benchmark.hpp │ │ ├── blocking_queue.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── db_leveldb.hpp │ │ ├── db_lmdb.hpp │ │ ├── device_alternate.hpp │ │ ├── dynamic_library.hpp │ │ ├── format.hpp │ │ ├── gpu_util.cuh │ │ ├── hdf5.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── nccl.hpp │ │ ├── rng.hpp │ │ ├── search_path.hpp │ │ ├── signal_handler.h │ │ └── upgrade_proto.hpp ├── logger │ └── vis_logger.hpp ├── pybind11 │ ├── attr.h │ ├── buffer_info.h │ ├── cast.h │ ├── chrono.h │ ├── common.h │ ├── complex.h │ ├── detail │ │ ├── class.h │ │ ├── common.h │ │ ├── descr.h │ │ ├── init.h │ │ ├── internals.h │ │ └── typeid.h │ ├── eigen.h │ ├── embed.h │ ├── eval.h │ ├── functional.h │ ├── iostream.h │ ├── numpy.h │ ├── operators.h │ ├── options.h │ ├── pybind11-2.2.1 │ │ └── LICENSE │ ├── pybind11.h │ ├── pytypes.h │ ├── stl.h │ └── stl_bind.h └── yaml-cpp │ ├── anchor.h │ ├── binary.h │ ├── contrib │ ├── anchordict.h │ └── graphbuilder.h │ ├── dll.h │ ├── emitfromevents.h │ ├── emitter.h │ ├── emitterdef.h │ ├── emittermanip.h │ ├── emitterstyle.h │ ├── eventhandler.h │ ├── exceptions.h │ ├── mark.h │ ├── node │ ├── convert.h │ ├── detail │ │ ├── bool_type.h │ │ ├── impl.h │ │ ├── iterator.h │ │ ├── iterator_fwd.h │ │ ├── memory.h │ │ ├── node.h │ │ ├── node_data.h │ │ ├── node_iterator.h │ │ └── node_ref.h │ ├── emit.h │ ├── impl.h │ ├── iterator.h │ ├── node.h │ ├── parse.h │ ├── ptr.h │ └── type.h │ ├── noncopyable.h │ ├── null.h │ ├── ostream_wrapper.h │ ├── parser.h │ ├── stlemitter.h │ ├── traits.h │ ├── yaml-cpp-0.5.3 │ └── LICENSE │ └── yaml.h ├── matlab ├── +caffe │ ├── +test │ │ ├── test_io.m │ │ ├── test_net.m │ │ └── test_solver.m │ ├── Blob.m │ ├── Layer.m │ ├── Net.m │ ├── Solver.m │ ├── get_net.m │ ├── get_solver.m │ ├── imagenet │ │ └── ilsvrc_2012_mean.mat │ ├── io.m │ ├── private │ │ ├── CHECK.m │ │ ├── CHECK_FILE_EXIST.m │ │ ├── caffe_.cpp │ │ └── is_valid_handle.m │ ├── reset_all.m │ ├── run_tests.m │ ├── set_device.m │ ├── set_mode_cpu.m │ ├── set_mode_gpu.m │ └── version.m ├── CMakeLists.txt ├── Cifar10 │ ├── Cifar10.m │ └── GetCifar10Mat.sh ├── FRCNN │ ├── For_LOC │ │ ├── .gitignore │ │ ├── dataset │ │ │ └── .gitignore │ │ ├── eight │ │ │ ├── googlenet_v1 │ │ │ │ ├── solver.proto │ │ │ │ ├── test.proto │ │ │ │ ├── test_frcnn.sh │ │ │ │ ├── train.proto │ │ │ │ └── train_frcnn.sh │ │ │ ├── res101 │ │ │ │ ├── solver.proto │ │ │ │ ├── test.proto │ │ │ │ ├── test_frcnn.sh │ │ │ │ ├── train.proto │ │ │ │ └── train_frcnn.sh │ │ │ ├── res152 │ │ │ │ ├── solver.proto │ │ │ │ ├── test.proto │ │ │ │ ├── test_frcnn.sh │ │ │ │ ├── train.proto │ │ │ │ └── train_frcnn.sh │ │ │ ├── trecvid.json │ │ │ └── vgg19 │ │ │ │ ├── solver.proto │ │ │ │ ├── test.proto │ │ │ │ ├── test_frcnn.sh │ │ │ │ ├── train.proto │ │ │ │ └── train_frcnn.sh │ │ ├── python │ │ │ ├── Generate_Trecvid_Data.py │ │ │ ├── Generate_Trecvid_Test.py │ │ │ ├── Handle_Trecvid_Train.py │ │ │ ├── Merge_Score.py │ │ │ ├── base_function.py │ │ │ ├── 
calculate_trecvid_ap.py │ │ │ └── eval.sh │ │ └── two │ │ │ ├── googlenet_v1 │ │ │ ├── solver.proto │ │ │ ├── test.proto │ │ │ ├── test_frcnn.sh │ │ │ ├── train.proto │ │ │ └── train_frcnn.sh │ │ │ ├── res101 │ │ │ ├── solver.proto │ │ │ ├── test.proto │ │ │ ├── test_frcnn.sh │ │ │ ├── train.proto │ │ │ └── train_frcnn.sh │ │ │ ├── res152 │ │ │ ├── solver.proto │ │ │ ├── test.proto │ │ │ ├── test_frcnn.sh │ │ │ ├── train.proto │ │ │ └── train_frcnn.sh │ │ │ ├── res152_merge_other │ │ │ └── vgg19_pure_rois │ │ │ │ ├── loc_merge.sh │ │ │ │ └── test.proto │ │ │ ├── shell │ │ │ ├── convert_final_model.sh │ │ │ ├── googlenet_S1_test_frcnn.sh │ │ │ ├── googlenet_S2_test_frcnn.sh │ │ │ ├── res101_S1_test_frcnn.sh │ │ │ ├── res101_S2_test_frcnn.sh │ │ │ ├── res152_S1_test_frcnn.sh │ │ │ ├── res152_S2_test_frcnn.sh │ │ │ ├── vgg19_S1_test_frcnn.sh │ │ │ └── vgg19_S2_test_frcnn.sh │ │ │ ├── trecvid.json │ │ │ └── vgg19 │ │ │ ├── solver.proto │ │ │ ├── test.proto │ │ │ ├── test_frcnn.sh │ │ │ ├── train.proto │ │ │ └── train_frcnn.sh │ ├── ILSVRC2015_VID_FRCNN.m │ └── Make_Faster_RCNN_Train_Data.m ├── demo │ └── classification_demo.m └── hdf5creation │ ├── .gitignore │ ├── demo.m │ └── store2hdf5.m ├── python ├── CMakeLists.txt ├── ConvertData │ ├── LMDB.py │ ├── LevelDB.py │ ├── PadBinaryMean.py │ ├── PadCifar10.py │ ├── binaryproto2mat.py │ └── convert.sh ├── caffe │ ├── __init__.py │ ├── _caffe.cpp │ ├── classifier.py │ ├── coord_map.py │ ├── detector.py │ ├── draw.py │ ├── imagenet │ │ └── ilsvrc_2012_mean.npy │ ├── io.py │ ├── model_libs.py │ ├── net_spec.py │ ├── pycaffe.py │ └── test │ │ ├── test_coord_map.py │ │ ├── test_draw.py │ │ ├── test_io.py │ │ ├── test_layer_type_list.py │ │ ├── test_net.py │ │ ├── test_net_spec.py │ │ ├── test_python_layer.py │ │ ├── test_python_layer_with_param_str.py │ │ └── test_solver.py ├── classify.py ├── crawler-cvpr2016 │ └── cvpr2016.py ├── detect.py ├── draw_net.py ├── frcnn │ └── frcnn.cpp ├── requirements.txt └── train.py ├── scripts ├── build_docs.sh ├── copy_notebook.py ├── cpp_lint.py ├── create_annoset.py ├── create_annoset_r.py ├── deploy_docs.sh ├── download_model_binary.py ├── download_model_from_gist.sh ├── gather_examples.sh ├── split_caffe_proto.py ├── travis │ ├── build.sh │ ├── configure-cmake.sh │ ├── configure-make.sh │ ├── configure.sh │ ├── defaults.sh │ ├── install-deps.sh │ ├── install-python-deps.sh │ ├── setup-venv.sh │ └── test.sh └── upload_model_to_gist.sh ├── src ├── api │ ├── FRCNN │ │ ├── CMakeLists.txt │ │ ├── frcnn_api.cpp │ │ ├── frcnn_cascade_api.cpp │ │ └── rpn_api.cpp │ └── util │ │ └── blowfish.cpp ├── caffe │ ├── ACTION_REC │ │ └── video_data_layer.cpp │ ├── CMakeLists.txt │ ├── CTPN │ │ ├── ctpn_layers.hpp │ │ ├── lstm_layer.cpp │ │ ├── lstm_layer.cu │ │ ├── reverse_layer.cpp │ │ ├── reverse_layer.cu │ │ ├── transpose_layer.cpp │ │ └── transpose_layer.cu │ ├── DeformConv │ │ ├── deformable_conv_layer.cpp │ │ ├── deformable_conv_layer.cu │ │ ├── deformable_conv_layer.hpp │ │ ├── deformable_im2col.cu │ │ ├── deformable_im2col.hpp │ │ ├── deformable_psroi_pooling_layer.cpp │ │ ├── deformable_psroi_pooling_layer.cu │ │ └── deformable_psroi_pooling_layer.hpp │ ├── FRCNN │ │ ├── data_augment │ │ │ ├── Makefile │ │ │ ├── data_utils.cpp │ │ │ ├── data_utils.hpp │ │ │ ├── opencv_utils.cpp │ │ │ ├── rotate.cpp │ │ │ ├── test.jpg │ │ │ └── test_augment.cpp │ │ ├── data_enhance │ │ │ ├── haze_free │ │ │ │ ├── Makefile │ │ │ │ ├── guidedfilter.cpp │ │ │ │ ├── guidedfilter.h │ │ │ │ ├── haze.cpp │ │ │ │ ├── haze.h │ │ │ │ └── main.cpp 
│ │ │ └── histgram │ │ │ │ ├── equalize_hist.cpp │ │ │ │ └── equalize_hist.hpp │ │ ├── decode_bbox_layer.cpp │ │ ├── decode_bbox_layer.hpp │ │ ├── eltwise_layer.cpp │ │ ├── eltwise_layer.cu │ │ ├── focal_loss │ │ │ ├── README.md │ │ │ ├── focal_loss_layer.cpp │ │ │ ├── focal_loss_layer.cu │ │ │ ├── focal_loss_layer.hpp │ │ │ └── plot.py │ │ ├── frcnn_anchor_target_layer.cpp │ │ ├── frcnn_anchor_target_layer.cu │ │ ├── frcnn_proposal_layer.cpp │ │ ├── frcnn_proposal_layer.cu │ │ ├── frcnn_proposal_target_layer.cpp │ │ ├── frcnn_proposal_target_layer.cu │ │ ├── frcnn_roi_data_layer.cpp │ │ ├── frcnn_roi_data_layer.cu │ │ ├── frcnn_vid_data_layer.cpp │ │ ├── frcnn_vid_data_layer.cu │ │ ├── roi_align │ │ │ ├── roi_align_layer.cpp │ │ │ ├── roi_align_layer.cu │ │ │ └── roi_align_layer.hpp │ │ ├── roi_mask_pooling_layer.cpp │ │ ├── roi_mask_pooling_layer.cu │ │ ├── roi_pooling_layer.cpp │ │ ├── roi_pooling_layer.cu │ │ ├── smooth_L1_loss_layer.cpp │ │ ├── smooth_L1_loss_layer.cu │ │ ├── swish │ │ │ ├── swish_layer.cpp │ │ │ ├── swish_layer.cu │ │ │ └── swish_layer.hpp │ │ └── util │ │ │ ├── frcnn_bbox.cpp │ │ │ ├── frcnn_bbox_transform.cpp │ │ │ ├── frcnn_config.cpp │ │ │ ├── frcnn_file.cpp │ │ │ ├── frcnn_nms_kernel.cu │ │ │ ├── frcnn_param.cpp │ │ │ ├── frcnn_vis.cpp │ │ │ └── iou_gpu.cu │ ├── PR │ │ ├── README.md │ │ ├── conv_dw_layer.cpp │ │ ├── conv_dw_layer.cu │ │ ├── conv_dw_layer.hpp │ │ ├── cudnn_deconv_layer.cpp │ │ ├── cudnn_deconv_layer.cu │ │ └── cudnn_deconv_layer.hpp │ ├── RFCN │ │ ├── box_annotator_ohem_layer.cpp │ │ ├── box_annotator_ohem_layer.cu │ │ ├── box_annotator_ohem_layer.hpp │ │ ├── psroi_align_layer.cpp │ │ ├── psroi_align_layer.cu │ │ ├── psroi_align_layer.hpp │ │ ├── psroi_pooling_layer.cpp │ │ ├── psroi_pooling_layer.cu │ │ ├── psroi_pooling_layer.cu.align-better │ │ ├── psroi_pooling_layer.hpp │ │ ├── smooth_L1_loss_ohem_layer.cpp │ │ ├── smooth_L1_loss_ohem_layer.cu │ │ ├── smooth_l1_loss_ohem_layer.hpp │ │ ├── softmax_loss_ohem_layer.cpp │ │ ├── softmax_loss_ohem_layer.cu │ │ └── softmax_loss_ohem_layer.hpp │ ├── SSD │ │ ├── annotated_data_layer.cpp │ │ ├── annotated_data_layer.hpp │ │ ├── annotated_r_data_layer.cpp │ │ ├── annotated_r_data_layer.hpp │ │ ├── data_reader.cpp │ │ ├── data_reader.hpp │ │ ├── detection_output_layer.cpp │ │ ├── detection_output_layer.cu │ │ ├── detection_output_layer.hpp │ │ ├── multibox_loss_layer.cpp │ │ ├── multibox_loss_layer.hpp │ │ ├── multirbox_loss_layer.cpp │ │ ├── multirbox_loss_layer.hpp │ │ ├── normalize_layer.cpp │ │ ├── normalize_layer.cu │ │ ├── normalize_layer.hpp │ │ ├── permute_layer.cpp │ │ ├── permute_layer.cu │ │ ├── permute_layer.hpp │ │ ├── prior_box_layer.cpp │ │ ├── prior_box_layer.hpp │ │ ├── prior_rbox_layer.cpp │ │ ├── prior_rbox_layer.hpp │ │ ├── rdetection_output_layer.cpp │ │ ├── rdetection_output_layer.cu │ │ ├── rdetection_output_layer.hpp │ │ ├── ssd_base_data_layer.cpp │ │ ├── ssd_base_data_layer.cu │ │ ├── ssd_base_data_layer.hpp │ │ ├── ssd_data_transformer.cpp │ │ ├── ssd_data_transformer.hpp │ │ └── util │ │ │ ├── bbox_util.cpp │ │ │ ├── bbox_util.cu │ │ │ ├── bbox_util.hpp │ │ │ ├── im_transforms.cpp │ │ │ ├── im_transforms.hpp │ │ │ ├── rbox_util.cpp │ │ │ ├── rbox_util.cpp.head-tail │ │ │ ├── rbox_util.cpp.ship │ │ │ ├── rbox_util.hpp │ │ │ ├── sampler.cpp │ │ │ └── sampler.hpp │ ├── YOLO │ │ ├── box.cpp │ │ ├── box.h │ │ ├── image.cpp │ │ ├── image.h │ │ ├── yolo_layer.cpp │ │ └── yolo_layer.h │ ├── blob.cpp │ ├── common.cpp │ ├── data_transformer.cpp │ ├── internal_thread.cpp │ ├── layer.cpp 
│ ├── layer_factory.cpp │ ├── layers │ │ ├── absval_layer.cpp │ │ ├── absval_layer.cu │ │ ├── accuracy_layer.cpp │ │ ├── accuracy_layer.cu │ │ ├── argmax_layer.cpp │ │ ├── base_conv_layer.cpp │ │ ├── base_data_layer.cpp │ │ ├── base_data_layer.cu │ │ ├── batch_norm_layer.cpp │ │ ├── batch_norm_layer.cu │ │ ├── batch_reindex_layer.cpp │ │ ├── batch_reindex_layer.cu │ │ ├── bias_layer.cpp │ │ ├── bias_layer.cu │ │ ├── bnll_layer.cpp │ │ ├── bnll_layer.cu │ │ ├── concat_layer.cpp │ │ ├── concat_layer.cu │ │ ├── contrastive_loss_layer.cpp │ │ ├── contrastive_loss_layer.cu │ │ ├── conv_layer.cpp │ │ ├── conv_layer.cu │ │ ├── crop_layer.cpp │ │ ├── crop_layer.cu │ │ ├── cudnn_conv_layer.cpp │ │ ├── cudnn_conv_layer.cu │ │ ├── cudnn_lcn_layer.cpp │ │ ├── cudnn_lcn_layer.cu │ │ ├── cudnn_lrn_layer.cpp │ │ ├── cudnn_lrn_layer.cu │ │ ├── cudnn_pooling_layer.cpp │ │ ├── cudnn_pooling_layer.cu │ │ ├── cudnn_relu_layer.cpp │ │ ├── cudnn_relu_layer.cu │ │ ├── cudnn_sigmoid_layer.cpp │ │ ├── cudnn_sigmoid_layer.cu │ │ ├── cudnn_softmax_layer.cpp │ │ ├── cudnn_softmax_layer.cu │ │ ├── cudnn_tanh_layer.cpp │ │ ├── cudnn_tanh_layer.cu │ │ ├── data_layer.cpp │ │ ├── deconv_layer.cpp │ │ ├── deconv_layer.cu │ │ ├── dropout_layer.cpp │ │ ├── dropout_layer.cu │ │ ├── dummy_data_layer.cpp │ │ ├── elu_layer.cpp │ │ ├── elu_layer.cu │ │ ├── embed_layer.cpp │ │ ├── embed_layer.cu │ │ ├── euclidean_loss_layer.cpp │ │ ├── euclidean_loss_layer.cu │ │ ├── exp_layer.cpp │ │ ├── exp_layer.cu │ │ ├── filter_layer.cpp │ │ ├── filter_layer.cu │ │ ├── flatten_layer.cpp │ │ ├── hdf5_data_layer.cpp │ │ ├── hdf5_data_layer.cu │ │ ├── hdf5_output_layer.cpp │ │ ├── hdf5_output_layer.cu │ │ ├── hinge_loss_layer.cpp │ │ ├── im2col_layer.cpp │ │ ├── im2col_layer.cu │ │ ├── image_data_layer.cpp │ │ ├── infogain_loss_layer.cpp │ │ ├── inner_product_layer.cpp │ │ ├── inner_product_layer.cu │ │ ├── input_layer.cpp │ │ ├── log_layer.cpp │ │ ├── log_layer.cu │ │ ├── loss_layer.cpp │ │ ├── lrn_layer.cpp │ │ ├── lrn_layer.cu │ │ ├── lstm_layer.cpp │ │ ├── lstm_unit_layer.cpp │ │ ├── lstm_unit_layer.cu │ │ ├── memory_data_layer.cpp │ │ ├── multinomial_logistic_loss_layer.cpp │ │ ├── mvn_layer.cpp │ │ ├── mvn_layer.cu │ │ ├── neuron_layer.cpp │ │ ├── parameter_layer.cpp │ │ ├── pooling_layer.cpp │ │ ├── pooling_layer.cu │ │ ├── power_layer.cpp │ │ ├── power_layer.cu │ │ ├── prelu_layer.cpp │ │ ├── prelu_layer.cu │ │ ├── recurrent_layer.cpp │ │ ├── recurrent_layer.cu │ │ ├── reduction_layer.cpp │ │ ├── reduction_layer.cu │ │ ├── relu_layer.cpp │ │ ├── relu_layer.cu │ │ ├── reshape_layer.cpp │ │ ├── rnn_layer.cpp │ │ ├── scale_layer.cpp │ │ ├── scale_layer.cu │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ ├── sigmoid_layer.cpp │ │ ├── sigmoid_layer.cu │ │ ├── silence_layer.cpp │ │ ├── silence_layer.cu │ │ ├── slice_layer.cpp │ │ ├── slice_layer.cu │ │ ├── softmax_layer.cpp │ │ ├── softmax_layer.cu │ │ ├── softmax_loss_layer.cpp │ │ ├── softmax_loss_layer.cu │ │ ├── split_layer.cpp │ │ ├── split_layer.cu │ │ ├── spp_layer.cpp │ │ ├── tanh_layer.cpp │ │ ├── tanh_layer.cu │ │ ├── threshold_layer.cpp │ │ ├── threshold_layer.cu │ │ ├── tile_layer.cpp │ │ ├── tile_layer.cu │ │ └── window_data_layer.cpp │ ├── net.cpp │ ├── parallel.cpp │ ├── proto │ │ └── caffe.proto │ ├── solver.cpp │ ├── solvers │ │ ├── adadelta_solver.cpp │ │ ├── adadelta_solver.cu │ │ ├── adagrad_solver.cpp │ │ ├── adagrad_solver.cu │ │ ├── adam_solver.cpp │ │ ├── adam_solver.cu │ │ ├── nesterov_solver.cpp │ │ ├── nesterov_solver.cu │ 
│ ├── rmsprop_solver.cpp │ │ ├── rmsprop_solver.cu │ │ ├── sgd_solver.cpp │ │ └── sgd_solver.cu │ ├── syncedmem.cpp │ ├── test │ │ ├── CMakeLists.txt │ │ ├── test_accuracy_layer.cpp │ │ ├── test_argmax_layer.cpp │ │ ├── test_batch_norm_layer.cpp │ │ ├── test_batch_reindex_layer.cpp │ │ ├── test_benchmark.cpp │ │ ├── test_bias_layer.cpp │ │ ├── test_blob.cpp │ │ ├── test_caffe_main.cpp │ │ ├── test_common.cpp │ │ ├── test_concat_layer.cpp │ │ ├── test_contrastive_loss_layer.cpp │ │ ├── test_convolution_layer.cpp │ │ ├── test_crop_layer.cpp │ │ ├── test_data │ │ │ ├── generate_sample_data.py │ │ │ ├── sample_data.h5 │ │ │ ├── sample_data_2_gzip.h5 │ │ │ ├── sample_data_list.txt │ │ │ ├── solver_data.h5 │ │ │ └── solver_data_list.txt │ │ ├── test_data_layer.cpp │ │ ├── test_data_transformer.cpp │ │ ├── test_db.cpp │ │ ├── test_deconvolution_layer.cpp │ │ ├── test_dummy_data_layer.cpp │ │ ├── test_eltwise_layer.cpp │ │ ├── test_embed_layer.cpp │ │ ├── test_euclidean_loss_layer.cpp │ │ ├── test_filler.cpp │ │ ├── test_filter_layer.cpp │ │ ├── test_flatten_layer.cpp │ │ ├── test_gradient_based_solver.cpp │ │ ├── test_hdf5_output_layer.cpp │ │ ├── test_hdf5data_layer.cpp │ │ ├── test_hinge_loss_layer.cpp │ │ ├── test_im2col_kernel.cu │ │ ├── test_im2col_layer.cpp │ │ ├── test_image_data_layer.cpp │ │ ├── test_infogain_loss_layer.cpp │ │ ├── test_inner_product_layer.cpp │ │ ├── test_internal_thread.cpp │ │ ├── test_io.cpp │ │ ├── test_layer_factory.cpp │ │ ├── test_lrn_layer.cpp │ │ ├── test_lstm_layer.cpp │ │ ├── test_math_functions.cpp │ │ ├── test_maxpool_dropout_layers.cpp │ │ ├── test_memory_data_layer.cpp │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ ├── test_mvn_layer.cpp │ │ ├── test_net.cpp │ │ ├── test_neuron_layer.cpp │ │ ├── test_platform.cpp │ │ ├── test_pooling_layer.cpp │ │ ├── test_power_layer.cpp │ │ ├── test_protobuf.cpp │ │ ├── test_random_number_generator.cpp │ │ ├── test_reduction_layer.cpp │ │ ├── test_reshape_layer.cpp │ │ ├── test_rnn_layer.cpp │ │ ├── test_scale_layer.cpp │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ ├── test_slice_layer.cpp │ │ ├── test_softmax_layer.cpp │ │ ├── test_softmax_with_loss_layer.cpp │ │ ├── test_solver.cpp │ │ ├── test_solver_factory.cpp │ │ ├── test_split_layer.cpp │ │ ├── test_spp_layer.cpp │ │ ├── test_stochastic_pooling.cpp │ │ ├── test_syncedmem.cpp │ │ ├── test_tanh_layer.cpp │ │ ├── test_threshold_layer.cpp │ │ ├── test_tile_layer.cpp │ │ ├── test_upgrade_proto.cpp │ │ └── test_util_blas.cpp │ └── util │ │ ├── benchmark.cpp │ │ ├── blocking_queue.cpp │ │ ├── cudnn.cpp │ │ ├── db.cpp │ │ ├── db_leveldb.cpp │ │ ├── db_lmdb.cpp │ │ ├── dynamic_library.cpp │ │ ├── hdf5.cpp │ │ ├── im2col.cpp │ │ ├── im2col.cu │ │ ├── insert_splits.cpp │ │ ├── io.cpp │ │ ├── math_functions.cpp │ │ ├── math_functions.cu │ │ ├── search_path.cpp │ │ ├── signal_handler.cpp │ │ └── upgrade_proto.cpp ├── gtest │ ├── CMakeLists.txt │ ├── gtest-all.cpp │ ├── gtest.h │ └── gtest_main.cc ├── logger │ ├── Makefile │ ├── README.md │ └── vis_logger.cpp ├── modules │ ├── common │ │ ├── roi_padding_layer.cpp │ │ └── roi_padding_layer.hpp │ └── fpn │ │ ├── fpn_anchor_target_layer.m.cpp │ │ ├── fpn_anchor_target_layer.m.cu │ │ ├── fpn_anchor_target_layer.m.hpp │ │ ├── fpn_proposal_layer.m.cpp │ │ ├── fpn_proposal_layer.m.cu │ │ ├── fpn_proposal_layer.m.hpp │ │ ├── fpn_proposal_target_layer.cpp │ │ ├── fpn_proposal_target_layer.cu │ │ ├── fpn_proposal_target_layer.hpp │ │ ├── fpn_utils.cpp │ │ ├── fpn_utils.hpp │ │ ├── upsample_layer.cpp │ │ ├── 
upsample_layer.cu │ │ └── upsample_layer.hpp └── yaml-cpp-0.5.3 │ ├── binary.cpp │ ├── collectionstack.h │ ├── contrib │ ├── graphbuilder.cpp │ ├── graphbuilderadapter.cpp │ └── graphbuilderadapter.h │ ├── convert.cpp │ ├── directives.cpp │ ├── directives.h │ ├── emit.cpp │ ├── emitfromevents.cpp │ ├── emitter.cpp │ ├── emitterstate.cpp │ ├── emitterstate.h │ ├── emitterutils.cpp │ ├── emitterutils.h │ ├── exp.cpp │ ├── exp.h │ ├── indentation.h │ ├── memory.cpp │ ├── node.cpp │ ├── node_data.cpp │ ├── nodebuilder.cpp │ ├── nodebuilder.h │ ├── nodeevents.cpp │ ├── nodeevents.h │ ├── null.cpp │ ├── ostream_wrapper.cpp │ ├── parse.cpp │ ├── parser.cpp │ ├── ptr_stack.h │ ├── ptr_vector.h │ ├── regex_yaml.cpp │ ├── regex_yaml.h │ ├── regeximpl.h │ ├── scanner.cpp │ ├── scanner.h │ ├── scanscalar.cpp │ ├── scanscalar.h │ ├── scantag.cpp │ ├── scantag.h │ ├── scantoken.cpp │ ├── setting.h │ ├── simplekey.cpp │ ├── singledocparser.cpp │ ├── singledocparser.h │ ├── stream.cpp │ ├── stream.h │ ├── streamcharsource.h │ ├── stringsource.h │ ├── tag.cpp │ ├── tag.h │ └── token.h └── tools ├── CMakeLists.txt ├── caffe.cpp ├── compute_image_mean.cpp ├── convert_annoset.cpp ├── convert_annoset_r.cpp ├── convert_imageset.cpp ├── device_query.cpp ├── encrypt_model.cpp ├── extra ├── extract_seconds.py ├── launch_resize_and_crop_images.sh ├── parse_log.py ├── parse_log.sh ├── plot_log.gnuplot.example ├── plot_training_log.py ├── plot_training_log.py.example ├── resize_and_crop_images.py └── summarize.py ├── extract_features.cpp ├── finetune_net.cpp ├── test_net.cpp ├── train_net.cpp ├── upgrade_net_proto_binary.cpp ├── upgrade_net_proto_text.cpp └── upgrade_solver_proto_text.cpp /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites. 16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 5 | 6 | Check the users group in case you need help: 7 | https://groups.google.com/forum/#!forum/caffe-users 8 | -------------------------------------------------------------------------------- /cmake/Modules/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMBD libraries and headers 2 | # LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license. 
10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") 16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) 17 | 18 | include(FindPackageHandleStandardArgs) 19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 20 | 21 | if(LMDB_FOUND) 22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") 23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 24 | 25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h 26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) 27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") 28 | endif() 29 | -------------------------------------------------------------------------------- /cmake/Modules/FindNCCL.cmake: -------------------------------------------------------------------------------- 1 | set(NCCL_INC_PATHS 2 | /usr/include 3 | /usr/local/include 4 | $ENV{NCCL_DIR}/include 5 | ) 6 | 7 | set(NCCL_LIB_PATHS 8 | /lib 9 | /lib64 10 | /usr/lib 11 | /usr/lib64 12 | /usr/local/lib 13 | /usr/local/lib64 14 | $ENV{NCCL_DIR}/lib 15 | ) 16 | 17 | find_path(NCCL_INCLUDE_DIR NAMES nccl.h PATHS ${NCCL_INC_PATHS}) 18 | find_library(NCCL_LIBRARIES NAMES nccl PATHS ${NCCL_LIB_PATHS}) 19 | 20 | include(FindPackageHandleStandardArgs) 21 | find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIR NCCL_LIBRARIES) 22 | 23 | if (NCCL_FOUND) 24 | message(STATUS "Found NCCL (include: ${NCCL_INCLUDE_DIR}, library: ${NCCL_LIBRARIES})") 25 | mark_as_advanced(NCCL_INCLUDE_DIR NCCL_LIBRARIES) 26 | endif () 27 | -------------------------------------------------------------------------------- /cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # SNAPPY_FOUND 8 | # Snappy_INCLUDE_DIR 9 | # Snappy_LIBRARIES 10 | 11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h 12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) 13 | 14 | find_library(Snappy_LIBRARIES NAMES snappy 15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) 19 | 20 | if(SNAPPY_FOUND) 21 | message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") 22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) 23 | 24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h 25 | SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) 26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") 27 | endif() 28 | 29 | -------------------------------------------------------------------------------- /cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL 
"${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* Test device */ 8 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 9 | 10 | /* Temporary (TODO: remove) */ 11 | #if 1 12 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 13 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 14 | #define CMAKE_EXT ".gen.cmake" 15 | #else 16 | #define CMAKE_SOURCE_DIR "src/" 17 | #define EXAMPLES_SOURCE_DIR "examples/" 18 | #define CMAKE_EXT "" 19 | #endif 20 | -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | file(GLOB_RECURSE examples_srcs "${PROJECT_SOURCE_DIR}/examples/*.cpp") 2 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") 3 | foreach(source_file ${examples_srcs}) 4 | # get file name 5 | get_filename_component(name ${source_file} NAME_WE) 6 | 7 | # get folder name 8 | get_filename_component(path ${source_file} PATH) 9 | get_filename_component(folder ${path} NAME_WE) 10 | 11 | add_executable(${name} ${source_file}) 12 | target_link_libraries(${name} FRCNN_api ${Caffe_LINK}) 13 | caffe_default_properties(${name}) 14 | 15 | # set back RUNTIME_OUTPUT_DIRECTORY 16 | set_target_properties(${name} PROPERTIES 17 | RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/examples/${folder}") 18 | 19 | caffe_set_solution_folder(${name} examples) 20 | 21 | # install 22 | install(TARGETS ${name} DESTINATION bin) 23 | 24 | if(UNIX OR APPLE) 25 | # Funny command to make tutorials work 26 | # TODO: remove in future as soon as naming is standardized everywhere 27 | set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX}) 28 | add_custom_command(TARGET ${name} POST_BUILD 29 | COMMAND ln -sf "${__outname}" "${__outname}.bin") 30 | endif() 31 | endforeach() 32 | -------------------------------------------------------------------------------- /examples/FRCNN/convert_frcnn_model.sh: -------------------------------------------------------------------------------- 1 | python examples/FRCNN/convert_model.py \ 2 | --model models/FRCNN/zf/test.prototxt \ 3 | --weights models/FRCNN/snapshot/zf_frcnn_end_to_end_iter_70000.caffemodel \ 4 | --config examples/FRCNN/config/voc_config.json \ 5 | --net_out models/FRCNN/zf_faster_rcnn_final.caffemodel 6 | -------------------------------------------------------------------------------- /examples/FRCNN/convert_model.sh: -------------------------------------------------------------------------------- 1 | TYPE=$1 2 | if [ -z "$1" ];then 3 | echo 'type: vgg16/res50/res101' 4 | exit 0 5 | fi 6 | python examples/FRCNN/convert_model.py \ 7 | --model models/FRCNN/$TYPE/test.proto \ 8 | --weights models/FRCNN/snapshot/"$TYPE"_faster_rcnn_iter_70000.caffemodel \ 9 | --config examples/FRCNN/config/voc_config.json \ 10 | --net_out models/FRCNN/"$TYPE"_faster_rcnn_final.caffemodel 11 | -------------------------------------------------------------------------------- /examples/FRCNN/demo_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc 
images using faster rcnn end-to-end trained model (VGG16) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | 12 | BUILD=build/examples/FRCNN/demo_frcnn_api.bin 13 | 14 | $BUILD --gpu $gpu \ 15 | --model models/FRCNN/vgg16/test.prototxt \ 16 | --weights models/FRCNN/VGG16_faster_rcnn_final.caffemodel \ 17 | --default_c examples/FRCNN/config/voc_config.json \ 18 | --image_dir examples/FRCNN/images/ \ 19 | --out_dir examples/FRCNN/results/ 20 | -------------------------------------------------------------------------------- /examples/FRCNN/fetch_imagenet_models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE=imagenet_models.tgz 4 | URL=http://www.cs.berkeley.edu/~rbg/faster-rcnn-data/$FILE 5 | CHECKSUM=ed34ca912d6782edfb673a8c3a0bda6d 6 | 7 | if [ -f $FILE ]; then 8 | echo "File already exists. Checking md5..." 9 | os=`uname -s` 10 | if [ "$os" = "Linux" ]; then 11 | checksum=`md5sum $FILE | awk '{ print $1 }'` 12 | elif [ "$os" = "Darwin" ]; then 13 | checksum=`cat $FILE | md5` 14 | fi 15 | if [ "$checksum" = "$CHECKSUM" ]; then 16 | echo "Checksum is correct. No need to download." 17 | exit 0 18 | else 19 | echo "Checksum is incorrect. Need to download again." 20 | fi 21 | fi 22 | 23 | echo "Downloading pretrained ImageNet models (1G)..." 24 | 25 | wget $URL -O $FILE 26 | 27 | echo "Unzipping..." 28 | 29 | tar zxvf $FILE 30 | 31 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 32 | -------------------------------------------------------------------------------- /examples/FRCNN/fpn-res50/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 2 | train_net: "exp/fpn-res50/train_val_merged-ohem.proto" 3 | base_lr: 0.001 4 | #base_lr: 0.0005 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 40000 8 | #stepvalue: 100000 9 | max_iter: 120000 10 | #stepvalue: 50000 11 | #max_iter: 80000 12 | display: 100 13 | average_loss: 100 14 | momentum: 0.9 15 | weight_decay: 0.0001 16 | # function 17 | snapshot: 10000 18 | # We still use the snapshot prefix, though 19 | snapshot_prefix: "exp/snapshot/fpn-res50-mix" 20 | # Each iteration computes the gradients and loss on a batch of batch_size, then averages over iter_size passes; in effect the parameters are updated once every iter_size*batch_size samples. 21 | iter_size: 2 22 | -------------------------------------------------------------------------------- /examples/FRCNN/fpn-res50/train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | if [ ! 
-n "$1" ] ;then 4 | echo "use default GPU 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | PATH=`pwd`:$PATH 11 | export OMP_NUM_THREADS=4 12 | #export LD_LIBRARY_PATH=~/fyk/protobuf-3.1.0/lib/:$LD_LIBRARY_PATH 13 | CAFFE=build/tools/caffe 14 | use_snapshot=1 15 | if [ $use_snapshot -gt 0 ];then 16 | for s in `ls exp/snapshot/fpn-res50-mix*.solverstate`;do 17 | ss=$s 18 | done 19 | opt_w="--snapshot $ss" 20 | else 21 | #opt_w="--weights /home/s04/ry/imagenet_models/VGG16.v2.caffemodel" 22 | #opt_w="--weights /home/s03/fyk/ResNet-50-model-merge.caffemodel" 23 | opt_w="--weights /home/gpu/fyk/ResNet-50-model_merged.caffemodel" 24 | fi 25 | time GLOG_log_dir=exp/log \ 26 | $CAFFE train \ 27 | --gpu $gpu \ 28 | --solver exp/fpn-res50/solver.proto \ 29 | $opt_w 30 | 31 | -------------------------------------------------------------------------------- /examples/FRCNN/fpn/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 2 | # fyk: res50 *.proto files are copied from res101 3 | train_net: "exp/fpn/fpn-vgg16-train.proto" 4 | #base_lr: 0.001 5 | # 0.001 may be too high for the swish activation function and can produce NaN; the paper also suggests decreasing it a little. 6 | base_lr: 0.0005 7 | lr_policy: "multistep" 8 | gamma: 0.1 9 | stepvalue: 60000 10 | max_iter: 80000 11 | display: 100 12 | average_loss: 100 13 | momentum: 0.9 14 | weight_decay: 0.0001 15 | # function 16 | snapshot: 200 17 | # We still use the snapshot prefix, though 18 | snapshot_prefix: "exp/fpn/snapshot/fpn-vgg16-NWPU_frcnn" 19 | # Each iteration computes the gradients and loss on a batch of batch_size, then averages over iter_size passes; in effect the parameters are updated once every iter_size*batch_size samples. 20 | iter_size: 2 21 | -------------------------------------------------------------------------------- /examples/FRCNN/googlenet_v1/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/googlenet_v1/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 50000 6 | max_iter: 70000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 10000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "models/FRCNN/snapshot/googlenet_v1_faster_rcnn" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/images/000456.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/FRCNN/images/000456.jpg -------------------------------------------------------------------------------- /examples/FRCNN/images/000542.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/FRCNN/images/000542.jpg -------------------------------------------------------------------------------- /examples/FRCNN/images/001150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/FRCNN/images/001150.jpg -------------------------------------------------------------------------------- /examples/FRCNN/images/001763.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/FRCNN/images/001763.jpg -------------------------------------------------------------------------------- /examples/FRCNN/images/004545.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/FRCNN/images/004545.jpg -------------------------------------------------------------------------------- /examples/FRCNN/light-rfcn-res50/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "exp/light-rfcn-res50/train_val_merged-light-ohem.proto" 2 | base_lr: 0.001 3 | lr_policy: "multistep" 4 | gamma: 0.1 5 | stepvalue: 60000 6 | max_iter: 80000 7 | display: 100 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0001 11 | snapshot: 20000 12 | # We still use the snapshot prefix, though 13 | snapshot_prefix: "exp/snapshot/light-ohem-res50-mix" 14 | iter_size: 2 15 | -------------------------------------------------------------------------------- /examples/FRCNN/light-rfcn-res50/train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | if [ ! -n "$1" ] ;then 4 | echo "use default GPU 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | PATH=`pwd`:$PATH 11 | export OMP_NUM_THREADS=4 12 | #export LD_LIBRARY_PATH=~/fyk/protobuf-3.1.0/lib/:$LD_LIBRARY_PATH 13 | CAFFE=build/tools/caffe 14 | use_snapshot=0 15 | if [ $use_snapshot -gt 0 ];then 16 | for s in `ls exp/snapshot/rfcn-res50-mix*.solverstate`;do 17 | ss=$s 18 | done 19 | opt_w="--snapshot $ss" 20 | else 21 | #opt_w="--weights /home/s03/fyk/ResNet-50-model-merge.caffemodel" 22 | opt_w="--weights /home/gpu/fyk//ResNet-50-model_merged.caffemodel" 23 | fi 24 | time GLOG_log_dir=exp/log \ 25 | $CAFFE train \ 26 | --gpu $gpu \ 27 | --solver exp/light-rfcn-res50/solver.proto \ 28 | $opt_w 29 | 30 | -------------------------------------------------------------------------------- /examples/FRCNN/log/.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | *INFO* 3 | *ERROR* 4 | *WARNING* 5 | *FATAL* 6 | *log* 7 | -------------------------------------------------------------------------------- /examples/FRCNN/merge_resnet.sh: -------------------------------------------------------------------------------- 1 | python merge_resnet_deploy.py train ResNet-50-model.caffemodel --deploy ResNet-50-deploy.prototxt 2 | -------------------------------------------------------------------------------- /examples/FRCNN/res101/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "models/FRCNN/res101/train_val.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "models/FRCNN/snapshot/res101_faster_rcnn" 17 | iter_size: 2 18 | -------------------------------------------------------------------------------- /examples/FRCNN/res101/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script 
test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/test_frcnn.bin 13 | 14 | $BUILD --gpu $gpu \ 15 | --model models/FRCNN/res101/test.proto \ 16 | --weights models/FRCNN/res101_faster_rcnn_final.caffemodel \ 17 | --default_c examples/FRCNN/config/voc_config.json \ 18 | --image_root VOCdevkit/VOC2007/JPEGImages/ \ 19 | --image_list examples/FRCNN/dataset/voc2007.test \ 20 | --out_file examples/FRCNN/results/voc2007_test_res101_${pid}.frcnn \ 21 | --max_per_image 100 22 | 23 | CAL_AP=examples/FRCNN/calculate_voc_ap.py 24 | 25 | python $CAL_AP --gt examples/FRCNN/dataset/voc2007.test \ 26 | --answer examples/FRCNN/results/voc2007_test_res101_${pid}.frcnn \ 27 | --overlap 0.5 28 | -------------------------------------------------------------------------------- /examples/FRCNN/res101/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver models/FRCNN/res101/solver.proto \ 16 | --weights models/FRCNN/ResNet-101-model.caffemodel 17 | # --weights models/FRCNN/Res101.v2.caffemodel 18 | echo 'remember to convert_model' 19 | exit 0 20 | time python examples/FRCNN/convert_model.py \ 21 | --model models/FRCNN/res101/test.proto \ 22 | --weights models/FRCNN/snapshot/res101_faster_rcnn_iter_180000.caffemodel \ 23 | --config examples/FRCNN/config/voc_config.json \ 24 | --net_out models/FRCNN/res101_faster_rcnn_final.caffemodel 25 | -------------------------------------------------------------------------------- /examples/FRCNN/res101_rpn/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "models/FRCNN/res101_rpn/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "models/FRCNN/snapshot/res101_rpn" 17 | iter_size: 2 18 | -------------------------------------------------------------------------------- /examples/FRCNN/res152/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "models/FRCNN/res152/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "models/FRCNN/snapshot/res152_faster_rcnn" 17 | iter_size: 2 18 | -------------------------------------------------------------------------------- /examples/FRCNN/res152_rpn/solver.proto: 
-------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "models/FRCNN/res152_rpn/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "models/FRCNN/snapshot/res152_rpn" 17 | iter_size: 2 18 | -------------------------------------------------------------------------------- /examples/FRCNN/res50/backup/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 2 | # fyk: res50 *.proto files are copied from res101 3 | train_net: "models/FRCNN/res50/train_val.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 50000 8 | max_iter: 70000 9 | display: 100 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "models/FRCNN/snapshot/res50-NWPU_faster_rcnn" 17 | iter_size: 2 18 | -------------------------------------------------------------------------------- /examples/FRCNN/res50/merge1.sh: -------------------------------------------------------------------------------- 1 | python exp/gen_merged_model.py train models/FRCNN/ResNet-50-model.caffemodel --train exp/fixed/train_val.proto --test exp/fixed/test.proto 2 | -------------------------------------------------------------------------------- /examples/FRCNN/res50/restore_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script restores res50 Faster R-CNN training from a saved solverstate snapshot; usage: restore_train.sh <iter> 3 | if [ ! -n "$1" ] ;then 4 | echo 'param: iter' 5 | exit 0 6 | else 7 | echo "restore from snapshot: res50_faster_rcnn_iter_$1" 8 | iter=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 14 | --gpu 0 \ 15 | --solver models/FRCNN/res50/solver.proto \ 16 | --snapshot models/FRCNN/res50/res50_faster_rcnn_iter_${iter}.solverstate \ 17 | # --weights models/FRCNN/ResNet-50-model.caffemodel 18 | echo 'remember to convert_model' 19 | exit 0 20 | time python examples/FRCNN/convert_model.py \ 21 | --model models/FRCNN/res50/test.proto \ 22 | --weights models/FRCNN/snapshot/res50_faster_rcnn_iter_180000.caffemodel \ 23 | --config examples/FRCNN/config/voc_config.json \ 24 | --net_out models/FRCNN/res50_faster_rcnn_final.caffemodel 25 | -------------------------------------------------------------------------------- /examples/FRCNN/res50/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script evaluates the trained res50 Faster R-CNN model on the VOC2007 test set 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/test_frcnn.bin 13 | TYPE="res50" #vgg16/res101 14 | time $BUILD --gpu $gpu \ 15 | --model models/FRCNN/"$TYPE"/test_inference.prototxt \ 16 | --weights models/FRCNN/"$TYPE"_faster_rcnn_final_inference.caffemodel \ 17 | --default_c examples/FRCNN/config/voc_config.json \ 18 | --image_root VOCdevkit/VOC2007/JPEGImages/ \ 19 | --image_list examples/FRCNN/dataset/voc2007.test \ 20 | --out_file examples/FRCNN/results/voc2007_test_"$TYPE"_${pid}.frcnn \ 21 | --max_per_image 100 22 | 23 | CAL_AP=examples/FRCNN/calculate_voc_ap.py 24 | 25 | python $CAL_AP --gt examples/FRCNN/dataset/voc2007.test \ 26 | --answer examples/FRCNN/results/voc2007_test_"$TYPE"_${pid}.frcnn \ 27 | --overlap 0.5 28 | -------------------------------------------------------------------------------- /examples/FRCNN/res50/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver models/FRCNN/res50/solver.proto \ 16 | --weights models/FRCNN/ResNet-50-model.caffemodel 17 | # --weights models/FRCNN/Res101.v2.caffemodel 18 | echo 'remember to convert_model' 19 | exit 0 20 | time python examples/FRCNN/convert_model.py \ 21 | --model models/FRCNN/res50/test.proto \ 22 | --weights models/FRCNN/snapshot/res50_faster_rcnn_iter_180000.caffemodel \ 23 | --config examples/FRCNN/config/voc_config.json \ 24 | --net_out models/FRCNN/res50_faster_rcnn_final.caffemodel 25 | -------------------------------------------------------------------------------- /examples/FRCNN/results/.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | *log* 3 | *frcnn* 4 | *rpn* 5 | *txt* 6 | *jpg* 7 | -------------------------------------------------------------------------------- /examples/FRCNN/rfcn-res50/solver.proto: -------------------------------------------------------------------------------- 1 | #train_net: "exp/rfcn-res50/train_val_merged-atrous.proto" 2 | train_net: "exp/rfcn-res50/train_val_merged-atrous-ohem.proto" 3 | base_lr: 0.001 4 | lr_policy: "multistep" 5 | gamma: 0.1 6 | stepvalue: 60000 7 | #stepvalue: 70000 8 | max_iter: 80000 9 | #max_iter: 80000 10 | display: 100 11 | average_loss: 100 12 | momentum: 0.9 13 | weight_decay: 0.0001 14 | # function 15 | snapshot: 20000 16 | # We still use the snapshot prefix, though 17 | snapshot_prefix: "exp/snapshot/rfcn-ohem-soft-nms-res50-mix" 18 | iter_size: 2 19 | -------------------------------------------------------------------------------- /examples/FRCNN/rfcn-res50/train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | set -e 4 | if [ ! 
-n "$1" ] ;then 5 | echo "use default GPU 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | PATH=`pwd`:$PATH 12 | export OMP_NUM_THREADS=4 13 | #export LD_LIBRARY_PATH=~/fyk/protobuf-3.1.0/lib/:$LD_LIBRARY_PATH 14 | CAFFE=build/tools/caffe 15 | use_snapshot=0 16 | if [ $use_snapshot -gt 0 ];then 17 | for s in `ls exp/snapshot/rfcn-res50-mix*.solverstate`;do 18 | ss=$s 19 | done 20 | opt_w="--snapshot $ss" 21 | else 22 | opt_w="--weights /home/gpu/fyk/ResNet-50-model_merged.caffemodel" 23 | fi 24 | time GLOG_log_dir=exp/log \ 25 | $CAFFE train \ 26 | --gpu $gpu \ 27 | --solver exp/rfcn-res50/solver.proto \ 28 | $opt_w 29 | -------------------------------------------------------------------------------- /examples/FRCNN/rfcn-resnext/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 2 | train_net: "exp/rfcn-resnext/train_resnext50-32x4d-rfcn++.proto" 3 | #train_net: "exp/rfcn-resnext/train_resnext101-32x4d-merge.proto" 4 | #train_net: "exp/fpn-det/train_fpn-det_roi-ctx.proto" 5 | #train_net: "exp/fpn-det/train_val_merged-8s.proto" 6 | #train_net: "exp/light-rfcn-res50/train_val_merged-light-ohem.proto" 7 | #train_net: "exp/rfcn-res50/train_val_merged-atrous-ohem.proto" 8 | #base_lr: 0.0001 9 | base_lr: 0.001 10 | lr_policy: "cosine" 11 | #lr_policy: "multistep" 12 | gamma: 0.1 13 | stepvalue: 60000 14 | #stepvalue: 100000 15 | #max_iter: 120000 16 | max_iter: 80000 17 | display: 100 18 | #display: 1 19 | average_loss: 100 20 | momentum: 0.9 21 | weight_decay: 0.0001 22 | # function 23 | snapshot: 20000 24 | # We still use the snapshot prefix, though 25 | snapshot_prefix: "exp/snapshot/rfcn++_resnext50-32x4d_mix" 26 | #snapshot_prefix: "exp/snapshot/rfcn_resnext101-32x4d_mix" 27 | #snapshot_prefix: "exp/snapshot/rfcn++_fpn-det_roi-ctx_mix" 28 | #snapshot_prefix: "exp/snapshot/rfcn++-fpn-det-8s-mix" 29 | iter_size: 2 30 | -------------------------------------------------------------------------------- /examples/FRCNN/test_rpn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | 12 | pid=$$ 13 | 14 | BUILD=build/examples/FRCNN/test_rpn.bin 15 | 16 | time $BUILD --gpu $gpu \ 17 | --model models/FRCNN/zf_rpn/test.prototxt \ 18 | --weights models/FRCNN/zf_rpn_final.caffemodel \ 19 | --default_c examples/FRCNN/config/voc_config.json \ 20 | --image_root VOCdevkit/VOC2007/JPEGImages/ \ 21 | --image_list examples/FRCNN/dataset/voc2007.test \ 22 | --out_file examples/FRCNN/results/voc2007_test_${pid}.rpn 23 | 24 | CAL_RECALL=examples/FRCNN/calculate_recall.py 25 | 26 | time python $CAL_RECALL --gt examples/FRCNN/dataset/voc2007.test \ 27 | --answer examples/FRCNN/results/voc2007_test_${pid}.rpn \ 28 | --overlap 0.5 29 | -------------------------------------------------------------------------------- /examples/FRCNN/train_rpn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! 
-n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver models/FRCNN/zf_rpn/solver.prototxt \ 16 | --weights models/FRCNN/ZF.v2.caffemodel 17 | 18 | mv ./models/FRCNN/zf_rpn/zf_rpn_iter_70000.caffemodel ./models/FRCNN/zf_rpn_final.caffemodel 19 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg16/restore.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "\$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | PATH=.:$PATH 12 | #CAFFE=build/tools/caffe 13 | CAFFE=caffe-付永康 14 | 15 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 16 | --gpu $gpu \ 17 | --solver models/FRCNN/vgg16/solver.proto \ 18 | --snapshot models/FRCNN/snapshot/VGG16_faster_rcnn_iter_40000.solverstate 19 | 20 | echo 'remember to convert_model.py after training' 21 | exit 0 22 | time python examples/FRCNN/convert_model.py \ 23 | --model models/FRCNN/vgg16/test.proto \ 24 | --weights models/FRCNN/snapshot/vgg16_faster_rcnn_iter_70000.caffemodel \ 25 | --config examples/FRCNN/config/voc_config.json \ 26 | --net_out models/FRCNN/vgg16_faster_rcnn_final.caffemodel 27 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg16/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/vgg16/train_val.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 50000 6 | max_iter: 70000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 10000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "models/FRCNN/snapshot/vgg16_faster_rcnn" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg16/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/test_frcnn.bin 13 | 14 | $BUILD --gpu $gpu \ 15 | --model models/FRCNN/vgg16/test.proto \ 16 | --weights models/FRCNN/vgg16_faster_rcnn_final.caffemodel \ 17 | --default_c examples/FRCNN/config/voc_config.json \ 18 | --image_root VOCdevkit/VOC2007/JPEGImages/ \ 19 | --image_list examples/FRCNN/dataset/voc2007.test \ 20 | --out_file examples/FRCNN/results/voc2007_test_vgg16_${pid}.frcnn \ 21 | --max_per_image 100 22 | 23 | CAL_AP=examples/FRCNN/calculate_voc_ap.py 24 | 25 | python $CAL_AP --gt examples/FRCNN/dataset/voc2007.test \ 26 | --answer examples/FRCNN/results/voc2007_test_vgg16_${pid}.frcnn \ 27 | --overlap 0.5 28 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg16/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "\$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | PATH=.:$PATH 12 | #CAFFE=build/tools/caffe 13 | CAFFE=caffe-付永康 14 | 15 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 16 | --gpu $gpu \ 17 | --solver models/FRCNN/vgg16/solver.proto \ 18 | --weights models/FRCNN/imagenet_models/VGG16.v2.caffemodel 19 | 20 | echo 'remember to convert_model.py after training' 21 | exit 0 22 | time python examples/FRCNN/convert_model.py \ 23 | --model models/FRCNN/vgg16/test.proto \ 24 | --weights models/FRCNN/snapshot/vgg16_faster_rcnn_iter_70000.caffemodel \ 25 | --config examples/FRCNN/config/voc_config.json \ 26 | --net_out models/FRCNN/vgg16_faster_rcnn_final.caffemodel 27 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg16_rpn/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/vgg16_rpn/train_val.prototxt" 2 | 3 | base_lr: 0.001 4 | lr_policy: "step" 5 | gamma: 0.1 6 | momentum: 0.9 7 | stepsize: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 1 11 | weight_decay: 0.0005 12 | 13 | snapshot: 10000 14 | snapshot_prefix: "./models/FRCNN/snapshot/vgg16_rpn" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/vgg19/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/vgg19/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 50000 6 | max_iter: 70000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 10000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "models/FRCNN/snapshot/vgg19_faster_rcnn" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/zf/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/zf/train_val.prototxt" 2 | 3 | base_lr: 0.001 4 | lr_policy: "step" 5 | gamma: 0.1 6 | momentum: 0.9 7 | stepsize: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 1 11 | weight_decay: 0.0005 12 | 13 | snapshot: 10000 14 | snapshot_prefix: "./models/FRCNN/snapshot/zf_frcnn_end_to_end" 15 | 
iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/zf/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | 12 | pid=$$ 13 | 14 | BUILD=build/examples/FRCNN/test_frcnn.bin 15 | 16 | $BUILD --gpu $gpu \ 17 | --model models/FRCNN/zf/test.prototxt \ 18 | --weights models/FRCNN/zf_faster_rcnn_final.caffemodel \ 19 | --default_c examples/FRCNN/config/voc_config.json \ 20 | --image_root VOCdevkit/VOC2007/JPEGImages/ \ 21 | --image_list examples/FRCNN/dataset/voc2007.test \ 22 | --out_file examples/FRCNN/results/voc2007_test_zf_${pid}.frcnn \ 23 | --max_per_image 100 24 | 25 | CAL_AP=examples/FRCNN/calculate_voc_ap.py 26 | 27 | python $CAL_AP --gt examples/FRCNN/dataset/voc2007.test \ 28 | --answer examples/FRCNN/results/voc2007_test_zf_${pid}.frcnn \ 29 | --overlap 0.5 30 | -------------------------------------------------------------------------------- /examples/FRCNN/zf/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | #CAFFE=build/tools/caffe 12 | CAFFE=caffe-付永康 13 | 14 | time GLOG_log_dir=examples/FRCNN/log $CAFFE train \ 15 | --gpu $gpu \ 16 | --solver models/FRCNN/zf/solver.prototxt \ 17 | --weights models/FRCNN/imagenet_models/ZF.v2.caffemodel 18 | 19 | echo 'remember to convert model' 20 | exit 0 21 | time python examples/FRCNN/convert_model.py \ 22 | --model models/FRCNN/zf/test.prototxt \ 23 | --weights models/FRCNN/snapshot/zf_frcnn_end_to_end_iter_70000.caffemodel \ 24 | --config examples/FRCNN/config/voc_config.json \ 25 | --net_out models/FRCNN/zf_faster_rcnn_final.caffemodel 26 | 27 | -------------------------------------------------------------------------------- /examples/FRCNN/zf/vid_solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/zf/vid_train.proto" 2 | 3 | base_lr: 0.001 4 | lr_policy: "step" 5 | gamma: 0.1 6 | momentum: 0.9 7 | stepsize: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 1 11 | weight_decay: 0.0005 12 | 13 | snapshot: 10000 14 | snapshot_prefix: "./models/FRCNN/snapshot/vid_zf_frcnn_end_to_end" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/FRCNN/zf_rpn/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/FRCNN/zf_rpn/train_val.prototxt" 2 | 3 | base_lr: 0.001 4 | lr_policy: "step" 5 | gamma: 0.1 6 | momentum: 0.9 7 | stepsize: 50000 8 | max_iter: 70000 9 | display: 20 10 | average_loss: 1 11 | weight_decay: 0.0005 12 | 13 | snapshot: 10000 14 | snapshot_prefix: "./models/FRCNN/snapshot/zf_rpn" 15 | iter_size: 2 16 | -------------------------------------------------------------------------------- /examples/YOLO/demo_yolov3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | # determine whether 
$1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | 12 | BUILD=build/examples/YOLO/demo_yolov3.bin 13 | 14 | GLOG_logtostderr=1 $BUILD --gpu $gpu \ 15 | --model models/YOLO/yolov3.proto \ 16 | --weights models/YOLO/yolov3.caffemodel \ 17 | --classes 80 \ 18 | --image_dir examples/YOLO/images/ \ 19 | --out_dir examples/YOLO/results/ 20 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_sigmoid_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_sigmoid_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 100 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 
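# (Illustrative arithmetic, assuming the train batch size of 100 used by the
# companion cifar10_full_sigmoid_train_test_bn.prototxt: CIFAR-10 has 50,000
# training images, so one epoch is 500 iterations and the 60,000-iteration run
# below spans the 120 epochs mentioned in the header, while 5,000 iterations is
# 10 epochs. Note that with test_iter: 10 and a test batch of 100, each test
# pass covers 1,000 of the 10,000 test images rather than the full set.)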
13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 100 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid_bn" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 60000 22 | # snapshot intermediate results 23 | snapshot: 10000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 
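# (This _lr1 solver is the second stage of the schedule driven by
# examples/cifar10/train_full.sh further below: training resumes from the
# cifar10_full_iter_60000 snapshot with base_lr cut 10x to 0.0001 until
# max_iter: 65000, after which the _lr2 solver cuts it another 10x for the
# final 5,000 iterations.)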
13 | base_lr: 0.0001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 65000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_solver_lr2.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.00001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 70000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 4000 21 | # snapshot intermediate results 22 | snapshot: 4000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_quick_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 
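# (The comment above was carried over from the MNIST solver; the numbers hold
# for CIFAR-10 as well: 100 test iterations x test batch size 100 = the full
# 10,000-image test set.)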
8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.0001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 5000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /examples/cifar10/create_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the cifar data into leveldb format. 3 | set -e 4 | 5 | EXAMPLE=examples/cifar10 6 | DATA=data/cifar10 7 | DBTYPE=lmdb 8 | 9 | echo "Creating $DBTYPE..." 10 | 11 | rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE 12 | 13 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE 14 | 15 | echo "Computing image mean..." 16 | 17 | ./build/tools/compute_image_mean -backend=$DBTYPE \ 18 | $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto 19 | 20 | echo "Done." 21 | 22 | DBTYPE=leveldb 23 | 24 | echo "Creating $DBTYPE..." 25 | 26 | rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE 27 | 28 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE 29 | 30 | echo "Computing image mean..." 31 | 32 | ./build/tools/compute_image_mean -backend=$DBTYPE \ 33 | $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto 34 | 35 | echo "Done." 36 | -------------------------------------------------------------------------------- /examples/cifar10/train_full.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train \ 7 | --solver=examples/cifar10/cifar10_full_solver.prototxt $@ 8 | 9 | # reduce learning rate by factor of 10 10 | $TOOLS/caffe train \ 11 | --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \ 12 | --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate.h5 $@ 13 | 14 | # reduce learning rate by factor of 10 15 | $TOOLS/caffe train \ 16 | --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \ 17 | --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate.h5 $@ 18 | -------------------------------------------------------------------------------- /examples/cifar10/train_full_sigmoid.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train \ 7 | --solver=examples/cifar10/cifar10_full_sigmoid_solver.prototxt $@ 8 | 9 | -------------------------------------------------------------------------------- /examples/cifar10/train_full_sigmoid_bn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train \ 7 | --solver=examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt $@ 8 | 9 | -------------------------------------------------------------------------------- /examples/cifar10/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | 
TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train \ 7 | --solver=examples/cifar10/cifar10_quick_solver.prototxt $@ 8 | 9 | # reduce learning rate by factor of 10 after 8 epochs 10 | $TOOLS/caffe train \ 11 | --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 12 | --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 $@ 13 | -------------------------------------------------------------------------------- /examples/cifar100/create_cifar100.sh: -------------------------------------------------------------------------------- 1 | # In Identity Mappings in Deep Residual Networks 2 | # dataset network baseline pre-activation 3 | # CIFAR-10 ResNet-110 6.61 6.37 4 | # ResNet-164 5.93 5.46 5 | # ResNet-1001 7.61 4.92 6 | # CIFAR-100 ResNet-164 25.16 24.33 7 | # ResNet-1001 27.82 22.71 8 | set -e 9 | 10 | EXAMPLE=examples/cifar100 11 | DATA=data/cifar100 12 | DBTYPE=lmdb 13 | LOG=examples/cifar100/log 14 | SNAPSHOT=examples/cifar100/snapshot 15 | 16 | echo "Creating $DBTYPE..." 17 | 18 | rm -rf $EXAMPLE/cifar100_train_$DBTYPE $EXAMPLE/cifar100_test_$DBTYPE 19 | 20 | ./build/examples/cifar100/convert_cifar100_data.bin $DATA $EXAMPLE $DBTYPE 21 | 22 | echo "Computing image mean..." 23 | 24 | ./build/tools/compute_image_mean -backend=$DBTYPE \ 25 | $EXAMPLE/cifar100_train_$DBTYPE $EXAMPLE/mean.binaryproto 26 | 27 | if [ -d "$LOG" ]; then 28 | rm -rf $LOG 29 | echo "Remove Dir : $LOG" 30 | fi 31 | mkdir $LOG 32 | 33 | if [ -d "$SNAPSHOT" ]; then 34 | rm -rf $SNAPSHOT 35 | echo "Remove Dir : $SNAPSHOT" 36 | fi 37 | mkdir $SNAPSHOT 38 | python ./examples/cifar100/PadCifar100.py 39 | 40 | echo "Done." 41 | -------------------------------------------------------------------------------- /examples/finetune_flickr_style/flickr_style.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/finetune_flickr_style/flickr_style.csv.gz -------------------------------------------------------------------------------- /examples/finetune_flickr_style/style_names.txt: -------------------------------------------------------------------------------- 1 | Detailed 2 | Pastel 3 | Melancholy 4 | Noir 5 | HDR 6 | Vintage 7 | Long Exposure 8 | Horror 9 | Sunny 10 | Bright 11 | Hazy 12 | Bokeh 13 | Serene 14 | Texture 15 | Ethereal 16 | Macro 17 | Depth of Field 18 | Geometric Composition 19 | Minimal 20 | Romantic 21 | -------------------------------------------------------------------------------- /examples/finetune_pascal_detection/pascal_finetune_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt" 2 | test_iter: 100 3 | test_interval: 1000 4 | base_lr: 0.001 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 20000 8 | display: 20 9 | max_iter: 100000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune" 14 | -------------------------------------------------------------------------------- /examples/hdf5_classification/nonlinear_auto_test.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/test.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: 
"InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | -------------------------------------------------------------------------------- /examples/hdf5_classification/nonlinear_auto_train.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/train.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: "InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | -------------------------------------------------------------------------------- /examples/hdf5_classification/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "LogisticRegressionNet" 2 | layer { 3 | name: "data" 4 | type: "HDF5Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | hdf5_data_param { 11 | source: "examples/hdf5_classification/data/train.txt" 12 | batch_size: 10 13 | } 14 | } 15 | layer { 16 | name: "data" 17 | type: "HDF5Data" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | hdf5_data_param { 24 | source: "examples/hdf5_classification/data/test.txt" 25 | batch_size: 10 26 | } 27 | } 28 | layer { 29 | name: "fc1" 30 | type: "InnerProduct" 31 | bottom: "data" 32 | top: "fc1" 33 | param { 34 | lr_mult: 1 35 | decay_mult: 1 36 | } 37 | param { 38 | lr_mult: 2 39 | decay_mult: 0 40 | } 41 | inner_product_param { 42 | num_output: 2 43 | weight_filler { 44 | type: "xavier" 45 | } 46 | bias_filler { 47 | type: "constant" 48 | value: 0 49 | } 50 | } 51 | } 52 | layer { 53 | name: "loss" 54 | type: "SoftmaxWithLoss" 55 | bottom: "fc1" 56 | bottom: "label" 57 | top: "loss" 58 | } 59 | layer { 60 | name: "accuracy" 61 | type: "Accuracy" 62 | bottom: "fc1" 63 | bottom: "label" 64 | top: "accuracy" 65 | include { 66 | phase: TEST 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /examples/imagenet/make_imagenet_mean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Compute the mean image from the imagenet training lmdb 3 | # 
N.B. this is available in data/ilsvrc12 4 | 5 | EXAMPLE=examples/imagenet 6 | DATA=data/ilsvrc12 7 | TOOLS=build/tools 8 | 9 | $TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ 10 | $DATA/imagenet_mean.binaryproto 11 | 12 | echo "Done." 13 | -------------------------------------------------------------------------------- /examples/imagenet/resume_training.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=models/bvlc_reference_caffenet/solver.prototxt \ 6 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 \ 7 | $@ 8 | -------------------------------------------------------------------------------- /examples/imagenet/train_caffenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=models/bvlc_reference_caffenet/solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/images/cat gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/images/cat gray.jpg -------------------------------------------------------------------------------- /examples/images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/images/cat.jpg -------------------------------------------------------------------------------- /examples/images/cat_gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/images/cat_gray.jpg -------------------------------------------------------------------------------- /examples/images/fish-bike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/examples/images/fish-bike.jpg -------------------------------------------------------------------------------- /examples/mnist/create_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into lmdb/leveldb format, 3 | # depending on the value assigned to $BACKEND. 4 | set -e 5 | 6 | EXAMPLE=examples/mnist 7 | DATA=data/mnist 8 | BUILD=build/examples/mnist 9 | 10 | BACKEND="lmdb" 11 | 12 | echo "Creating ${BACKEND}..." 13 | 14 | rm -rf $EXAMPLE/mnist_train_${BACKEND} 15 | rm -rf $EXAMPLE/mnist_test_${BACKEND} 16 | 17 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ 18 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND} 19 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ 20 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND} 21 | 22 | echo "Done." 
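As a quick sanity check on the LMDBs that create_mnist.sh writes, a record can be read back and decoded from Python. The sketch below is illustrative rather than code from this repo; it assumes the `lmdb` Python package and pycaffe's generated `caffe_pb2` protos are importable, and that the default lmdb backend above was used.

```python
import lmdb
import numpy as np
from caffe.proto import caffe_pb2

env = lmdb.open('examples/mnist/mnist_train_lmdb', readonly=True)
with env.begin() as txn:
    for key, value in txn.cursor():           # keys/values written by convert_mnist_data.bin
        datum = caffe_pb2.Datum()
        datum.ParseFromString(value)          # each value is a serialized Datum
        img = np.frombuffer(datum.data, dtype=np.uint8).reshape(
            datum.channels, datum.height, datum.width)   # MNIST: 1 x 28 x 28
        print(key, datum.label, img.shape)
        break                                 # just the first record
```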
23 | -------------------------------------------------------------------------------- /examples/mnist/lenet_adadelta_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 1.0 11 | lr_policy: "fixed" 12 | momentum: 0.95 13 | weight_decay: 0.0005 14 | # Display every 100 iterations 15 | display: 100 16 | # The maximum number of iterations 17 | max_iter: 10000 18 | # snapshot intermediate results 19 | snapshot: 5000 20 | snapshot_prefix: "examples/mnist/lenet_adadelta" 21 | # solver mode: CPU or GPU 22 | solver_mode: GPU 23 | type: "AdaDelta" 24 | delta: 1e-6 25 | -------------------------------------------------------------------------------- /examples/mnist/lenet_auto_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | train_net: "mnist/lenet_auto_train.prototxt" 3 | test_net: "mnist/lenet_auto_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # The base learning rate, momentum and the weight decay of the network. 11 | base_lr: 0.01 12 | momentum: 0.9 13 | weight_decay: 0.0005 14 | # The learning rate policy 15 | lr_policy: "inv" 16 | gamma: 0.0001 17 | power: 0.75 18 | # Display every 100 iterations 19 | display: 100 20 | # The maximum number of iterations 21 | max_iter: 10000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "mnist/lenet" 25 | -------------------------------------------------------------------------------- /examples/mnist/lenet_multistep_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
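# (With the "multistep" policy the effective rate is base_lr * gamma^k, where k
# is the number of stepvalue milestones already passed; with gamma: 0.9 and the
# five milestones below, the rate decays from 0.01 to about 0.01 * 0.9^5 ~= 0.0059
# by the end of training.)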
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "multistep" 15 | gamma: 0.9 16 | stepvalue: 5000 17 | stepvalue: 7000 18 | stepvalue: 8000 19 | stepvalue: 9000 20 | stepvalue: 9500 21 | # Display every 100 iterations 22 | display: 100 23 | # The maximum number of iterations 24 | max_iter: 10000 25 | # snapshot intermediate results 26 | snapshot: 5000 27 | snapshot_prefix: "examples/mnist/lenet_multistep" 28 | # solver mode: CPU or GPU 29 | solver_mode: GPU 30 | -------------------------------------------------------------------------------- /examples/mnist/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /examples/mnist/lenet_solver_adam.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | # this follows "ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION" 3 | net: "examples/mnist/lenet_train_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # All parameters are from the cited paper above 11 | base_lr: 0.001 12 | momentum: 0.9 13 | momentum2: 0.999 14 | # since Adam dynamically changes the learning rate, we set the base learning 15 | # rate to a fixed value 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | type: "Adam" 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /examples/mnist/lenet_solver_rmsprop.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
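# (The "inv" policy below computes base_lr * (1 + gamma * iter)^(-power), so the
# rate falls smoothly from 0.01 to roughly 0.01 * 2^(-0.75) ~= 0.006 at
# max_iter: 10000; RMSProp itself is configured at the bottom of this file with
# momentum: 0.0 and rms_decay: 0.98.)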
10 | base_lr: 0.01 11 | momentum: 0.0 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet_rmsprop" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | type: "RMSProp" 27 | rms_decay: 0.98 28 | -------------------------------------------------------------------------------- /examples/mnist/mnist_autoencoder_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder" 17 | momentum: 0.9 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | -------------------------------------------------------------------------------- /examples/mnist/mnist_autoencoder_solver_adadelta.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 1.0 9 | lr_policy: "fixed" 10 | momentum: 0.95 11 | delta: 1e-8 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adadelta_train" 17 | # solver mode: CPU or GPU 18 | solver_mode: GPU 19 | type: "AdaDelta" 20 | -------------------------------------------------------------------------------- /examples/mnist/mnist_autoencoder_solver_adagrad.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "fixed" 10 | display: 100 11 | max_iter: 65000 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train" 15 | # solver mode: CPU or GPU 16 | solver_mode: GPU 17 | type: "AdaGrad" 18 | -------------------------------------------------------------------------------- /examples/mnist/mnist_autoencoder_solver_nesterov.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train" 17 | momentum: 0.95 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | type: "Nesterov" 21 | 
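The MNIST solvers above mix four of Caffe's learning-rate policies (fixed, step, multistep, inv). The short sketch below illustrates how those schedules evolve; it mirrors the rules the solver comments describe and is not code from this repo.

```python
# Learning-rate schedules used by the MNIST solvers above.
def learning_rate(it, base_lr, policy, gamma=0.1, power=0.75,
                  stepsize=None, stepvalues=()):
    if policy == 'fixed':
        return base_lr
    if policy == 'step':        # drop by gamma every stepsize iterations
        return base_lr * gamma ** (it // stepsize)
    if policy == 'multistep':   # drop by gamma at each listed milestone
        passed = sum(1 for s in stepvalues if it >= s)
        return base_lr * gamma ** passed
    if policy == 'inv':         # smooth polynomial decay
        return base_lr * (1.0 + gamma * it) ** (-power)
    raise ValueError('unknown lr_policy: %s' % policy)

# e.g. lenet_multistep_solver.prototxt near the end of training:
print(learning_rate(9600, 0.01, 'multistep', gamma=0.9,
                    stepvalues=(5000, 7000, 8000, 9000, 9500)))  # ~0.0059
```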
-------------------------------------------------------------------------------- /examples/mnist/train_lenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@ 5 | -------------------------------------------------------------------------------- /examples/mnist/train_lenet_adam.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@ 5 | -------------------------------------------------------------------------------- /examples/mnist/train_lenet_consolidated.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/lenet_consolidated_solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/mnist/train_lenet_rmsprop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/lenet_solver_rmsprop.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/mnist/train_mnist_autoencoder.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/mnist/train_mnist_autoencoder_adadelta.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/mnist/train_mnist_autoencoder_adagrad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/mnist/train_mnist_autoencoder_nesterov.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt $@ 6 | -------------------------------------------------------------------------------- /examples/net_surgery/conv.prototxt: -------------------------------------------------------------------------------- 1 | # Simple single-layer network to showcase editing model parameters. 
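# (With the 1 x 1 x 100 x 100 input and the 5 x 5 kernel defined below, stride 1
# and no padding, the "conv" layer outputs 1 x 3 x 96 x 96: (100 - 5) / 1 + 1 = 96
# in each spatial dimension.)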
2 | name: "convolution" 3 | layer { 4 | name: "data" 5 | type: "Input" 6 | top: "data" 7 | input_param { shape: { dim: 1 dim: 1 dim: 100 dim: 100 } } 8 | } 9 | layer { 10 | name: "conv" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv" 14 | convolution_param { 15 | num_output: 3 16 | kernel_size: 5 17 | stride: 1 18 | weight_filler { 19 | type: "gaussian" 20 | std: 0.01 21 | } 22 | bias_filler { 23 | type: "constant" 24 | value: 0 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /examples/siamese/create_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into leveldb format. 3 | set -e 4 | 5 | EXAMPLES=./build/examples/siamese 6 | DATA=./data/mnist 7 | 8 | echo "Creating leveldb..." 9 | 10 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb 11 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb 12 | 13 | $EXAMPLES/convert_mnist_siamese_data.bin \ 14 | $DATA/train-images-idx3-ubyte \ 15 | $DATA/train-labels-idx1-ubyte \ 16 | ./examples/siamese/mnist_siamese_train_leveldb 17 | $EXAMPLES/convert_mnist_siamese_data.bin \ 18 | $DATA/t10k-images-idx3-ubyte \ 19 | $DATA/t10k-labels-idx1-ubyte \ 20 | ./examples/siamese/mnist_siamese_test_leveldb 21 | 22 | echo "Done." 23 | -------------------------------------------------------------------------------- /examples/siamese/mnist_siamese_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/siamese/mnist_siamese_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0000 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 50000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/siamese/mnist_siamese" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /examples/siamese/train_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt $@ 7 | -------------------------------------------------------------------------------- /examples/web_demo/exifutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script handles the skimage exif problem. 
3 | """ 4 | 5 | from PIL import Image 6 | import numpy as np 7 | 8 | ORIENTATIONS = { # used in apply_orientation 9 | 2: (Image.FLIP_LEFT_RIGHT,), 10 | 3: (Image.ROTATE_180,), 11 | 4: (Image.FLIP_TOP_BOTTOM,), 12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90), 13 | 6: (Image.ROTATE_270,), 14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270), 15 | 8: (Image.ROTATE_90,) 16 | } 17 | 18 | 19 | def open_oriented_im(im_path): 20 | im = Image.open(im_path) 21 | if hasattr(im, '_getexif'): 22 | exif = im._getexif() 23 | if exif is not None and 274 in exif: 24 | orientation = exif[274] 25 | im = apply_orientation(im, orientation) 26 | img = np.asarray(im).astype(np.float32) / 255. 27 | if img.ndim == 2: 28 | img = img[:, :, np.newaxis] 29 | img = np.tile(img, (1, 1, 3)) 30 | elif img.shape[2] == 4: 31 | img = img[:, :, :3] 32 | return img 33 | 34 | 35 | def apply_orientation(im, orientation): 36 | if orientation in ORIENTATIONS: 37 | for method in ORIENTATIONS[orientation]: 38 | im = im.transpose(method) 39 | return im 40 | -------------------------------------------------------------------------------- /examples/web_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | werkzeug 2 | flask 3 | tornado 4 | numpy 5 | pandas 6 | pillow 7 | pyyaml 8 | -------------------------------------------------------------------------------- /include/api/FRCNN/rpn_api.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "caffe/blob.hpp" 10 | #include "caffe/common.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/FRCNN/util/frcnn_param.hpp" 14 | #include "caffe/FRCNN/util/frcnn_helper.hpp" 15 | 16 | namespace FRCNN_API{ 17 | 18 | using std::vector; 19 | using caffe::Blob; 20 | using caffe::Net; 21 | using caffe::Frcnn::FrcnnParam; 22 | using caffe::Frcnn::Point4f; 23 | using caffe::Frcnn::BBox; 24 | 25 | class Rpn_Det { 26 | public: 27 | Rpn_Det(std::string &proto_file, std::string &model_file){ 28 | Set_Model(proto_file, model_file); 29 | } 30 | void Set_Model(std::string &proto_file, std::string &model_file); 31 | void predict(const cv::Mat &img_in, vector > &results); 32 | private: 33 | void preprocess(const cv::Mat &img_in, const int blob_idx); 34 | void preprocess(const vector &data, const int blob_idx); 35 | vector > > predict(const vector blob_names); 36 | boost::shared_ptr > net_; 37 | float mean_[3]; 38 | }; 39 | 40 | } 41 | -------------------------------------------------------------------------------- /include/api/api.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "caffe/blob.hpp" 10 | #include "caffe/common.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/FRCNN/util/frcnn_param.hpp" 14 | #include "caffe/FRCNN/util/frcnn_helper.hpp" 15 | #include "api/FRCNN/frcnn_api.hpp" 16 | #include "api/FRCNN/rpn_api.hpp" 17 | 18 | namespace API{ 19 | 20 | using std::vector; 21 | using caffe::Blob; 22 | using caffe::Net; 23 | using caffe::Frcnn::FrcnnParam; 24 | using caffe::Frcnn::Point4f; 25 | using caffe::Frcnn::BBox; 26 | using caffe::Frcnn::DataPrepare; 27 | using FRCNN_API::Detector; 28 | using FRCNN_API::Rpn_Det; 29 | 30 | inline void Set_Config(std::string default_config) { 31 | 
caffe::Frcnn::FrcnnParam::load_param(default_config); 32 | caffe::Frcnn::FrcnnParam::print_param(); 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /include/api/util/blowfish.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2015 By ihciah 3 | https://github.com/ihciah/CNN_forward 4 | modify by makefile@github 5 | */ 6 | #pragma once 7 | 8 | #ifndef __blowfish__ 9 | #define __blowfish__ 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | class Blowfish { 17 | public: 18 | Blowfish(const std::vector &key); 19 | std::vector Encrypt(const std::vector &src) const; 20 | std::vector Decrypt(const std::vector &src) const; 21 | void Encrypt(const char* in_filename, const char* out_filename); 22 | void Decrypt(const char* in_filename, const char* out_filename); 23 | std::vector ReadAllBytes(const char* filename); 24 | void WriteAllBytes(const char* filename, const std::vector &data); 25 | std::string getRandomTmpFile(); 26 | 27 | private: 28 | void SetKey(const char *key, size_t byte_length); 29 | void EncryptBlock(uint32_t *left, uint32_t *right) const; 30 | void DecryptBlock(uint32_t *left, uint32_t *right) const; 31 | uint32_t Feistel(uint32_t value) const; 32 | 33 | private: 34 | uint32_t pary_[18]; 35 | uint32_t sbox_[4][256]; 36 | }; 37 | 38 | #endif /* defined(__blowfish__) */ 39 | -------------------------------------------------------------------------------- /include/caffe/FRCNN/util/frcnn_helper.hpp: -------------------------------------------------------------------------------- 1 | // ------------------------------------------------------------------ 2 | // Xuanyi . Refer to Dong Jian 3 | // 2016/04/01 4 | // ------------------------------------------------------------------ 5 | #ifndef CAFFE_FRCNN_HELPER_HPP_ 6 | #define CAFFE_FRCNN_HELPER_HPP_ 7 | 8 | #include "caffe/FRCNN/util/frcnn_utils.hpp" 9 | 10 | namespace caffe { 11 | 12 | namespace Frcnn { 13 | 14 | template 15 | Point4f bbox_transform(const Point4f& ex_rois,const Point4f& gt_rois); 16 | 17 | template 18 | std::vector > bbox_transform(const std::vector >& ex_rois, 19 | const std::vector >& gt_rois); 20 | 21 | template 22 | Point4f bbox_transform_inv(const Point4f& box, const Point4f& delta); 23 | 24 | template 25 | std::vector > bbox_transform_inv(const Point4f& box, 26 | const std::vector >& deltas); 27 | 28 | } // namespace frcnn 29 | 30 | } // namespace caffe 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 
3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/parallel.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | #include "caffe/solver.hpp" 16 | #include "caffe/solver_factory.hpp" 17 | #include "caffe/util/benchmark.hpp" 18 | #include "caffe/util/io.hpp" 19 | #include "caffe/util/upgrade_proto.hpp" 20 | 21 | #endif // CAFFE_CAFFE_HPP_ 22 | -------------------------------------------------------------------------------- /include/caffe/layers/neuron_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_NEURON_LAYER_HPP_ 2 | #define CAFFE_NEURON_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief An interface for layers that take one blob as input (@f$ x @f$) 14 | * and produce one equally-sized blob as output (@f$ y @f$), where 15 | * each element of the output depends only on the corresponding input 16 | * element. 17 | */ 18 | template 19 | class NeuronLayer : public Layer { 20 | public: 21 | explicit NeuronLayer(const LayerParameter& param) 22 | : Layer(param) {} 23 | virtual void Reshape(const vector*>& bottom, 24 | const vector*>& top); 25 | 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | }; 29 | 30 | } // namespace caffe 31 | 32 | #endif // CAFFE_NEURON_LAYER_HPP_ 33 | -------------------------------------------------------------------------------- /include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 | 8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | virtual void Start(); 15 | virtual void Stop(); 16 | virtual float MilliSeconds(); 17 | virtual float MicroSeconds(); 18 | virtual float Seconds(); 19 | 20 | inline bool initted() { return initted_; } 21 | inline bool running() { return running_; } 22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 23 | 24 | protected: 25 | void Init(); 26 | 27 | bool initted_; 28 | bool running_; 29 | bool has_run_at_least_once_; 30 | #ifndef CPU_ONLY 31 | cudaEvent_t start_gpu_; 32 | cudaEvent_t stop_gpu_; 33 | #endif 34 | boost::posix_time::ptime start_cpu_; 35 | boost::posix_time::ptime stop_cpu_; 36 | float elapsed_milliseconds_; 37 | float elapsed_microseconds_; 38 | }; 39 | 40 | class CPUTimer : public Timer { 41 | public: 42 | explicit CPUTimer(); 43 | virtual ~CPUTimer() {} 44 | virtual void Start(); 45 | virtual void Stop(); 46 | virtual float MilliSeconds(); 47 | virtual float MicroSeconds(); 48 | }; 49 | 50 | } // namespace caffe 51 | 52 | #endif // CAFFE_UTIL_BENCHMARK_H_ 53 | -------------------------------------------------------------------------------- /include/caffe/util/blocking_queue.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 2 | #define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | namespace caffe { 8 | 9 | template 10 | class BlockingQueue { 11 | public: 
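  // Added commentary (not in the original header): a hedged sketch of the
  // producer/consumer pattern this interface supports (Datum is only an
  // illustrative element type):
  //   BlockingQueue<Datum*> q;
  //   q.push(datum);                          // producer (data-loading) thread
  //   Datum* d = q.pop("Waiting for data");   // consumer blocks; logs on wait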
12 | explicit BlockingQueue(); 13 | 14 | void push(const T& t); 15 | 16 | bool try_pop(T* t); 17 | 18 | // This logs a message if the threads needs to be blocked 19 | // useful for detecting e.g. when data feeding is too slow 20 | T pop(const string& log_on_wait = ""); 21 | 22 | bool try_peek(T* t); 23 | 24 | // Return element without removing it 25 | T peek(); 26 | 27 | size_t size() const; 28 | 29 | protected: 30 | /** 31 | Move synchronization fields out instead of including boost/thread.hpp 32 | to avoid a boost/NVCC issues (#1009, #1010) on OSX. Also fails on 33 | Linux CUDA 7.0.18. 34 | */ 35 | class sync; 36 | 37 | std::queue queue_; 38 | shared_ptr sync_; 39 | 40 | DISABLE_COPY_AND_ASSIGN(BlockingQueue); 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /include/caffe/util/db.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_DB_HPP 2 | #define CAFFE_UTIL_DB_HPP 3 | 4 | #include 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/proto/caffe.pb.h" 8 | 9 | namespace caffe { namespace db { 10 | 11 | enum Mode { READ, WRITE, NEW }; 12 | 13 | class Cursor { 14 | public: 15 | Cursor() { } 16 | virtual ~Cursor() { } 17 | virtual void SeekToFirst() = 0; 18 | virtual void Next() = 0; 19 | virtual string key() = 0; 20 | virtual string value() = 0; 21 | virtual bool valid() = 0; 22 | 23 | DISABLE_COPY_AND_ASSIGN(Cursor); 24 | }; 25 | 26 | class Transaction { 27 | public: 28 | Transaction() { } 29 | virtual ~Transaction() { } 30 | virtual void Put(const string& key, const string& value) = 0; 31 | virtual void Commit() = 0; 32 | 33 | DISABLE_COPY_AND_ASSIGN(Transaction); 34 | }; 35 | 36 | class DB { 37 | public: 38 | DB() { } 39 | virtual ~DB() { } 40 | virtual void Open(const string& source, Mode mode) = 0; 41 | virtual void Close() = 0; 42 | virtual Cursor* NewCursor() = 0; 43 | virtual Transaction* NewTransaction() = 0; 44 | 45 | DISABLE_COPY_AND_ASSIGN(DB); 46 | }; 47 | 48 | DB* GetDB(DataParameter::DB backend); 49 | DB* GetDB(const string& backend); 50 | 51 | } // namespace db 52 | } // namespace caffe 53 | 54 | #endif // CAFFE_UTIL_DB_HPP 55 | -------------------------------------------------------------------------------- /include/caffe/util/format.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_FORMAT_H_ 2 | #define CAFFE_UTIL_FORMAT_H_ 3 | 4 | #include // NOLINT(readability/streams) 5 | #include // NOLINT(readability/streams) 6 | #include 7 | 8 | namespace caffe { 9 | 10 | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) { 11 | std::ostringstream s; 12 | s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n; 13 | return s.str(); 14 | } 15 | 16 | } 17 | 18 | #endif // CAFFE_UTIL_FORMAT_H_ 19 | -------------------------------------------------------------------------------- /include/caffe/util/gpu_util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_GPU_UTIL_H_ 2 | #define CAFFE_UTIL_GPU_UTIL_H_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); 8 | 9 | template <> 10 | inline __device__ 11 | float caffe_gpu_atomic_add(const float val, float* address) { 12 | return atomicAdd(address, val); 13 | } 14 | 15 | // double atomicAdd implementation taken from: 16 | // 
http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG 17 | template <> 18 | inline __device__ 19 | double caffe_gpu_atomic_add(const double val, double* address) { 20 | unsigned long long int* address_as_ull = // NOLINT(runtime/int) 21 | // NOLINT_NEXT_LINE(runtime/int) 22 | reinterpret_cast(address); 23 | unsigned long long int old = *address_as_ull; // NOLINT(runtime/int) 24 | unsigned long long int assumed; // NOLINT(runtime/int) 25 | do { 26 | assumed = old; 27 | old = atomicCAS(address_as_ull, assumed, 28 | __double_as_longlong(val + __longlong_as_double(assumed))); 29 | } while (assumed != old); 30 | return __longlong_as_double(old); 31 | } 32 | 33 | } // namespace caffe 34 | 35 | #endif // CAFFE_UTIL_GPU_UTIL_H_ 36 | -------------------------------------------------------------------------------- /include/caffe/util/hdf5.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_HDF5_H_ 2 | #define CAFFE_UTIL_HDF5_H_ 3 | 4 | #include 5 | 6 | #include "hdf5.h" 7 | #include "hdf5_hl.h" 8 | 9 | #include "caffe/blob.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void hdf5_load_nd_dataset_helper( 15 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 16 | Blob* blob, bool reshape); 17 | 18 | template 19 | void hdf5_load_nd_dataset( 20 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 21 | Blob* blob, bool reshape = false); 22 | 23 | template 24 | void hdf5_save_nd_dataset( 25 | const hid_t file_id, const string& dataset_name, const Blob& blob, 26 | bool write_diff = false); 27 | 28 | int hdf5_load_int(hid_t loc_id, const string& dataset_name); 29 | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); 30 | string hdf5_load_string(hid_t loc_id, const string& dataset_name); 31 | void hdf5_save_string(hid_t loc_id, const string& dataset_name, 32 | const string& s); 33 | 34 | int hdf5_get_num_links(hid_t loc_id); 35 | string hdf5_get_name_by_idx(hid_t loc_id, int idx); 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_UTIL_HDF5_H_ 40 | -------------------------------------------------------------------------------- /include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer. 
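// Added commentary (not in the original header), a hedged sketch of the
// intended call pattern:
//   NetParameter in_param;             // parsed from a train/test prototxt
//   NetParameter split_param;
//   InsertSplits(in_param, &split_param);
//   // split_param now contains SplitLayers wherever one blob fed several layers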
12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | } // namespace caffe 25 | 26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 | -------------------------------------------------------------------------------- /include/caffe/util/nccl.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_NCCL_H_ 2 | #define CAFFE_UTIL_NCCL_H_ 3 | #ifdef USE_NCCL 4 | 5 | #include 6 | 7 | #include "caffe/common.hpp" 8 | 9 | #define NCCL_CHECK(condition) \ 10 | { \ 11 | ncclResult_t result = condition; \ 12 | CHECK_EQ(result, ncclSuccess) << " " \ 13 | << ncclGetErrorString(result); \ 14 | } 15 | 16 | namespace caffe { 17 | 18 | namespace nccl { 19 | 20 | template class dataType; 21 | 22 | template<> class dataType { 23 | public: 24 | static const ncclDataType_t type = ncclFloat; 25 | }; 26 | template<> class dataType { 27 | public: 28 | static const ncclDataType_t type = ncclDouble; 29 | }; 30 | 31 | } // namespace nccl 32 | 33 | } // namespace caffe 34 | 35 | #endif // end USE_NCCL 36 | 37 | #endif // CAFFE_UTIL_NCCL_H_ 38 | -------------------------------------------------------------------------------- /include/caffe/util/rng.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_RNG_HPP_ 44 | -------------------------------------------------------------------------------- /include/caffe/util/search_path.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_SEARCH_PATH_H_ 2 | #define CAFFE_UTIL_SEARCH_PATH_H_ 3 | 4 | #include 5 | #include 6 | 7 | namespace caffe { 8 | 9 | /** 10 | * @brief Parses a list of colon separated paths as a vector of paths. 
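 *        For example (illustrative), "/models:/data/models" parses to
 *        {"/models", "/data/models"}.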
11 | */ 12 | std::vector ParseSearchPath( 13 | std::string const & search_path 14 | ); 15 | 16 | } // namespace caffe 17 | 18 | #endif // CAFFE_UTIL_SEARCH_PATH_H_ 19 | -------------------------------------------------------------------------------- /include/caffe/util/signal_handler.h: -------------------------------------------------------------------------------- 1 | #ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 2 | #define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 3 | 4 | #include "caffe/proto/caffe.pb.h" 5 | #include "caffe/solver.hpp" 6 | 7 | namespace caffe { 8 | 9 | class SignalHandler { 10 | public: 11 | // Contructor. Specify what action to take when a signal is received. 12 | SignalHandler(SolverAction::Enum SIGINT_action, 13 | SolverAction::Enum SIGHUP_action); 14 | ~SignalHandler(); 15 | ActionCallback GetActionFunction(); 16 | private: 17 | SolverAction::Enum CheckForSignals() const; 18 | SolverAction::Enum SIGINT_action_; 19 | SolverAction::Enum SIGHUP_action_; 20 | }; 21 | 22 | } // namespace caffe 23 | 24 | #endif // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 25 | -------------------------------------------------------------------------------- /include/logger/vis_logger.hpp: -------------------------------------------------------------------------------- 1 | #ifndef VIS_LOGGER_H_ 2 | #define VIS_LOGGER_H_ 3 | 4 | #include 5 | 6 | //#ifdef USE_VISUALDL 7 | //#include "visualdl/logic/sdk.h" 8 | //#endif 9 | 10 | /** 11 | * external logger tools, such as VisualDL, Tensorboard, .etc 12 | * only support logging of scalar using VisualDL currently. 13 | */ 14 | class VisLogger { 15 | public: 16 | VisLogger(const std::string dir="./log", int sync_cycle=30, const std::string mode="train"); 17 | virtual ~VisLogger(); 18 | void log_scalar(const std::string scalar_name, int step, float value); 19 | private: 20 | void *logger; 21 | std::map scalar_map; 22 | void* get_scalar_by_name(const std::string scalar_name); 23 | }; 24 | 25 | #ifdef __cplusplus 26 | // C style, global VisLogger object 27 | extern "C" { 28 | #endif 29 | void log_scalar(const std::string scalar_name, int step, float value); 30 | #ifdef __cplusplus 31 | } 32 | #endif 33 | 34 | #endif // VIS_LOGGER_H_ 35 | -------------------------------------------------------------------------------- /include/pybind11/common.h: -------------------------------------------------------------------------------- 1 | #include "detail/common.h" 2 | #warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'." 
3 | -------------------------------------------------------------------------------- /include/yaml-cpp/anchor.h: -------------------------------------------------------------------------------- 1 | #ifndef ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | 12 | namespace YAML { 13 | typedef std::size_t anchor_t; 14 | const anchor_t NullAnchor = 0; 15 | } 16 | 17 | #endif // ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 18 | -------------------------------------------------------------------------------- /include/yaml-cpp/contrib/anchordict.h: -------------------------------------------------------------------------------- 1 | #ifndef ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | 12 | #include "../anchor.h" 13 | 14 | namespace YAML { 15 | /// AnchorDict 16 | /// . An object that stores and retrieves values correlating to anchor_t 17 | /// values. 18 | /// . Efficient implementation that can make assumptions about how anchor_t 19 | /// values are assigned by the Parser class. 20 | template 21 | class AnchorDict { 22 | public: 23 | void Register(anchor_t anchor, T value) { 24 | if (anchor > m_data.size()) { 25 | m_data.resize(anchor); 26 | } 27 | m_data[anchor - 1] = value; 28 | } 29 | 30 | T Get(anchor_t anchor) const { return m_data[anchor - 1]; } 31 | 32 | private: 33 | std::vector m_data; 34 | }; 35 | } 36 | 37 | #endif // ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 38 | -------------------------------------------------------------------------------- /include/yaml-cpp/emitterdef.h: -------------------------------------------------------------------------------- 1 | #ifndef EMITTERDEF_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define EMITTERDEF_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | namespace YAML { 11 | struct EmitterNodeType { 12 | enum value { NoType, Property, Scalar, FlowSeq, BlockSeq, FlowMap, BlockMap }; 13 | }; 14 | } 15 | 16 | #endif // EMITTERDEF_H_62B23520_7C8E_11DE_8A39_0800200C9A66 17 | -------------------------------------------------------------------------------- /include/yaml-cpp/emitterstyle.h: -------------------------------------------------------------------------------- 1 | #ifndef EMITTERSTYLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define EMITTERSTYLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | namespace YAML { 11 | struct EmitterStyle { 12 | enum value { Default, Block, Flow }; 13 | }; 14 | } 15 | 16 | #endif // EMITTERSTYLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 17 | -------------------------------------------------------------------------------- /include/yaml-cpp/mark.h: 
-------------------------------------------------------------------------------- 1 | #ifndef MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | 12 | namespace YAML { 13 | struct YAML_CPP_API Mark { 14 | Mark() : pos(0), line(0), column(0) {} 15 | 16 | static const Mark null_mark() { return Mark(-1, -1, -1); } 17 | 18 | bool is_null() const { return pos == -1 && line == -1 && column == -1; } 19 | 20 | int pos; 21 | int line, column; 22 | 23 | private: 24 | Mark(int pos_, int line_, int column_) 25 | : pos(pos_), line(line_), column(column_) {} 26 | }; 27 | } 28 | 29 | #endif // MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66 30 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/detail/bool_type.h: -------------------------------------------------------------------------------- 1 | #ifndef NODE_DETAIL_BOOL_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define NODE_DETAIL_BOOL_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | namespace YAML { 11 | namespace detail { 12 | struct unspecified_bool { 13 | struct NOT_ALLOWED; 14 | static void true_value(NOT_ALLOWED*) {} 15 | }; 16 | typedef void (*unspecified_bool_type)(unspecified_bool::NOT_ALLOWED*); 17 | } 18 | } 19 | 20 | #define YAML_CPP_OPERATOR_BOOL() \ 21 | operator YAML::detail::unspecified_bool_type() const { \ 22 | return this->operator!() ? 
0 \ 23 | : &YAML::detail::unspecified_bool::true_value; \ 24 | } 25 | 26 | #endif // NODE_DETAIL_BOOL_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 27 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/detail/iterator_fwd.h: -------------------------------------------------------------------------------- 1 | #ifndef VALUE_DETAIL_ITERATOR_FWD_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_DETAIL_ITERATOR_FWD_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | #include 12 | #include 13 | #include 14 | 15 | namespace YAML { 16 | class node; 17 | 18 | namespace detail { 19 | struct iterator_value; 20 | template 21 | class iterator_base; 22 | } 23 | 24 | typedef detail::iterator_base iterator; 25 | typedef detail::iterator_base const_iterator; 26 | } 27 | 28 | #endif // VALUE_DETAIL_ITERATOR_FWD_H_62B23520_7C8E_11DE_8A39_0800200C9A66 29 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/detail/memory.h: -------------------------------------------------------------------------------- 1 | #ifndef VALUE_DETAIL_MEMORY_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_DETAIL_MEMORY_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | 12 | #include "yaml-cpp/dll.h" 13 | #include "yaml-cpp/node/ptr.h" 14 | 15 | namespace YAML { 16 | namespace detail { 17 | class node; 18 | } // namespace detail 19 | } // namespace YAML 20 | 21 | namespace YAML { 22 | namespace detail { 23 | class YAML_CPP_API memory { 24 | public: 25 | node& create_node(); 26 | void merge(const memory& rhs); 27 | 28 | private: 29 | typedef std::set Nodes; 30 | Nodes m_nodes; 31 | }; 32 | 33 | class YAML_CPP_API memory_holder { 34 | public: 35 | memory_holder() : m_pMemory(new memory) {} 36 | 37 | node& create_node() { return m_pMemory->create_node(); } 38 | void merge(memory_holder& rhs); 39 | 40 | private: 41 | shared_memory m_pMemory; 42 | }; 43 | } 44 | } 45 | 46 | #endif // VALUE_DETAIL_MEMORY_H_62B23520_7C8E_11DE_8A39_0800200C9A66 47 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/emit.h: -------------------------------------------------------------------------------- 1 | #ifndef NODE_EMIT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define NODE_EMIT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | #include 12 | 13 | #include "yaml-cpp/dll.h" 14 | 15 | namespace YAML { 16 | class Emitter; 17 | class Node; 18 | 19 | YAML_CPP_API Emitter& operator<<(Emitter& out, const Node& node); 20 | YAML_CPP_API std::ostream& operator<<(std::ostream& out, const Node& node); 21 | 22 | YAML_CPP_API std::string Dump(const Node& node); 23 | } 24 | 25 | #endif // NODE_EMIT_H_62B23520_7C8E_11DE_8A39_0800200C9A66 26 | -------------------------------------------------------------------------------- 
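(Added note, not part of the original dump: the yaml-cpp entry points declared in the surrounding headers, Load/LoadFile in node/parse.h and Dump/operator<< in node/emit.h, combine as in the following hedged C++ sketch; the keys and values are placeholders.)

#include <iostream>
#include "yaml-cpp/yaml.h"

int main() {
  // Parse an in-memory YAML document (Load is declared in node/parse.h).
  YAML::Node cfg = YAML::Load("base_lr: 0.001\nsolver: sgd");
  std::cout << cfg["base_lr"].as<double>() << std::endl;
  // Mutate and re-emit (Dump is declared in node/emit.h).
  cfg["momentum"] = 0.9;
  std::cout << YAML::Dump(cfg) << std::endl;
  return 0;
}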
/include/yaml-cpp/node/iterator.h: -------------------------------------------------------------------------------- 1 | #ifndef VALUE_ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | #include "yaml-cpp/node/node.h" 12 | #include "yaml-cpp/node/detail/iterator_fwd.h" 13 | #include "yaml-cpp/node/detail/iterator.h" 14 | #include 15 | #include 16 | #include 17 | 18 | namespace YAML { 19 | namespace detail { 20 | struct iterator_value : public Node, std::pair { 21 | iterator_value() {} 22 | explicit iterator_value(const Node& rhs) 23 | : Node(rhs), 24 | std::pair(Node(Node::ZombieNode), Node(Node::ZombieNode)) {} 25 | explicit iterator_value(const Node& key, const Node& value) 26 | : Node(Node::ZombieNode), std::pair(key, value) {} 27 | }; 28 | } 29 | } 30 | 31 | #endif // VALUE_ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 32 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/parse.h: -------------------------------------------------------------------------------- 1 | #ifndef VALUE_PARSE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_PARSE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | #include "yaml-cpp/dll.h" 15 | 16 | namespace YAML { 17 | class Node; 18 | 19 | YAML_CPP_API Node Load(const std::string& input); 20 | YAML_CPP_API Node Load(const char* input); 21 | YAML_CPP_API Node Load(std::istream& input); 22 | YAML_CPP_API Node LoadFile(const std::string& filename); 23 | 24 | YAML_CPP_API std::vector LoadAll(const std::string& input); 25 | YAML_CPP_API std::vector LoadAll(const char* input); 26 | YAML_CPP_API std::vector LoadAll(std::istream& input); 27 | YAML_CPP_API std::vector LoadAllFromFile(const std::string& filename); 28 | } 29 | 30 | #endif // VALUE_PARSE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 31 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/ptr.h: -------------------------------------------------------------------------------- 1 | #ifndef VALUE_PTR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_PTR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | #include 12 | 13 | namespace YAML { 14 | namespace detail { 15 | class node; 16 | class node_ref; 17 | class node_data; 18 | class memory; 19 | class memory_holder; 20 | 21 | typedef boost::shared_ptr shared_node; 22 | typedef boost::shared_ptr shared_node_ref; 23 | typedef boost::shared_ptr shared_node_data; 24 | typedef boost::shared_ptr shared_memory_holder; 25 | typedef boost::shared_ptr shared_memory; 26 | } 27 | } 28 | 29 | #endif // VALUE_PTR_H_62B23520_7C8E_11DE_8A39_0800200C9A66 30 | -------------------------------------------------------------------------------- /include/yaml-cpp/node/type.h: 
-------------------------------------------------------------------------------- 1 | #ifndef VALUE_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define VALUE_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | namespace YAML { 11 | struct NodeType { 12 | enum value { Undefined, Null, Scalar, Sequence, Map }; 13 | }; 14 | } 15 | 16 | #endif // VALUE_TYPE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 17 | -------------------------------------------------------------------------------- /include/yaml-cpp/noncopyable.h: -------------------------------------------------------------------------------- 1 | #ifndef NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | 12 | namespace YAML { 13 | // this is basically boost::noncopyable 14 | class YAML_CPP_API noncopyable { 15 | protected: 16 | noncopyable() {} 17 | ~noncopyable() {} 18 | 19 | private: 20 | noncopyable(const noncopyable&); 21 | const noncopyable& operator=(const noncopyable&); 22 | }; 23 | } 24 | 25 | #endif // NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66 26 | -------------------------------------------------------------------------------- /include/yaml-cpp/null.h: -------------------------------------------------------------------------------- 1 | #ifndef NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/dll.h" 11 | 12 | namespace YAML { 13 | class Node; 14 | 15 | struct YAML_CPP_API _Null {}; 16 | inline bool operator==(const _Null&, const _Null&) { return true; } 17 | inline bool operator!=(const _Null&, const _Null&) { return false; } 18 | 19 | YAML_CPP_API bool IsNull(const Node& node); // old API only 20 | 21 | extern YAML_CPP_API _Null Null; 22 | } 23 | 24 | #endif // NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66 25 | -------------------------------------------------------------------------------- /include/yaml-cpp/parser.h: -------------------------------------------------------------------------------- 1 | #ifndef PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | #include 12 | 13 | #include "yaml-cpp/dll.h" 14 | #include "yaml-cpp/noncopyable.h" 15 | 16 | namespace YAML { 17 | class EventHandler; 18 | class Node; 19 | class Scanner; 20 | struct Directives; 21 | struct Token; 22 | 23 | class YAML_CPP_API Parser : private noncopyable { 24 | public: 25 | Parser(); 26 | Parser(std::istream& in); 27 | ~Parser(); 28 | 29 | operator bool() const; 30 | 31 | void Load(std::istream& in); 32 | bool HandleNextDocument(EventHandler& eventHandler); 33 | 34 | void PrintTokens(std::ostream& 
out); 35 | 36 | private: 37 | void ParseDirectives(); 38 | void HandleDirective(const Token& token); 39 | void HandleYamlDirective(const Token& token); 40 | void HandleTagDirective(const Token& token); 41 | 42 | private: 43 | std::auto_ptr m_pScanner; 44 | std::auto_ptr m_pDirectives; 45 | }; 46 | } 47 | 48 | #endif // PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66 49 | -------------------------------------------------------------------------------- /include/yaml-cpp/yaml-cpp-0.5.3/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2008-2015 Jesse Beder. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /include/yaml-cpp/yaml.h: -------------------------------------------------------------------------------- 1 | #ifndef YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include "yaml-cpp/parser.h" 11 | #include "yaml-cpp/emitter.h" 12 | #include "yaml-cpp/emitterstyle.h" 13 | #include "yaml-cpp/stlemitter.h" 14 | #include "yaml-cpp/exceptions.h" 15 | 16 | #include "yaml-cpp/node/node.h" 17 | #include "yaml-cpp/node/impl.h" 18 | #include "yaml-cpp/node/convert.h" 19 | #include "yaml-cpp/node/iterator.h" 20 | #include "yaml-cpp/node/detail/impl.h" 21 | #include "yaml-cpp/node/parse.h" 22 | #include "yaml-cpp/node/emit.h" 23 | 24 | #endif // YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66 25 | -------------------------------------------------------------------------------- /matlab/+caffe/+test/test_io.m: -------------------------------------------------------------------------------- 1 | classdef test_io < matlab.unittest.TestCase 2 | methods (Test) 3 | function test_read_write_mean(self) 4 | % randomly generate mean data 5 | width = 200; 6 | height = 300; 7 | channels = 3; 8 | mean_data_write = 255 * rand(width, height, channels, 'single'); 9 | % write mean data to binary proto 10 | mean_proto_file = tempname(); 11 | caffe.io.write_mean(mean_data_write, mean_proto_file); 12 | % read mean data from saved binary proto and test whether they are equal 13 | mean_data_read = caffe.io.read_mean(mean_proto_file); 14 | self.verifyEqual(mean_data_write, mean_data_read) 15 | 
delete(mean_proto_file); 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /matlab/+caffe/Layer.m: -------------------------------------------------------------------------------- 1 | classdef Layer < handle 2 | % Wrapper class of caffe::Layer in matlab 3 | 4 | properties (Access = private) 5 | hLayer_self 6 | attributes 7 | % attributes fields: 8 | % hBlob_blobs 9 | end 10 | properties (SetAccess = private) 11 | params 12 | end 13 | 14 | methods 15 | function self = Layer(hLayer_layer) 16 | CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle'); 17 | 18 | % setup self handle and attributes 19 | self.hLayer_self = hLayer_layer; 20 | self.attributes = caffe_('layer_get_attr', self.hLayer_self); 21 | 22 | % setup weights 23 | self.params = caffe.Blob.empty(); 24 | for n = 1:length(self.attributes.hBlob_blobs) 25 | self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n)); 26 | end 27 | end 28 | function layer_type = type(self) 29 | layer_type = caffe_('layer_get_type', self.hLayer_self); 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /matlab/+caffe/get_solver.m: -------------------------------------------------------------------------------- 1 | function solver = get_solver(solver_file) 2 | % solver = get_solver(solver_file) 3 | % Construct a Solver object from solver_file 4 | 5 | CHECK(ischar(solver_file), 'solver_file must be a string'); 6 | CHECK_FILE_EXIST(solver_file); 7 | pSolver = caffe_('get_solver', solver_file); 8 | solver = caffe.Solver(pSolver); 9 | 10 | end 11 | -------------------------------------------------------------------------------- /matlab/+caffe/imagenet/ilsvrc_2012_mean.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat -------------------------------------------------------------------------------- /matlab/+caffe/private/CHECK.m: -------------------------------------------------------------------------------- 1 | function CHECK(expr, error_msg) 2 | 3 | if ~expr 4 | error(error_msg); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/+caffe/private/CHECK_FILE_EXIST.m: -------------------------------------------------------------------------------- 1 | function CHECK_FILE_EXIST(filename) 2 | 3 | if exist(filename, 'file') == 0 4 | error('%s does not exist', filename); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/+caffe/private/is_valid_handle.m: -------------------------------------------------------------------------------- 1 | function valid = is_valid_handle(hObj) 2 | % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key') 3 | % Check if a handle is valid (has the right data type and init_key matches) 4 | % Use is_valid_handle('get_new_init_key') to get new init_key from C++; 5 | 6 | % a handle is a struct array with the following fields 7 | % (uint64) ptr : the pointer to the C++ object 8 | % (double) init_key : caffe initialization key 9 | 10 | persistent init_key; 11 | if isempty(init_key) 12 | init_key = caffe_('get_init_key'); 13 | end 14 | 15 | % is_valid_handle('get_new_init_key') to get new init_key from C++; 16 | if ischar(hObj) && strcmp(hObj, 'get_new_init_key') 17 | init_key = caffe_('get_init_key'); 18 | 
return 19 | else 20 | % check whether data types are correct and init_key matches 21 | valid = isstruct(hObj) ... 22 | && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ... 23 | && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ... 24 | && hObj.init_key == init_key; 25 | end 26 | 27 | end 28 | -------------------------------------------------------------------------------- /matlab/+caffe/reset_all.m: -------------------------------------------------------------------------------- 1 | function reset_all() 2 | % reset_all() 3 | % clear all solvers and stand-alone nets and reset Caffe to initial status 4 | 5 | caffe_('reset'); 6 | is_valid_handle('get_new_init_key'); 7 | 8 | end 9 | -------------------------------------------------------------------------------- /matlab/+caffe/run_tests.m: -------------------------------------------------------------------------------- 1 | function results = run_tests() 2 | % results = run_tests() 3 | % run all tests in this caffe matlab wrapper package 4 | 5 | % use CPU for testing 6 | caffe.set_mode_cpu(); 7 | 8 | % reset caffe before testing 9 | caffe.reset_all(); 10 | 11 | % put all test cases here 12 | results = [... 13 | run(caffe.test.test_net) ... 14 | run(caffe.test.test_solver) ... 15 | run(caffe.test.test_io) ]; 16 | 17 | % reset caffe after testing 18 | caffe.reset_all(); 19 | 20 | end 21 | -------------------------------------------------------------------------------- /matlab/+caffe/set_device.m: -------------------------------------------------------------------------------- 1 | function set_device(device_id) 2 | % set_device(device_id) 3 | % set Caffe's GPU device ID 4 | 5 | CHECK(isscalar(device_id) && device_id >= 0, ... 6 | 'device_id must be non-negative integer'); 7 | device_id = double(device_id); 8 | 9 | caffe_('set_device', device_id); 10 | 11 | end 12 | -------------------------------------------------------------------------------- /matlab/+caffe/set_mode_cpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_cpu() 2 | % set_mode_cpu() 3 | % set Caffe to CPU mode 4 | 5 | caffe_('set_mode_cpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/+caffe/set_mode_gpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_gpu() 2 | % set_mode_gpu() 3 | % set Caffe to GPU mode 4 | 5 | caffe_('set_mode_gpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/+caffe/version.m: -------------------------------------------------------------------------------- 1 | function version_str = version() 2 | % version() 3 | % show Caffe's version. 
4 | 5 | version_str = caffe_('version'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/Cifar10/Cifar10.m: -------------------------------------------------------------------------------- 1 | clear;clc; 2 | 3 | model_file = 'cifar10_res20_trainval.proto'; 4 | weights_file = 'cifar10_res20_iter_64000.caffemodel'; 5 | 6 | current_dir = pwd; 7 | caffe_dir = '../../'; cd(caffe_dir); caffe_dir = pwd; 8 | cd(current_dir); 9 | addpath(fullfile(caffe_dir,'matlab')); 10 | caffe.reset_all(); 11 | caffe.set_mode_gpu(); 12 | caffe.set_device(4); 13 | 14 | rand('seed',0); 15 | cifar10_train_data = load(fullfile(caffe_dir,'examples','cifar10','cifar10_train_lmdb.mat')); 16 | cifar10_train_data.image = single(permute(cifar10_train_data.image,[3,2,4,1])); 17 | cifar10_test_data = load(fullfile(caffe_dir,'examples','cifar10','cifar10_test_lmdb.mat')); 18 | cifar10_test_data.image = single(permute(cifar10_test_data.image,[3,2,4,1])); 19 | train_num = size(cifar10_train_data.label, 1); 20 | test_num = size(cifar10_test_data.label, 1); 21 | 22 | %mean_cifar10 = load('mean_cifar10.mat'); 23 | %mean_cifar10 = mean_cifar10.mean; 24 | %mean_cifar10 = permute(mean_cifar10,[3,2,1]); 25 | 26 | %sub mean 27 | %cifar10_train_data.image = cifar10_train_data.image - single(repmat(mean_cifar10,1,1,1,train_num)); 28 | %cifar10_test_data.image = cifar10_test_data.image - single(repmat(mean_cifar10,1,1,1,test_num)); 29 | 30 | net = caffe.Net(model_file, weights_file, 'test'); 31 | -------------------------------------------------------------------------------- /matlab/Cifar10/GetCifar10Mat.sh: -------------------------------------------------------------------------------- 1 | python ./python/ConvertData/LMDB.py --Type Train 2 | python ./python/ConvertData/LMDB.py --Type Test 3 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/.gitignore: -------------------------------------------------------------------------------- 1 | *.txt 2 | *.train 3 | *.val 4 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/dataset/.gitignore: -------------------------------------------------------------------------------- 1 | *.txt 2 | *.train 3 | *.val 4 | *.train_val 5 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/googlenet_v1/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "matlab/FRCNN/For_LOC/eight/googlenet_v1/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 40000 6 | max_iter: 55000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 5000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "matlab/FRCNN/For_LOC/eight/googlenet_v1/snapshot/googlenet_v1_faster_rcnn" 15 | iter_size: 1 16 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/googlenet_v1/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/eight/googlenet_v1/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/eight/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/eight/googlenet_v1/out/8_test_list_googlenet_v1_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/googlenet_v1/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/googlenet_v1/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/eight/googlenet_v1/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/googlenet_v1/bvlc_googlenet.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/eight/googlenet_v1/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/eight/googlenet_v1/snapshot/googlenet_v1_faster_rcnn_iter_55000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/eight/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/eight/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res101/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "matlab/FRCNN/For_LOC/eight/res101/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 40000 8 | max_iter: 55000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 5000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "matlab/FRCNN/For_LOC/eight/res101/snapshot/res101_faster_rcnn" 17 | iter_size: 1 18 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res101/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/eight/res101/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/res101/res101_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/eight/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/eight/res101/out/8_test_list_res101_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res101/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/res101/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/eight/res101/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/res101/ResNet-101-model.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/eight/res101/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/eight/res101/snapshot/res101_faster_rcnn_iter_55000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/eight/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/eight/res101/res101_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res152/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "matlab/FRCNN/For_LOC/eight/res152/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 40000 8 | max_iter: 55000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 5000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "matlab/FRCNN/For_LOC/eight/res152/snapshot/res152_faster_rcnn" 17 | iter_size: 1 18 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res152/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/res152/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/eight/res152/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/res152/res152_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/eight/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/eight/res152/out/8_test_list_res152_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/res152/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/res152/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/eight/res152/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/res152/ResNet-152-model.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/eight/res152/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/eight/res152/snapshot/res152_faster_rcnn_iter_55000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/eight/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/eight/res152/res152_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/vgg19/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "matlab/FRCNN/For_LOC/eight/vgg19/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 40000 6 | max_iter: 55000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 5000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "matlab/FRCNN/For_LOC/eight/vgg19/snapshot/vgg19_faster_rcnn" 15 | iter_size: 1 16 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/vgg19/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/vgg19/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/eight/vgg19/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/vgg19/vgg19_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/eight/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/eight/vgg19/out/8_test_list_vgg19_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/eight/vgg19/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/eight/vgg19/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/eight/vgg19/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/eight/vgg19/VGG19.v2.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/eight/vgg19/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/eight/vgg19/snapshot/vgg19_faster_rcnn_iter_55000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/eight/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/eight/vgg19/vgg19_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/python/eval.sh: -------------------------------------------------------------------------------- 1 | python calculate_trecvid_ap.py --overlap 0.5 --gt trecvid_7.val --answer "trecvid_7_test_res101.frcnn;trecvid_7_test_vgg16.frcnn;trecvid_7_test_googlenet_v1.frcnn" | tee -a ap.log 2 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/googlenet_v1/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "matlab/FRCNN/For_LOC/two/googlenet_v1/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 70000 6 | max_iter: 90000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 10000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "matlab/FRCNN/For_LOC/two/googlenet_v1/snapshot/googlenet_v1_faster_rcnn" 15 | iter_size: 1 16 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/googlenet_v1/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/googlenet_v1/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/two/googlenet_v1/out/2_test_list_googlenet_v1_${pid}.frcnn \ 21 | --max_per_image 100 22 | 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/googlenet_v1/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/googlenet_v1/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/two/googlenet_v1/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/googlenet_v1/bvlc_googlenet.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/two/googlenet_v1/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/two/googlenet_v1/snapshot/googlenet_v1_faster_rcnn_iter_90000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/two/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/two/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res101/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "matlab/FRCNN/For_LOC/two/res101/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 70000 8 | max_iter: 90000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "matlab/FRCNN/For_LOC/two/res101/snapshot/res101_faster_rcnn" 17 | iter_size: 1 18 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res101/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res101/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res101/res101_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/two/res101/out/2_test_list_res101_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res101/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res101/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/two/res101/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res101/ResNet-101-model.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/two/res101/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/two/res101/snapshot/res101_faster_rcnn_iter_90000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/two/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/two/res101/res101_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res152/solver.proto: -------------------------------------------------------------------------------- 1 | # Resnet 101 solver file 2 | # When 07: 72.x+%, using 07+12, 79.x+%(18w iterations) 3 | train_net: "matlab/FRCNN/For_LOC/two/res152/train.proto" 4 | base_lr: 0.001 5 | lr_policy: "multistep" 6 | gamma: 0.1 7 | stepvalue: 70000 8 | max_iter: 90000 9 | display: 20 10 | average_loss: 100 11 | momentum: 0.9 12 | weight_decay: 0.0001 13 | # function 14 | snapshot: 10000 15 | # We still use the snapshot prefix, though 16 | snapshot_prefix: "matlab/FRCNN/For_LOC/two/res152/snapshot/res152_faster_rcnn" 17 | iter_size: 1 18 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res152/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res152/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res152/res152_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/two/res152/out/2_test_list_res152_${pid}.frcnn \ 21 | --max_per_image 100 22 | 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res152/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res152/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/two/res152/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res152/ResNet-152-model.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/two/res152/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/two/res152/snapshot/res152_faster_rcnn_iter_90000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/two/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/two/res152/res152_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/res152_merge_other/vgg19_pure_rois/loc_merge.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_merge_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res152_merge_other/vgg19_pure_rois/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/vgg19/vgg19_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/LOC/LOC_OUT/TWO_${2}_res152*.frcnn \ 20 | --out_file matlab/FRCNN/For_LOC/two/res152_merge_other/vgg19_pure_rois/out/TWO_${2}_vgg19_$$.score 21 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/googlenet_S1_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/googlenet_v1/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/googlenet_v1/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_1_5296 \ 20 | --out_file matlab/FRCNN/For_LOC/two/googlenet_v1/out/TWO_1_5296_test_list_googlenet_v1_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/googlenet_S2_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/googlenet_v1/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/googlenet_v1/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/googlenet_v1/googlenet_v1_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_5297_9958 \ 20 | --out_file matlab/FRCNN/For_LOC/two/googlenet_v1/out/TWO_5297_9958_test_list_googlenet_v1_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/res101_S1_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res101/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res101/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res101/res101_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_1_5296 \ 20 | --out_file matlab/FRCNN/For_LOC/two/res101/out/TWO_1_5296_test_list_res101_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/res101_S2_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res101/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res101/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res101/res101_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_5297_9958 \ 20 | --out_file matlab/FRCNN/For_LOC/two/res101/out/TWO_5297_9958_test_list_res101_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/res152_S1_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res152/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res152/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res152/res152_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_1_5296 \ 20 | --out_file matlab/FRCNN/For_LOC/two/res152/out/TWO_1_5296_test_list_res152_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/res152_S2_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/res152/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/res152/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/res152/res152_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_5297_9958 \ 20 | --out_file matlab/FRCNN/For_LOC/two/res152/out/TWO_5297_9958_test_list_res152_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/vgg19_S1_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/vgg19/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/vgg19/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/vgg19/vgg19_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_1_5296 \ 20 | --out_file matlab/FRCNN/For_LOC/two/vgg19/out/TWO_1_5296_test_list_vgg19_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/shell/vgg19_S2_test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! -n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/vgg19/log $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/vgg19/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/vgg19/vgg19_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list_5297_9958 \ 20 | --out_file matlab/FRCNN/For_LOC/two/vgg19/out/TWO_5297_9958_test_list_vgg19_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/vgg19/solver.proto: -------------------------------------------------------------------------------- 1 | train_net: "matlab/FRCNN/For_LOC/two/vgg19/train.proto" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 70000 6 | max_iter: 90000 7 | display: 20 8 | average_loss: 100 9 | momentum: 0.9 10 | weight_decay: 0.0005 11 | # function 12 | snapshot: 10000 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "matlab/FRCNN/For_LOC/two/vgg19/snapshot/vgg19_faster_rcnn" 15 | iter_size: 1 16 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/vgg19/test_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | # determine whether $1 is empty 4 | if [ ! 
-n "$1" ] ;then 5 | echo "$1 is empty, default is 0" 6 | gpu=0 7 | else 8 | echo "use $1-th gpu" 9 | gpu=$1 10 | fi 11 | pid=$$ 12 | BUILD=build/examples/FRCNN/loc_test_frcnn.bin 13 | 14 | time $BUILD --gpu $gpu \ 15 | --model matlab/FRCNN/For_LOC/two/vgg19/test.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/vgg19/vgg19_faster_rcnn_final.caffemodel \ 17 | --default_c matlab/FRCNN/For_LOC/two/trecvid.json \ 18 | --image_root matlab/FRCNN/For_LOC/LOC/filtered \ 19 | --image_list matlab/FRCNN/For_LOC/dataset/test.list \ 20 | --out_file matlab/FRCNN/For_LOC/two/vgg19/out/2_test_list_vgg19_${pid}.frcnn \ 21 | --max_per_image 100 22 | -------------------------------------------------------------------------------- /matlab/FRCNN/For_LOC/two/vgg19/train_frcnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script test four voc images using faster rcnn end-to-end trained model (ZF-Model) 3 | if [ ! -n "$1" ] ;then 4 | echo "$1 is empty, default is 0" 5 | gpu=0 6 | else 7 | echo "use $1-th gpu" 8 | gpu=$1 9 | fi 10 | 11 | CAFFE=build/tools/caffe 12 | 13 | time GLOG_log_dir=matlab/FRCNN/For_LOC/two/vgg19/log $CAFFE train \ 14 | --gpu $gpu \ 15 | --solver matlab/FRCNN/For_LOC/two/vgg19/solver.proto \ 16 | --weights matlab/FRCNN/For_LOC/two/vgg19/VGG19.v2.caffemodel 17 | 18 | time python examples/FRCNN/convert_model.py \ 19 | --model matlab/FRCNN/For_LOC/two/vgg19/test.proto \ 20 | --weights matlab/FRCNN/For_LOC/two/vgg19/snapshot/vgg19_faster_rcnn_iter_90000.caffemodel \ 21 | --config matlab/FRCNN/For_LOC/two/trecvid.json \ 22 | --net_out matlab/FRCNN/For_LOC/two/vgg19/vgg19_faster_rcnn_final.caffemodel 23 | -------------------------------------------------------------------------------- /matlab/hdf5creation/.gitignore: -------------------------------------------------------------------------------- 1 | *.h5 2 | list.txt 3 | -------------------------------------------------------------------------------- /python/ConvertData/binaryproto2mat.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import lmdb 4 | from PIL import Image 5 | import scipy.io as sio 6 | import os 7 | if os.path.exists('./python/caffe'): 8 | sys.path.append('./python') 9 | else: 10 | print 'Error : caffe(pycaffe) could not be found' 11 | sys.exit(0) 12 | caffe_root = '%s'%(os.getcwd()) 13 | import caffe 14 | from caffe.proto import caffe_pb2 15 | 16 | def binaryproto2img(): 17 | 18 | mean = caffe_pb2.BlobProto() 19 | data = open('{}/examples/cifar10/mean.binaryproto'.format(caffe_root)).read() 20 | mean.ParseFromString(data) 21 | arr = np.array(caffe.io.blobproto_to_array(mean)) 22 | print 'arr.shape : {}'.format(arr.shape) 23 | print 'arr[0].shape : {}'.format(arr[0].shape) 24 | 25 | """ 26 | im = np.zeros((1024,2048,3),dtype=np.float32) 27 | im[:,:,0] = arr[0][0,:,:] 28 | im[:,:,1] = arr[0][1,:,:] 29 | im[:,:,2] = arr[0][2,:,:] 30 | im = Image.fromarray(im) 31 | im.save('mean_cityscape_img_train_sp_500x500.jpg') 32 | """ 33 | 34 | sio.savemat('mean_cifar10.mat',{'mean':arr[0]}) 35 | 36 | return 37 | 38 | if __name__=='__main__': 39 | binaryproto2img() 40 | -------------------------------------------------------------------------------- /python/ConvertData/convert.sh: -------------------------------------------------------------------------------- 1 | python ./python/ConvertData/LevelDB.py --Type Train 2 | python ./python/ConvertData/LevelDB.py --Type Test 3 | python 
./python/ConvertData/LMDB.py --Type Train 4 | python ./python/ConvertData/LMDB.py --Type Test 5 | python ./python/ConvertData/PadCifar10.py --Type Train --Pad 4 6 | python ./python/ConvertData/PadCifar10.py --Type Test --Pad 4 7 | python ./python/ConvertData/PadBinaryMean.py 8 | -------------------------------------------------------------------------------- /python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer 2 | from ._caffe import init_log, log, set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed, solver_count, set_solver_count, solver_rank, set_solver_rank, set_multiprocess, Layer, get_solver 3 | from ._caffe import __version__ 4 | from .proto.caffe_pb2 import TRAIN, TEST 5 | from .classifier import Classifier 6 | from .detector import Detector 7 | from . import io 8 | from .net_spec import layers, params, NetSpec, to_proto 9 | -------------------------------------------------------------------------------- /python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /python/caffe/test/test_draw.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from google.protobuf import text_format 5 | 6 | import caffe.draw 7 | from caffe.proto import caffe_pb2 8 | 9 | def getFilenames(): 10 | """Yields files in the source tree which are Net prototxts.""" 11 | result = [] 12 | 13 | root_dir = os.path.abspath(os.path.join( 14 | os.path.dirname(__file__), '..', '..', '..')) 15 | assert os.path.exists(root_dir) 16 | 17 | for dirname in ('models', 'examples'): 18 | dirname = os.path.join(root_dir, dirname) 19 | assert os.path.exists(dirname) 20 | for cwd, _, filenames in os.walk(dirname): 21 | for filename in filenames: 22 | filename = os.path.join(cwd, filename) 23 | if filename.endswith('.prototxt') and 'solver' not in filename: 24 | yield os.path.join(dirname, filename) 25 | 26 | 27 | class TestDraw(unittest.TestCase): 28 | def test_draw_net(self): 29 | for filename in getFilenames(): 30 | net = caffe_pb2.NetParameter() 31 | with open(filename) as infile: 32 | text_format.Merge(infile.read(), net) 33 | caffe.draw.draw_net(net, 'LR') 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /python/caffe/test/test_layer_type_list.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import caffe 4 | 5 | class TestLayerTypeList(unittest.TestCase): 6 | 7 | def test_standard_types(self): 8 | #removing 'Data' from list 9 | for type_name in ['Data', 'Convolution', 'InnerProduct']: 10 | self.assertIn(type_name, caffe.layer_type_list(), 11 | '%s not in layer_type_list()' % type_name) 12 | -------------------------------------------------------------------------------- /python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | matplotlib>=1.3.1 6 | ipython>=3.0.0 7 | 
h5py>=2.2.0 8 | leveldb>=0.191 9 | networkx>=1.8.1 10 | nose>=1.3.0 11 | pandas>=0.12.0 12 | python-dateutil>=1.4,<2 13 | protobuf>=2.5.0 14 | python-gflags>=2.0 15 | pyyaml>=3.10 16 | Pillow>=2.3.0 17 | six>=1.1.0 -------------------------------------------------------------------------------- /scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build documentation for display in web browser. 3 | 4 | PORT=${1:-4000} 5 | 6 | echo "usage: build_docs.sh [port]" 7 | 8 | # Find the docs dir, no matter where the script is called 9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 10 | cd $ROOT_DIR 11 | 12 | # Gather docs. 13 | scripts/gather_examples.sh 14 | 15 | # Split caffe.proto for inclusion by layer catalogue. 16 | scripts/split_caffe_proto.py 17 | 18 | # Generate developer docs. 19 | make docs 20 | 21 | # Display docs using web server. 22 | cd docs 23 | jekyll serve -w -s . -d _site --port=$PORT 24 | -------------------------------------------------------------------------------- /scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh <gist_id> <dirname>" 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip 22 | unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR 23 | rm $MODEL_DIR/gist.zip 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links.
3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /scripts/split_caffe_proto.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import mmap 3 | import re 4 | import os 5 | import errno 6 | 7 | script_path = os.path.dirname(os.path.realpath(__file__)) 8 | 9 | # a regex to match the parameter definitions in caffe.proto 10 | r = re.compile(r'(?://.*\n)*message ([^ ]*) \{\n(?: .*\n|\n)*\}') 11 | 12 | # create directory to put caffe.proto fragments 13 | try: 14 | os.mkdir( 15 | os.path.join(script_path, 16 | '../docs/_includes/')) 17 | os.mkdir( 18 | os.path.join(script_path, 19 | '../docs/_includes/proto/')) 20 | except OSError as exception: 21 | if exception.errno != errno.EEXIST: 22 | raise 23 | 24 | caffe_proto_fn = os.path.join( 25 | script_path, 26 | '../src/caffe/proto/caffe.proto') 27 | 28 | with open(caffe_proto_fn, 'r') as fin: 29 | 30 | for m in r.finditer(fin.read(), re.MULTILINE): 31 | fn = os.path.join( 32 | script_path, 33 | '../docs/_includes/proto/%s.txt' % m.group(1)) 34 | with open(fn, 'w') as fout: 35 | fout.write(m.group(0)) 36 | -------------------------------------------------------------------------------- /scripts/travis/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # build the project 3 | 4 | BASEDIR=$(dirname $0) 5 | source $BASEDIR/defaults.sh 6 | 7 | if ! 
$WITH_CMAKE ; then 8 | make --jobs $NUM_THREADS all test pycaffe warn 9 | else 10 | cd build 11 | make --jobs $NUM_THREADS all test.testbin 12 | fi 13 | make lint 14 | -------------------------------------------------------------------------------- /scripts/travis/configure-cmake.sh: -------------------------------------------------------------------------------- 1 | # CMake configuration 2 | 3 | mkdir -p build 4 | cd build 5 | 6 | ARGS="-DCMAKE_BUILD_TYPE=Release -DBLAS=Open" 7 | 8 | if $WITH_PYTHON3 ; then 9 | ARGS="$ARGS -Dpython_version=3" 10 | fi 11 | 12 | if $WITH_IO ; then 13 | ARGS="$ARGS -DUSE_OPENCV=On -DUSE_LMDB=On -DUSE_LEVELDB=On" 14 | else 15 | ARGS="$ARGS -DUSE_OPENCV=Off -DUSE_LMDB=Off -DUSE_LEVELDB=Off" 16 | fi 17 | 18 | if $WITH_CUDA ; then 19 | # Only build SM50 20 | ARGS="$ARGS -DCPU_ONLY=Off -DCUDA_ARCH_NAME=Manual -DCUDA_ARCH_BIN=\"50\" -DCUDA_ARCH_PTX=\"\"" 21 | else 22 | ARGS="$ARGS -DCPU_ONLY=On" 23 | fi 24 | 25 | if $WITH_CUDNN ; then 26 | ARGS="$ARGS -DUSE_CUDNN=On" 27 | else 28 | ARGS="$ARGS -DUSE_CUDNN=Off" 29 | fi 30 | 31 | cmake .. $ARGS 32 | 33 | -------------------------------------------------------------------------------- /scripts/travis/configure-make.sh: -------------------------------------------------------------------------------- 1 | # raw Makefile configuration 2 | 3 | LINE () { 4 | echo "$@" >> Makefile.config 5 | } 6 | 7 | cp Makefile.config.example Makefile.config 8 | 9 | LINE "BLAS := open" 10 | LINE "WITH_PYTHON_LAYER := 1" 11 | 12 | if $WITH_PYTHON3 ; then 13 | # TODO(lukeyeager) this path is currently disabled because of test errors like: 14 | # ImportError: dynamic module does not define init function (PyInit__caffe) 15 | LINE "PYTHON_LIBRARIES := python3.4m boost_python-py34" 16 | LINE "PYTHON_INCLUDE := /usr/include/python3.4 /usr/lib/python3/dist-packages/numpy/core/include" 17 | LINE "INCLUDE_DIRS := \$(INCLUDE_DIRS) \$(PYTHON_INCLUDE)" 18 | fi 19 | 20 | if ! $WITH_IO ; then 21 | LINE "USE_OPENCV := 0" 22 | LINE "USE_LEVELDB := 0" 23 | LINE "USE_LMDB := 0" 24 | fi 25 | 26 | if $WITH_CUDA ; then 27 | # Only build SM50 28 | LINE "CUDA_ARCH := -gencode arch=compute_50,code=sm_50" 29 | else 30 | LINE "CPU_ONLY := 1" 31 | fi 32 | 33 | if $WITH_CUDNN ; then 34 | LINE "USE_CUDNN := 1" 35 | fi 36 | 37 | -------------------------------------------------------------------------------- /scripts/travis/configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # configure the project 3 | 4 | BASEDIR=$(dirname $0) 5 | source $BASEDIR/defaults.sh 6 | 7 | if ! $WITH_CMAKE ; then 8 | source $BASEDIR/configure-make.sh 9 | else 10 | source $BASEDIR/configure-cmake.sh 11 | fi 12 | -------------------------------------------------------------------------------- /scripts/travis/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set default environment variables 3 | 4 | set -e 5 | 6 | WITH_CMAKE=${WITH_CMAKE:-false} 7 | WITH_PYTHON3=${WITH_PYTHON3:-false} 8 | WITH_IO=${WITH_IO:-true} 9 | WITH_CUDA=${WITH_CUDA:-false} 10 | WITH_CUDNN=${WITH_CUDNN:-false} 11 | -------------------------------------------------------------------------------- /scripts/travis/install-python-deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # install extra Python dependencies 3 | # (must come after setup-venv) 4 | 5 | BASEDIR=$(dirname $0) 6 | source $BASEDIR/defaults.sh 7 | 8 | if ! 
$WITH_PYTHON3 ; then 9 | # Python2 10 | : 11 | else 12 | # Python3 13 | pip install --pre protobuf==3.0.0b3 14 | pip install pydot 15 | fi 16 | -------------------------------------------------------------------------------- /scripts/travis/setup-venv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # setup a Python virtualenv 3 | # (must come after install-deps) 4 | 5 | BASEDIR=$(dirname $0) 6 | source $BASEDIR/defaults.sh 7 | 8 | VENV_DIR=${1:-~/venv} 9 | 10 | # setup our own virtualenv 11 | if $WITH_PYTHON3; then 12 | PYTHON_EXE='/usr/bin/python3' 13 | else 14 | PYTHON_EXE='/usr/bin/python2' 15 | fi 16 | 17 | # use --system-site-packages so that Python will use deb packages 18 | virtualenv $VENV_DIR -p $PYTHON_EXE --system-site-packages 19 | -------------------------------------------------------------------------------- /scripts/travis/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # test the project 3 | 4 | BASEDIR=$(dirname $0) 5 | source $BASEDIR/defaults.sh 6 | 7 | if $WITH_CUDA ; then 8 | echo "Skipping tests for CUDA build" 9 | exit 0 10 | fi 11 | 12 | if ! $WITH_CMAKE ; then 13 | make runtest 14 | make pytest 15 | else 16 | cd build 17 | make runtest 18 | make pytest 19 | fi 20 | -------------------------------------------------------------------------------- /scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! -f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh <dirname>" 7 | echo " <dirname>/readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " <dirname>/readme.md must contain name field in the front-matter." 18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your <dirname>/readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!"
38 | fi 39 | -------------------------------------------------------------------------------- /src/api/FRCNN/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FRCNN_api_sources 2 | frcnn_api.cpp 3 | rpn_api.cpp 4 | ) 5 | ADD_LIBRARY(FRCNN_api ${FRCNN_api_sources}) 6 | TARGET_LINK_LIBRARIES(FRCNN_api ${Caffe_LINK}) 7 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_augment/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | g++ -std=c++11 `pkg-config opencv --cflags` *.cpp \ 3 | -o test `pkg-config opencv --libs` \ 4 | #-I /usr/local/opencv3/include \ 5 | #-L /usr/local/opencv3/lib \ 6 | #-lopencv_core -lopencv_highgui -lopencv_imgproc \ 7 | #-pthread 8 | #-lopencv_imgcodecs 9 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_augment/opencv_utils.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/src/caffe/FRCNN/data_augment/opencv_utils.cpp -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_augment/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/src/caffe/FRCNN/data_augment/test.jpg -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_enhance/haze_free/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | g++ -std=c++11 *.cpp \ 3 | -o free-haze \ 4 | -I /usr/local/opencv3/include \ 5 | -L /usr/local/opencv3/lib \ 6 | -lopencv_core -lopencv_highgui -lopencv_imgproc \ 7 | -pthread 8 | #-lopencv_imgcodecs 9 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_enhance/haze_free/guidedfilter.h: -------------------------------------------------------------------------------- 1 | #ifndef GUIDED_FILTER_H 2 | #define GUIDED_FILTER_H 3 | 4 | #include 5 | 6 | class GuidedFilterImpl; 7 | 8 | class GuidedFilter 9 | { 10 | public: 11 | GuidedFilter(const cv::Mat &I, int r, double eps); 12 | ~GuidedFilter(); 13 | 14 | cv::Mat filter(const cv::Mat &p, int depth = -1) const; 15 | 16 | private: 17 | GuidedFilterImpl *impl_; 18 | }; 19 | 20 | cv::Mat guidedFilter(const cv::Mat &I, const cv::Mat &p, int r, double eps, int depth = -1); 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_enhance/haze_free/haze.h: -------------------------------------------------------------------------------- 1 | #include "iostream" 2 | #include 3 | #include "time.h" 4 | #include "string.h" 5 | //#include "io.h" 6 | 7 | /****** OpenCV *******/ 8 | #include "opencv2/opencv.hpp" 9 | 10 | #define MAX_INT 20000000 11 | 12 | //Type of Min and Max value 13 | typedef struct _MinMax 14 | { 15 | double min; 16 | double max; 17 | }MinMax; 18 | 19 | cv::Mat ReadImage(); 20 | void rerange(); 21 | void fill_x_y(); 22 | int find_table(int y); 23 | void locate(int l1, int l2, double l3); 24 | void getL(cv::Mat img); 25 | 26 | cv::Vec Airlight(cv::Mat img, cv::Mat dark); 27 | cv::Mat TransmissionMat(cv::Mat dark); 28 | cv::Mat DarkChannelPrior(cv::Mat img); 29 | 30 | void RefineTrans(cv::Mat trans); 31 | 
32 | 33 | void printMat(char * name, cv::Mat m); 34 | 35 | void remove_haze(char* img_name, char* out_img_name); 36 | cv::Mat remove_haze(cv::Mat img); 37 | 38 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/data_enhance/histgram/equalize_hist.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __EQ_HIST__ 2 | #define __EQ_HIST__ 3 | 4 | #include 5 | using namespace cv; 6 | Mat equalizeChannelHist(const Mat & inputImage); 7 | Mat equalizeIntensityHist(const Mat & inputImage); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /src/caffe/FRCNN/focal_loss/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/evn python 2 | 3 | import os 4 | import sys 5 | import math 6 | import pylab 7 | import numpy as np 8 | 9 | def plot_Focal_Loss(gama=2): 10 | '''FL(p_t) = -(1 - p_t)^gama * log(p_t)''' 11 | p_t = np.linspace(0,1,1000) 12 | y = -np.power(1 - p_t, gama) * np.log(p_t) 13 | 14 | # compose plot 15 | pylab.title('Focal Loss') 16 | pylab.plot(p_t, y, 'co') # same function with cyan dots 17 | pylab.plot(p_t, -np.log(p_t)) # softmax loss 18 | pylab.show() # show the plot 19 | 20 | def plot_Gradient_of_Focal_Loss(gama=2): 21 | '''FL(p_t) = -(1 - p_t)^gama * log(p_t), here just for x instead of p_t''' 22 | p_t = np.linspace(0,1,1000) 23 | y = np.power(1 - p_t, gama) * (gama * p_t * np.log(p_t) + p_t - 1) # if i == j 24 | 25 | # compose plot 26 | pylab.title('Gridient of Focal Loss') 27 | pylab.plot(p_t, y, 'co') # same function with cyan dots 28 | pylab.plot(p_t, p_t - 1) # softmax loss 29 | pylab.show() # show the plot 30 | 31 | if __name__ == '__main__': 32 | '''Loss and Gradient''' 33 | plot_Focal_Loss(gama=2) 34 | plot_Gradient_of_Focal_Loss(gama=2) 35 | 36 | pi = 0.01; bias = -np.log((1 - pi) / pi) 37 | print "pi:", pi, "bias:", bias 38 | -------------------------------------------------------------------------------- /src/caffe/PR/README.md: -------------------------------------------------------------------------------- 1 | This directory includes code that pulls from Caffe repo's unmerged pull requests or other repos' good implementation for speeding up or special features. 2 | 3 | ## layers 4 | 5 | - [CuDNNDeconvolution](https://github.com/BVLC/caffe/pull/5924/commits/fb3146363963fa494d1e7488890cac3d2a141c8f) 6 | - [ConvolutionDepthwise](https://github.com/BVLC/caffe/pull/5665/commits/327a0194c67bc599ade211c388087a166339bdb5) 7 | -------------------------------------------------------------------------------- /src/caffe/SSD/ssd_base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "ssd_base_data_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void SSDBasePrefetchingDataLayer::Forward_gpu( 9 | const vector*>& bottom, const vector*>& top) { 10 | Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); 11 | // Reshape to loaded data. 12 | top[0]->ReshapeLike(batch->data_); 13 | // Copy the data 14 | caffe_copy(batch->data_.count(), batch->data_.gpu_data(), 15 | top[0]->mutable_gpu_data()); 16 | if (this->output_labels_) { 17 | // Reshape to loaded labels. 18 | top[1]->ReshapeLike(batch->label_); 19 | // Copy the labels. 
20 | caffe_copy(batch->label_.count(), batch->label_.gpu_data(), 21 | top[1]->mutable_gpu_data()); 22 | } 23 | // Ensure the copy is synchronous wrt the host, so that the next batch isn't 24 | // copied in meanwhile. 25 | CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); 26 | prefetch_free_.push(batch); 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FORWARD(SSDBasePrefetchingDataLayer); 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /src/caffe/YOLO/box.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Company: Systhesis 3 | * Author: Chen 4 | * Date: 2018/06/04 5 | */ 6 | 7 | #ifndef __BOX_H_ 8 | #define __BOX_H_ 9 | #include "yolo_layer.h" 10 | 11 | 12 | void do_nms_sort(detection *dets, int total, int classes, float thresh); 13 | 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /src/caffe/YOLO/image.h: -------------------------------------------------------------------------------- 1 | /* 2 | * part from darknet 3 | */ 4 | #ifndef __IMAGE_H_ 5 | #define __IMAGE_H_ 6 | 7 | typedef struct 8 | { 9 | int w; 10 | int h; 11 | int c; 12 | float *data; 13 | }image; 14 | 15 | image load_image_color(char* filename,int w,int h); 16 | 17 | void free_image(image m); 18 | 19 | image letterbox_image(image im, int w, int h); 20 | 21 | float get_pixel(image m, int x, int y, int c); 22 | void set_pixel(image m, int x, int y, int c, float val); 23 | void add_pixel(image m, int x, int y, int c, float val); 24 | 25 | //image make_image(int w, int h, int c); 26 | 27 | image resize_image(image im, int w, int h); 28 | 29 | //void fill_image(image m, float s); 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- /src/caffe/YOLO/yolo_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Company: Systhesis 3 | * Author: Chen 4 | * Date: 2018/06/04 5 | */ 6 | 7 | #ifndef __YOLO_LAYER_H_ 8 | #define __YOLO_LAYER_H_ 9 | #include 10 | #include 11 | #include 12 | 13 | using namespace caffe; 14 | 15 | typedef struct{ 16 | float x,y,w,h; 17 | }box; 18 | 19 | typedef struct{ 20 | box bbox; 21 | int classes; 22 | float* prob; 23 | float* mask; 24 | float objectness; 25 | int sort_class; 26 | }detection; 27 | 28 | typedef struct layer{ 29 | int batch; 30 | int total; 31 | int n,c,h,w; 32 | int out_n,out_c,out_h,out_w; 33 | int classes; 34 | int inputs,outputs; 35 | int *mask; 36 | float* biases; 37 | float* output; 38 | float* output_gpu; 39 | }layer; 40 | 41 | layer make_yolo_layer(int batch,int w,int h,int n,int total,int classes); 42 | 43 | void free_yolo_layer(layer l); 44 | 45 | void forward_yolo_layer_gpu(const float* input,layer l, float* output); 46 | 47 | detection* get_detections(vector*> blobs,int img_w,int img_h, int netw,int neth, int* nboxes, int classes, float thresh=0.5, float nms=0.45); 48 | 49 | void free_detections(detection *dets,int nboxes); 50 | 51 | 52 | 53 | 54 | #endif 55 | -------------------------------------------------------------------------------- /src/caffe/layer.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/layer.hpp" 2 | 3 | namespace caffe { 4 | 5 | INSTANTIATE_CLASS(Layer); 6 | 7 | } // namespace caffe 8 | -------------------------------------------------------------------------------- /src/caffe/layers/absval_layer.cu: 
-------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/absval_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void AbsValLayer::Forward_gpu( 10 | const vector*>& bottom, const vector*>& top) { 11 | const int count = top[0]->count(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 14 | } 15 | 16 | template 17 | void AbsValLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | const int count = top[0]->count(); 20 | const Dtype* top_diff = top[0]->gpu_diff(); 21 | if (propagate_down[0]) { 22 | const Dtype* bottom_data = bottom[0]->gpu_data(); 23 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 24 | caffe_gpu_sign(count, bottom_data, bottom_diff); 25 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 26 | } 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); 30 | 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /src/caffe/layers/base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/base_data_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void BasePrefetchingDataLayer::Forward_gpu( 9 | const vector*>& bottom, const vector*>& top) { 10 | if (prefetch_current_) { 11 | prefetch_free_.push(prefetch_current_); 12 | } 13 | prefetch_current_ = prefetch_full_.pop("Waiting for data"); 14 | // Reshape to loaded data. 15 | top[0]->ReshapeLike(prefetch_current_->data_); 16 | top[0]->set_gpu_data(prefetch_current_->data_.mutable_gpu_data()); 17 | if (this->output_labels_) { 18 | // Reshape to loaded labels. 
19 | top[1]->ReshapeLike(prefetch_current_->label_); 20 | top[1]->set_gpu_data(prefetch_current_->label_.mutable_gpu_data()); 21 | } 22 | } 23 | 24 | INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /src/caffe/layers/hdf5_data_layer.cu: -------------------------------------------------------------------------------- 1 | /* 2 | TODO: 3 | - only load parts of the file, in accordance with a prototxt param "max_mem" 4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include "hdf5.h" 10 | #include "hdf5_hl.h" 11 | 12 | #include "caffe/layers/hdf5_data_layer.hpp" 13 | 14 | namespace caffe { 15 | 16 | template 17 | void HDF5DataLayer::Forward_gpu(const vector*>& bottom, 18 | const vector*>& top) { 19 | const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); 20 | for (int i = 0; i < batch_size; ++i) { 21 | while (Skip()) { 22 | Next(); 23 | } 24 | for (int j = 0; j < this->layer_param_.top_size(); ++j) { 25 | int data_dim = top[j]->count() / top[j]->shape(0); 26 | caffe_copy(data_dim, 27 | &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] 28 | * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); 29 | } 30 | Next(); 31 | } 32 | } 33 | 34 | INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer); 35 | 36 | } // namespace caffe 37 | -------------------------------------------------------------------------------- /src/caffe/layers/input_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/input_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void InputLayer::LayerSetUp(const vector*>& bottom, 9 | const vector*>& top) { 10 | const int num_top = top.size(); 11 | const InputParameter& param = this->layer_param_.input_param(); 12 | const int num_shape = param.shape_size(); 13 | CHECK(num_shape == 0 || num_shape == 1 || num_shape == num_top) 14 | << "Must specify 'shape' once, once per top blob, or not at all: " 15 | << num_top << " tops vs. " << num_shape << " shapes."; 16 | if (num_shape > 0) { 17 | for (int i = 0; i < num_top; ++i) { 18 | const int shape_index = (param.shape_size() == 1) ? 
0 : i; 19 | top[i]->Reshape(param.shape(shape_index)); 20 | } 21 | } 22 | } 23 | 24 | INSTANTIATE_CLASS(InputLayer); 25 | REGISTER_LAYER_CLASS(Input); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /src/caffe/layers/neuron_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/neuron_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void NeuronLayer::Reshape(const vector*>& bottom, 9 | const vector*>& top) { 10 | top[0]->ReshapeLike(*bottom[0]); 11 | } 12 | 13 | INSTANTIATE_CLASS(NeuronLayer); 14 | 15 | } // namespace caffe 16 | -------------------------------------------------------------------------------- /src/caffe/layers/parameter_layer.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/layers/parameter_layer.hpp" 2 | 3 | namespace caffe { 4 | 5 | INSTANTIATE_CLASS(ParameterLayer); 6 | REGISTER_LAYER_CLASS(Parameter); 7 | 8 | } // namespace caffe 9 | -------------------------------------------------------------------------------- /src/caffe/layers/silence_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SilenceLayer::Backward_cpu(const vector*>& top, 10 | const vector& propagate_down, const vector*>& bottom) { 11 | for (int i = 0; i < bottom.size(); ++i) { 12 | if (propagate_down[i]) { 13 | caffe_set(bottom[i]->count(), Dtype(0), 14 | bottom[i]->mutable_cpu_diff()); 15 | } 16 | } 17 | } 18 | 19 | #ifdef CPU_ONLY 20 | STUB_GPU(SilenceLayer); 21 | #endif 22 | 23 | INSTANTIATE_CLASS(SilenceLayer); 24 | REGISTER_LAYER_CLASS(Silence); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /src/caffe/layers/silence_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SilenceLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | // Do nothing. 
}

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_gpu_set(bottom[i]->count(), Dtype(0),
                    bottom[i]->mutable_gpu_diff());
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/layers/split_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/split_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
    return;
  }
  caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
                bottom[0]->mutable_gpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}


INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/layers/threshold_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
  }
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(ThresholdLayer, Forward);
#endif

INSTANTIATE_CLASS(ThresholdLayer);
REGISTER_LAYER_CLASS(Threshold);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/layers/threshold_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > threshold ? 1 : 0;
  }
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}


INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);


}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/adadelta_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
    Dtype momentum, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
    gi = gi * sqrt((h2[i] + delta) / (hi + delta));
    h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
    g[i] = local_rate * gi;
  }
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
    Dtype delta, Dtype local_rate) {
  AdaDeltaUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, h2, momentum, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int , float*, float*, float*,
    float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
    double, double, double);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/adagrad_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = h[i] + gi*gi;
    g[i] = local_rate * gi / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  AdaGradUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adagrad_update_gpu<float>(int, float*, float*, float, float);
template void adagrad_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/adam_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v,
    Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float mi = m[i] = m[i]*beta1 + gi*(1-beta1);
    float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2);
    g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat);
  }
}
template <typename Dtype>
void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1,
    Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  AdamUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adam_update_gpu<float>(int, float*, float*, float*,
    float, float, float, float);
template void adam_update_gpu<double>(int, double*, double*, double*,
    double, double, double, double);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/nesterov_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float hi = h[i];
    float hi_new = h[i] = momentum * hi + local_rate * g[i];
    g[i] = (1+momentum) * hi_new - momentum * hi;
  }
}
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  NesterovUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float*, float*, float, float);
template void nesterov_update_gpu<double>(int, double*, double*, double,
    double);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/rmsprop_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
    Dtype rms_decay, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
    g[i] = local_rate * g[i] / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
    Dtype delta, Dtype local_rate) {
  RMSPropUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, rms_decay, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
    float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
    double);

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/solvers/sgd_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    g[i] = h[i] = momentum*h[i] + local_rate*g[i];
  }
}
template <typename Dtype>
void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  SGDUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void sgd_update_gpu<float>(int, float*, float*, float, float);
template void sgd_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe
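Note on the solver kernels above: each kernel applies its optimizer's per-element recurrence in place, leaving the final step in the gradient buffer g, which the solver then applies to the weights. Taking AdamUpdate as the worked case, the kernel computes, for every element i,

    m_i \leftarrow \beta_1 m_i + (1 - \beta_1)\, g_i
    v_i \leftarrow \beta_2 v_i + (1 - \beta_2)\, g_i^2
    g_i \leftarrow \alpha_t \, \frac{m_i}{\sqrt{v_i} + \hat{\epsilon}}

where \alpha_t is the bias-corrected step size passed in as corrected_local_rate and \hat{\epsilon} is eps_hat; the history buffers m and v are updated in place. The other kernels (AdaDelta, AdaGrad, Nesterov, RMSProp, SGD) follow the same in-place pattern for their respective update rules.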
--------------------------------------------------------------------------------
/src/caffe/test/test_caffe_main.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"
#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
#ifndef CPU_ONLY
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
#endif
}

#ifndef CPU_ONLY
using caffe::CAFFE_TEST_CUDA_PROP;
#endif

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  caffe::GlobalInit(&argc, &argv);
#ifndef CPU_ONLY
  // Before starting testing, let's first print out a few cuda device info.
  int device;
  cudaGetDeviceCount(&device);
  cout << "Cuda number of devices: " << device << endl;
  if (argc > 1) {
    // Use the given device
    device = atoi(argv[1]);
    cudaSetDevice(device);
    cout << "Setting to use device " << device << endl;
  } else if (CUDA_TEST_DEVICE >= 0) {
    // Use the device assigned in build configuration; but with a lower priority
    device = CUDA_TEST_DEVICE;
  }
  cudaGetDevice(&device);
  cout << "Current device id: " << device << endl;
  cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device);
  cout << "Current device name: " << CAFFE_TEST_CUDA_PROP.name << endl;
#endif
  // invoke the test.
  return RUN_ALL_TESTS();
}
--------------------------------------------------------------------------------
/src/caffe/test/test_data/sample_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/src/caffe/test/test_data/sample_data.h5
--------------------------------------------------------------------------------
/src/caffe/test/test_data/sample_data_2_gzip.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/src/caffe/test/test_data/sample_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/sample_data.h5
src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/src/caffe/test/test_data/solver_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/makefile/frcnn/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/src/caffe/test/test_data/solver_data.h5
--------------------------------------------------------------------------------
/src/caffe/test/test_data/solver_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/solver_data.h5
--------------------------------------------------------------------------------
/src/caffe/test/test_protobuf.cpp:
--------------------------------------------------------------------------------
// This is simply a script that tries serializing protocol buffer in text
// format. Nothing special here and no actual code is being tested.
#include <string>

#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/proto/caffe.pb.h"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class ProtoTest : public ::testing::Test {};

TEST_F(ProtoTest, TestSerialization) {
  LayerParameter param;
  param.set_name("test");
  param.set_type("Test");
  std::cout << "Printing in binary format." << std::endl;
  std::cout << param.SerializeAsString() << std::endl;
  std::cout << "Printing in text format." << std::endl;
  std::string str;
  google::protobuf::TextFormat::PrintToString(param, &str);
  std::cout << str << std::endl;
  EXPECT_TRUE(true);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/src/caffe/util/cudnn.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include "caffe/util/cudnn.hpp"

namespace caffe {
namespace cudnn {

float dataType<float>::oneval = 1.0;
float dataType<float>::zeroval = 0.0;
const void* dataType<float>::one =
    static_cast<void *>(&dataType<float>::oneval);
const void* dataType<float>::zero =
    static_cast<void *>(&dataType<float>::zeroval);

double dataType<double>::oneval = 1.0;
double dataType<double>::zeroval = 0.0;
const void* dataType<double>::one =
    static_cast<void *>(&dataType<double>::oneval);
const void* dataType<double>::zero =
    static_cast<void *>(&dataType<double>::zeroval);

}  // namespace cudnn
}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/src/caffe/util/db.cpp:
--------------------------------------------------------------------------------
#include "caffe/util/db.hpp"
#include "caffe/util/db_leveldb.hpp"
#include "caffe/util/db_lmdb.hpp"

#include <string>

namespace caffe { namespace db {

DB* GetDB(DataParameter::DB backend) {
  switch (backend) {
#ifdef USE_LEVELDB
  case DataParameter_DB_LEVELDB:
    return new LevelDB();
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  case DataParameter_DB_LMDB:
    return new LMDB();
#endif  // USE_LMDB
  default:
    LOG(FATAL) << "Unknown database backend";
    return NULL;
  }
}

DB* GetDB(const string& backend) {
#ifdef USE_LEVELDB
  if (backend == "leveldb") {
    return new LevelDB();
  }
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  if (backend == "lmdb") {
    return new LMDB();
  }
#endif  // USE_LMDB
  LOG(FATAL) << "Unknown database backend";
  return NULL;
}

}  // namespace db
}  // namespace caffe
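A minimal usage sketch for the GetDB factory above (illustrative only, not a file in this tree; the LMDB path is a placeholder). It follows the pattern used by Caffe's own tools such as compute_image_mean: open an existing database read-only through the string overload and walk it with a cursor.

#include <iostream>
#include <boost/scoped_ptr.hpp>

#include "caffe/util/db.hpp"

int main() {
  // "lmdb" (or "leveldb") selects the backend via the string overload of GetDB.
  boost::scoped_ptr<caffe::db::DB> db(caffe::db::GetDB("lmdb"));
  db->Open("examples/imagenet/ilsvrc12_train_lmdb", caffe::db::READ);  // placeholder path

  boost::scoped_ptr<caffe::db::Cursor> cursor(db->NewCursor());
  int num_entries = 0;
  while (cursor->valid()) {
    ++num_entries;  // cursor->key() / cursor->value() hold the raw serialized entry
    cursor->Next();
  }
  std::cout << "Entries: " << num_entries << std::endl;
  return 0;
}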
--------------------------------------------------------------------------------
/src/caffe/util/db_leveldb.cpp:
--------------------------------------------------------------------------------
#ifdef USE_LEVELDB
#include "caffe/util/db_leveldb.hpp"

#include <string>

namespace caffe { namespace db {

void LevelDB::Open(const string& source, Mode mode) {
  leveldb::Options options;
  options.block_size = 65536;
  options.write_buffer_size = 268435456;
  options.max_open_files = 100;
  options.error_if_exists = mode == NEW;
  options.create_if_missing = mode != READ;
  leveldb::Status status = leveldb::DB::Open(options, source, &db_);
  CHECK(status.ok()) << "Failed to open leveldb " << source
                     << std::endl << status.ToString();
  LOG(INFO) << "Opened leveldb " << source;
}

}  // namespace db
}  // namespace caffe
#endif  // USE_LEVELDB
--------------------------------------------------------------------------------
/src/caffe/util/search_path.cpp:
--------------------------------------------------------------------------------
#include "caffe/util/search_path.hpp"

#include <algorithm>
#include <string>
#include <vector>

namespace caffe {

std::vector<std::string> ParseSearchPath(
    std::string const & search_path
) {
  std::vector<std::string> result;

  std::string::const_iterator start = search_path.begin();
  while (true) {
    std::string::const_iterator i = std::find(start, search_path.end(), ':');
    result.push_back(std::string(start, i));
    if (i == search_path.end()) break;
    start = i + 1;
  }

  return result;
}

}  // namespace caffe
--------------------------------------------------------------------------------
/src/gtest/CMakeLists.txt:
--------------------------------------------------------------------------------
add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp)
caffe_default_properties(gtest)
target_include_directories(gtest PUBLIC ${Caffe_SRC_DIR})
target_compile_definitions(gtest PUBLIC -DGTEST_USE_OWN_TR1_TUPLE)


#add_library(gtest_main gtest_main.cc)
#target_link_libraries(gtest_main gtest)
--------------------------------------------------------------------------------
/src/logger/Makefile:
--------------------------------------------------------------------------------
CXX_SRCS := $(shell find . -name "*.cpp")
CXX_OBJS := ${CXX_SRCS:.cpp=.o}
CXXFLAGS += -O3 -std=c++11 -fPIC
VISUALDL_LIB := /home/s04/fyk/VisualDL/core.so
OUT_DIR := ../../distribute/lib
OUT_SO := $(OUT_DIR)/libvis_logger.so

$(OUT_SO): $(CXX_OBJS)
	@ echo CXX/LD -o $@ $<
	@ mkdir -p ../../distribute/lib/
	@ g++ -shared -o $@ $^ $(CXXFLAGS) \
		-l:$(VISUALDL_LIB) #-L/home/s04/fyk/protobuf/lib -lprotobuf
	@ rm *.o
	cp $(VISUALDL_LIB) $(OUT_DIR)
	@ echo done
%.o: %.cpp
	@ echo CXX/LD -o $@ $<
	@ g++ -c $< \
		$(CXXFLAGS) \
		-DUSE_VISUALDL \
		-I ../../include \
		-I /home/s04/fyk/VisualDL \
		-I /home/s04/fyk/protobuf-3.1.0/include

clean:
	rm *.o $(OUT_SO)
--------------------------------------------------------------------------------
/src/logger/README.md:
--------------------------------------------------------------------------------

Logger class using the VisualDL C++ SDK.
Steps to use:

1. Generate the protobuf headers for storage.proto in the `VisualDL/visualdl/storage` directory:
   `protoc storage.proto --cpp_out=.`
2. `cd $CAFFE_ROOT; make`

--------------------------------------------------------------------------------
/src/modules/fpn/fpn_utils.hpp:
--------------------------------------------------------------------------------
// ------------------------------------------------------------------
// FPN
// Written by github.com/makefile
// ------------------------------------------------------------------
#ifndef FPN_UTILS_HPP_
#define FPN_UTILS_HPP_

#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/blob.hpp"
using namespace caffe;
using namespace caffe::Frcnn;
// single scale version forked from generate_anchors.py
//vector<vector<float> > generate_anchors(int base_size=16, vector<float> ratios={0.5, 1, 2}, int scale=8) {
std::vector<std::vector<float> > generate_anchors(int base_size, const std::vector<float> &ratios, const std::vector<int> &scales);

//calc pyramid level of rois
template <typename Dtype>
int calc_level(Point4f<Dtype> &box, int max_level);

//put rois to different pyramid level top blob
template <typename Dtype>
void split_top_rois_by_level(const vector<Blob<Dtype> *> &top, int roi_blob_start_idx, std::vector<std::vector<Point4f<Dtype> > > &level_rois);

#endif

--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/collectionstack.h:
--------------------------------------------------------------------------------
#ifndef COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66

#if defined(_MSC_VER) || \
    (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \
     (__GNUC__ >= 4))  // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif

#include <cassert>
#include <stack>

namespace YAML {
struct CollectionType {
  enum value { NoCollection, BlockMap, BlockSeq, FlowMap, FlowSeq, CompactMap };
};

class CollectionStack {
 public:
  CollectionType::value GetCurCollectionType() const {
    if (collectionStack.empty())
      return CollectionType::NoCollection;
    return collectionStack.top();
  }

  void PushCollectionType(CollectionType::value type) {
    collectionStack.push(type);
  }
  void PopCollectionType(CollectionType::value type) {
    assert(type == GetCurCollectionType());
    collectionStack.pop();
  }

 private:
  std::stack<CollectionType::value> collectionStack;
};
}

#endif  // COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/contrib/graphbuilder.cpp:
--------------------------------------------------------------------------------
#include "graphbuilderadapter.h"

#include "yaml-cpp/parser.h"  // IWYU pragma: keep

namespace YAML {
class GraphBuilderInterface;

void* BuildGraphOfNextDocument(Parser& parser,
                               GraphBuilderInterface& graphBuilder) {
  GraphBuilderAdapter eventHandler(graphBuilder);
  if (parser.HandleNextDocument(eventHandler)) {
    return eventHandler.RootNode();
  } else {
    return NULL;
  }
}
}
--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/directives.cpp:
--------------------------------------------------------------------------------
#include "directives.h"

namespace YAML {
Directives::Directives() {
  // version
  version.isDefault = true;
  version.major = 1;
  version.minor = 2;
}

const std::string Directives::TranslateTagHandle(
    const std::string& handle) const {
  std::map<std::string, std::string>::const_iterator it = tags.find(handle);
  if (it == tags.end()) {
    if (handle == "!!")
      return "tag:yaml.org,2002:";
    return handle;
  }

  return it->second;
}
}
--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/directives.h:
--------------------------------------------------------------------------------
#ifndef DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66

#if defined(_MSC_VER) || \
    (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \
     (__GNUC__ >= 4))  // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif

#include <map>
#include <string>

namespace YAML {
struct Version {
  bool isDefault;
  int major, minor;
};

struct Directives {
  Directives();

  const std::string TranslateTagHandle(const std::string& handle) const;

  Version version;
  std::map<std::string, std::string> tags;
};
}

#endif  // DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66
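To make the handle-resolution rules concrete, here is a short illustrative snippet (not part of yaml-cpp) exercising TranslateTagHandle as implemented and declared above:

// Illustrative only; directives.h is an internal header under src/yaml-cpp-0.5.3.
#include <iostream>
#include "directives.h"

int main() {
  YAML::Directives dirs;                                 // default: YAML 1.2, empty tag map
  std::cout << dirs.TranslateTagHandle("!!") << "\n";    // "tag:yaml.org,2002:" (built-in fallback)
  std::cout << dirs.TranslateTagHandle("!") << "\n";     // unknown handles pass through unchanged
  dirs.tags["!ex!"] = "tag:example.com,2002:";           // as if a %TAG directive had been parsed
  std::cout << dirs.TranslateTagHandle("!ex!") << "\n";  // "tag:example.com,2002:"
  return 0;
}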
#include "yaml-cpp/node/detail/memory.h" 2 | #include "yaml-cpp/node/detail/node.h" // IWYU pragma: keep 3 | #include "yaml-cpp/node/ptr.h" 4 | 5 | namespace YAML { 6 | namespace detail { 7 | 8 | void memory_holder::merge(memory_holder& rhs) { 9 | if (m_pMemory == rhs.m_pMemory) 10 | return; 11 | 12 | m_pMemory->merge(*rhs.m_pMemory); 13 | rhs.m_pMemory = m_pMemory; 14 | } 15 | 16 | node& memory::create_node() { 17 | shared_node pNode(new node); 18 | m_nodes.insert(pNode); 19 | return *pNode; 20 | } 21 | 22 | void memory::merge(const memory& rhs) { 23 | m_nodes.insert(rhs.m_nodes.begin(), rhs.m_nodes.end()); 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/yaml-cpp-0.5.3/node.cpp: -------------------------------------------------------------------------------- 1 | #include "yaml-cpp/node/node.h" 2 | #include "nodebuilder.h" 3 | #include "nodeevents.h" 4 | 5 | namespace YAML { 6 | Node Clone(const Node& node) { 7 | NodeEvents events(node); 8 | NodeBuilder builder; 9 | events.Emit(builder); 10 | return builder.Root(); 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/yaml-cpp-0.5.3/null.cpp: -------------------------------------------------------------------------------- 1 | #include "yaml-cpp/null.h" 2 | 3 | namespace YAML { 4 | _Null Null; 5 | } 6 | -------------------------------------------------------------------------------- /src/yaml-cpp-0.5.3/regex_yaml.cpp: -------------------------------------------------------------------------------- 1 | #include "regex_yaml.h" 2 | 3 | namespace YAML { 4 | // constructors 5 | RegEx::RegEx() : m_op(REGEX_EMPTY) {} 6 | 7 | RegEx::RegEx(REGEX_OP op) : m_op(op) {} 8 | 9 | RegEx::RegEx(char ch) : m_op(REGEX_MATCH), m_a(ch) {} 10 | 11 | RegEx::RegEx(char a, char z) : m_op(REGEX_RANGE), m_a(a), m_z(z) {} 12 | 13 | RegEx::RegEx(const std::string& str, REGEX_OP op) : m_op(op) { 14 | for (std::size_t i = 0; i < str.size(); i++) 15 | m_params.push_back(RegEx(str[i])); 16 | } 17 | 18 | // combination constructors 19 | RegEx operator!(const RegEx& ex) { 20 | RegEx ret(REGEX_NOT); 21 | ret.m_params.push_back(ex); 22 | return ret; 23 | } 24 | 25 | RegEx operator||(const RegEx& ex1, const RegEx& ex2) { 26 | RegEx ret(REGEX_OR); 27 | ret.m_params.push_back(ex1); 28 | ret.m_params.push_back(ex2); 29 | return ret; 30 | } 31 | 32 | RegEx operator&&(const RegEx& ex1, const RegEx& ex2) { 33 | RegEx ret(REGEX_AND); 34 | ret.m_params.push_back(ex1); 35 | ret.m_params.push_back(ex2); 36 | return ret; 37 | } 38 | 39 | RegEx operator+(const RegEx& ex1, const RegEx& ex2) { 40 | RegEx ret(REGEX_SEQ); 41 | ret.m_params.push_back(ex1); 42 | ret.m_params.push_back(ex2); 43 | return ret; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/yaml-cpp-0.5.3/scantag.h: -------------------------------------------------------------------------------- 1 | #ifndef SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66 2 | #define SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66 3 | 4 | #if defined(_MSC_VER) || \ 5 | (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \ 6 | (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4 7 | #pragma once 8 | #endif 9 | 10 | #include 11 | #include "stream.h" 12 | 13 | namespace YAML { 14 | const std::string ScanVerbatimTag(Stream& INPUT); 15 | const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle); 16 | const std::string ScanTagSuffix(Stream& INPUT); 17 | } 
--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/scantag.h:
--------------------------------------------------------------------------------
#ifndef SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66

#if defined(_MSC_VER) || \
    (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \
     (__GNUC__ >= 4))  // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif

#include <string>
#include "stream.h"

namespace YAML {
const std::string ScanVerbatimTag(Stream& INPUT);
const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle);
const std::string ScanTagSuffix(Stream& INPUT);
}

#endif  // SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
--------------------------------------------------------------------------------
/src/yaml-cpp-0.5.3/tag.h:
--------------------------------------------------------------------------------
#ifndef TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66

#if defined(_MSC_VER) || \
    (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \
     (__GNUC__ >= 4))  // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif

#include <string>

namespace YAML {
struct Directives;
struct Token;

struct Tag {
  enum TYPE {
    VERBATIM,
    PRIMARY_HANDLE,
    SECONDARY_HANDLE,
    NAMED_HANDLE,
    NON_SPECIFIC
  };

  Tag(const Token& token);
  const std::string Translate(const Directives& directives);

  TYPE type;
  std::string handle, value;
};
}

#endif  // TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
--------------------------------------------------------------------------------
/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
# Collect source files
file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

# Build each source file independently
foreach(source ${srcs})
  get_filename_component(name ${source} NAME_WE)

  # caffe target already exists
  if(name MATCHES "caffe")
    set(name ${name}.bin)
  endif()

  # target
  add_executable(${name} ${source})
  target_link_libraries(${name} ${Caffe_LINK})
  caffe_default_properties(${name})

  # set back RUNTIME_OUTPUT_DIRECTORY
  caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools")
  caffe_set_solution_folder(${name} tools)

  # restore output name without suffix
  if(name MATCHES "caffe.bin")
    set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe)
  endif()

  # Install
  install(TARGETS ${name} DESTINATION bin)
endforeach(source)
--------------------------------------------------------------------------------
/tools/device_query.cpp:
--------------------------------------------------------------------------------
#include "caffe/common.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe device_query "
                "[--device_id=0] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/tools/encrypt_model.cpp:
--------------------------------------------------------------------------------
#include "api/util/blowfish.hpp"
#include <stdio.h>
#include <string.h>
#include <vector>

void show_usage(char* name) {
  printf("Encrypt/Decrypt tool.\n"
         "Usage: %s [enc|dec] <key> <input file> <output file>\n"
         "  enc - encrypt the file\n"
         "  dec - decrypt the file\n", name);
}

int main(int argc, char** argv) {
  //LOG(FATAL) << "Deprecated. Use caffe device_query "
  //    "[--device_id=0] instead.";
  if (argc < 5) {
    show_usage(argv[0]);
    //exit(0);
    return 0;
  }

  //std::string key(argv[2]);
  //std::vector<char> v_key(key.begin(), key.end());
  std::vector<char> v_key(argv[2], argv[2]+strlen(argv[2]));
  Blowfish bf(v_key);

  if (strncmp("enc", argv[1], 3)==0) {
    bf.Encrypt(argv[3], argv[4]);
  } else if (strncmp("dec", argv[1], 3)==0) {
    bf.Decrypt(argv[3], argv[4]);
  } else {
    show_usage(argv[0]);
  }
  return 0;
}
--------------------------------------------------------------------------------
/tools/finetune_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe train --solver=... "
                "[--weights=...] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/tools/test_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe test --model=... "
                "--weights=... instead.";
  return 0;
}
--------------------------------------------------------------------------------
/tools/train_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe train --solver=... "
                "[--snapshot=...] instead.";
  return 0;
}
--------------------------------------------------------------------------------