├── DSLT.m ├── OTB_dataset └── MotorRolling │ ├── groundtruth_rect.txt │ └── img │ ├── 0001.jpg │ ├── 0002.jpg │ ├── 0003.jpg │ ├── 0004.jpg │ ├── 0005.jpg │ ├── 0006.jpg │ ├── 0007.jpg │ ├── 0008.jpg │ ├── 0009.jpg │ ├── 0010.jpg │ ├── 0011.jpg │ ├── 0012.jpg │ ├── 0013.jpg │ ├── 0014.jpg │ ├── 0015.jpg │ ├── 0016.jpg │ ├── 0017.jpg │ ├── 0018.jpg │ ├── 0019.jpg │ ├── 0020.jpg │ ├── 0021.jpg │ ├── 0022.jpg │ ├── 0023.jpg │ ├── 0024.jpg │ ├── 0025.jpg │ ├── 0026.jpg │ ├── 0027.jpg │ ├── 0028.jpg │ ├── 0029.jpg │ ├── 0030.jpg │ ├── 0031.jpg │ ├── 0032.jpg │ ├── 0033.jpg │ ├── 0034.jpg │ ├── 0035.jpg │ ├── 0036.jpg │ ├── 0037.jpg │ ├── 0038.jpg │ ├── 0039.jpg │ ├── 0040.jpg │ ├── 0041.jpg │ ├── 0042.jpg │ ├── 0043.jpg │ ├── 0044.jpg │ ├── 0045.jpg │ ├── 0046.jpg │ ├── 0047.jpg │ ├── 0048.jpg │ ├── 0049.jpg │ ├── 0050.jpg │ ├── 0051.jpg │ ├── 0052.jpg │ ├── 0053.jpg │ ├── 0054.jpg │ ├── 0055.jpg │ ├── 0056.jpg │ ├── 0057.jpg │ ├── 0058.jpg │ ├── 0059.jpg │ ├── 0060.jpg │ ├── 0061.jpg │ ├── 0062.jpg │ ├── 0063.jpg │ ├── 0064.jpg │ ├── 0065.jpg │ ├── 0066.jpg │ ├── 0067.jpg │ ├── 0068.jpg │ ├── 0069.jpg │ ├── 0070.jpg │ ├── 0071.jpg │ ├── 0072.jpg │ ├── 0073.jpg │ ├── 0074.jpg │ ├── 0075.jpg │ ├── 0076.jpg │ ├── 0077.jpg │ ├── 0078.jpg │ ├── 0079.jpg │ ├── 0080.jpg │ ├── 0081.jpg │ ├── 0082.jpg │ ├── 0083.jpg │ ├── 0084.jpg │ ├── 0085.jpg │ ├── 0086.jpg │ ├── 0087.jpg │ ├── 0088.jpg │ ├── 0089.jpg │ ├── 0090.jpg │ ├── 0091.jpg │ ├── 0092.jpg │ ├── 0093.jpg │ ├── 0094.jpg │ ├── 0095.jpg │ ├── 0096.jpg │ ├── 0097.jpg │ ├── 0098.jpg │ ├── 0099.jpg │ ├── 0100.jpg │ ├── 0101.jpg │ ├── 0102.jpg │ ├── 0103.jpg │ ├── 0104.jpg │ ├── 0105.jpg │ ├── 0106.jpg │ ├── 0107.jpg │ ├── 0108.jpg │ ├── 0109.jpg │ ├── 0110.jpg │ ├── 0111.jpg │ ├── 0112.jpg │ ├── 0113.jpg │ ├── 0114.jpg │ ├── 0115.jpg │ ├── 0116.jpg │ ├── 0117.jpg │ ├── 0118.jpg │ ├── 0119.jpg │ ├── 0120.jpg │ ├── 0121.jpg │ ├── 0122.jpg │ ├── 0123.jpg │ ├── 0124.jpg │ ├── 0125.jpg │ ├── 0126.jpg │ ├── 0127.jpg │ ├── 0128.jpg │ ├── 0129.jpg │ ├── 0130.jpg │ ├── 0131.jpg │ ├── 0132.jpg │ ├── 0133.jpg │ ├── 0134.jpg │ ├── 0135.jpg │ ├── 0136.jpg │ ├── 0137.jpg │ ├── 0138.jpg │ ├── 0139.jpg │ ├── 0140.jpg │ ├── 0141.jpg │ ├── 0142.jpg │ ├── 0143.jpg │ ├── 0144.jpg │ ├── 0145.jpg │ ├── 0146.jpg │ ├── 0147.jpg │ ├── 0148.jpg │ ├── 0149.jpg │ ├── 0150.jpg │ ├── 0151.jpg │ ├── 0152.jpg │ ├── 0153.jpg │ ├── 0154.jpg │ ├── 0155.jpg │ ├── 0156.jpg │ ├── 0157.jpg │ ├── 0158.jpg │ ├── 0159.jpg │ ├── 0160.jpg │ ├── 0161.jpg │ ├── 0162.jpg │ ├── 0163.jpg │ └── 0164.jpg ├── README.md ├── caffe-dslt ├── CMakeLists.txt ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config ├── Makefile.config.example ├── README.md ├── caffe.cloc ├── cmake │ ├── ConfigGen.cmake │ ├── Cuda.cmake │ ├── Dependencies.cmake │ ├── External │ │ ├── gflags.cmake │ │ └── glog.cmake │ ├── Misc.cmake │ ├── Modules │ │ ├── FindAtlas.cmake │ │ ├── FindGFlags.cmake │ │ ├── FindGlog.cmake │ │ ├── FindLAPACK.cmake │ │ ├── FindLMDB.cmake │ │ ├── FindLevelDB.cmake │ │ ├── FindMKL.cmake │ │ ├── FindMatlabMex.cmake │ │ ├── FindNumPy.cmake │ │ ├── FindOpenBLAS.cmake │ │ ├── FindSnappy.cmake │ │ └── FindvecLib.cmake │ ├── ProtoBuf.cmake │ ├── Summary.cmake │ ├── Targets.cmake │ ├── Templates │ │ ├── CaffeConfig.cmake.in │ │ ├── CaffeConfigVersion.cmake.in │ │ └── caffe_config.h.in │ ├── Utils.cmake │ └── lint.cmake ├── docs │ ├── CMakeLists.txt │ ├── CNAME │ ├── README.md │ ├── _config.yml │ ├── _layouts │ │ └── default.html │ ├── development.md │ ├── images │ 
│ ├── GitHub-Mark-64px.png │ │ └── caffeine-icon.png │ ├── index.md │ ├── install_apt.md │ ├── install_osx.md │ ├── install_yum.md │ ├── installation.md │ ├── model_zoo.md │ ├── multigpu.md │ ├── performance_hardware.md │ ├── stylesheets │ │ ├── pygment_trac.css │ │ ├── reset.css │ │ └── styles.css │ └── tutorial │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig │ │ ├── .gitignore │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ ├── forward_backward.png │ │ └── layer.jpg │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md ├── examples │ ├── 00-classification.ipynb │ ├── 01-learning-lenet.ipynb │ ├── 02-brewing-logreg.ipynb │ ├── 03-fine-tuning.ipynb │ ├── CMakeLists.txt │ ├── ResNet │ │ ├── ResNet-50-deploy.prototxt │ │ ├── ResNet.prototxt │ │ ├── ResNet_mean.npy │ │ ├── bias_init.txt │ │ ├── stn_solver_res.prototxt │ │ └── stn_train_res.sh │ ├── cifar10 │ │ ├── cifar10_full.prototxt │ │ ├── cifar10_full_sigmoid_solver.prototxt │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt │ │ ├── cifar10_full_sigmoid_train_test.prototxt │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt │ │ ├── cifar10_full_solver.prototxt │ │ ├── cifar10_full_solver_lr1.prototxt │ │ ├── cifar10_full_solver_lr2.prototxt │ │ ├── cifar10_full_train_test.prototxt │ │ ├── cifar10_quick.prototxt │ │ ├── cifar10_quick_solver.prototxt │ │ ├── cifar10_quick_solver_lr1.prototxt │ │ ├── cifar10_quick_train_test.prototxt │ │ ├── convert_cifar_data.cpp │ │ ├── create_cifar10.sh │ │ ├── readme.md │ │ ├── train_full.sh │ │ ├── train_full_sigmoid.sh │ │ ├── train_full_sigmoid_bn.sh │ │ └── train_quick.sh │ ├── cpp_classification │ │ ├── classification.cpp │ │ └── readme.md │ ├── deconv_tracking.tar.gz │ ├── deconv_tracking │ │ ├── adjust.prototxt │ │ ├── adjust_solver.prototxt │ │ ├── de-conv4.prototxt │ │ ├── de-conv_solver.prototxt │ │ ├── de-conv_solver1prototxt │ │ ├── de-conv_solver2.prototxt │ │ ├── deconv_deploy.prototxt │ │ ├── deconv_train_test.prototxt │ │ ├── deconv_train_test1.prototxt │ │ ├── deconv_train_test2.prototxt │ │ ├── deconv_train_test3.prototxt │ │ ├── deconv_train_test4.prototxt │ │ ├── deconv_train_test5.prototxt │ │ ├── deconv_train_test6.prototxt │ │ ├── deconv_train_test7.prototxt │ │ ├── deconv_train_test8.prototxt │ │ ├── loss.prototxt │ │ ├── loss_solver.prototxt │ │ ├── train_test.prototxt │ │ ├── train_test1.prototxt │ │ └── train_test2.prototxt │ ├── detection.ipynb │ ├── feature_extraction │ │ ├── imagenet_val.prototxt │ │ └── readme.md │ ├── finetune_flickr_style │ │ ├── assemble_data.py │ │ ├── flickr_style.csv.gz │ │ ├── readme.md │ │ └── style_names.txt │ ├── finetune_pascal_detection │ │ ├── pascal_finetune_solver.prototxt │ │ └── pascal_finetune_trainval_test.prototxt │ ├── hdf5_classification │ │ ├── nonlinear_auto_test.prototxt │ │ ├── nonlinear_auto_train.prototxt │ │ ├── nonlinear_solver.prototxt │ │ ├── nonlinear_train_val.prototxt │ │ ├── solver.prototxt │ │ └── train_val.prototxt │ ├── imagenet │ │ ├── create_imagenet.sh │ │ ├── make_imagenet_mean.sh │ │ ├── readme.md │ │ ├── resume_training.sh │ │ └── train_caffenet.sh │ ├── images │ │ ├── cat.jpg │ │ ├── cat_gray.jpg │ │ └── fish-bike.jpg │ ├── mnist │ │ ├── convert_mnist_data.cpp │ │ ├── create_mnist.sh │ │ ├── lenet.prototxt │ │ ├── lenet_adadelta_solver.prototxt │ │ ├── lenet_auto_solver.prototxt │ │ ├── lenet_consolidated_solver.prototxt │ │ ├── lenet_multistep_solver.prototxt │ │ ├── lenet_solver.prototxt │ │ ├── lenet_solver_adam.prototxt 
│ │ ├── lenet_solver_rmsprop.prototxt │ │ ├── lenet_train_test.prototxt │ │ ├── mnist_autoencoder.prototxt │ │ ├── mnist_autoencoder_solver.prototxt │ │ ├── mnist_autoencoder_solver_adadelta.prototxt │ │ ├── mnist_autoencoder_solver_adagrad.prototxt │ │ ├── mnist_autoencoder_solver_nesterov.prototxt │ │ ├── readme.md │ │ ├── train_lenet.sh │ │ ├── train_lenet_adam.sh │ │ ├── train_lenet_consolidated.sh │ │ ├── train_lenet_rmsprop.sh │ │ ├── train_mnist_autoencoder.sh │ │ ├── train_mnist_autoencoder_adadelta.sh │ │ ├── train_mnist_autoencoder_adagrad.sh │ │ └── train_mnist_autoencoder_nesterov.sh │ ├── net_surgery.ipynb │ ├── net_surgery │ │ ├── bvlc_caffenet_full_conv.prototxt │ │ └── conv.prototxt │ ├── pycaffe │ │ ├── caffenet.py │ │ ├── layers │ │ │ └── pyloss.py │ │ └── linreg.prototxt │ ├── siamese │ │ ├── convert_mnist_siamese_data.cpp │ │ ├── create_mnist_siamese.sh │ │ ├── mnist_siamese.ipynb │ │ ├── mnist_siamese.prototxt │ │ ├── mnist_siamese_solver.prototxt │ │ ├── mnist_siamese_train_test.prototxt │ │ ├── readme.md │ │ └── train_mnist_siamese.sh │ ├── test.py │ └── web_demo │ │ ├── app.py │ │ ├── exifutil.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ └── templates │ │ └── index.html ├── include │ └── caffe │ │ ├── blob.hpp │ │ ├── caffe.hpp │ │ ├── common.hpp │ │ ├── common_layers.hpp │ │ ├── data_layers.hpp │ │ ├── data_reader.hpp │ │ ├── data_transformer.hpp │ │ ├── filler.hpp │ │ ├── gpu_util.cuh │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── layer_factory.hpp │ │ ├── layers │ │ ├── absval_layer.hpp │ │ ├── accuracy_layer.hpp │ │ ├── argmax_layer.hpp │ │ ├── base_conv_layer.hpp │ │ ├── base_data_layer.hpp │ │ ├── batch_norm_layer.hpp │ │ ├── batch_reindex_layer.hpp │ │ ├── bias_layer.hpp │ │ ├── bnll_layer.hpp │ │ ├── concat_layer.hpp │ │ ├── contrastive_loss_layer.hpp │ │ ├── conv_layer.hpp │ │ ├── crop_layer.hpp │ │ ├── cudnn_conv_layer.hpp │ │ ├── cudnn_lcn_layer.hpp │ │ ├── cudnn_lrn_layer.hpp │ │ ├── cudnn_pooling_layer.hpp │ │ ├── cudnn_relu_layer.hpp │ │ ├── cudnn_sigmoid_layer.hpp │ │ ├── cudnn_softmax_layer.hpp │ │ ├── cudnn_tanh_layer.hpp │ │ ├── data_layer.hpp │ │ ├── deconv_layer.hpp │ │ ├── dropout_layer.hpp │ │ ├── dummy_data_layer.hpp │ │ ├── eltwise_layer.hpp │ │ ├── elu_layer.hpp │ │ ├── embed_layer.hpp │ │ ├── euclidean_loss_layer.hpp │ │ ├── exp_layer.hpp │ │ ├── filter_layer.hpp │ │ ├── flatten_layer.hpp │ │ ├── focal_loss_layer.hpp │ │ ├── hdf5_data_layer.hpp │ │ ├── hdf5_output_layer.hpp │ │ ├── hinge_loss_layer.hpp │ │ ├── im2col_layer.hpp │ │ ├── image_data_layer.hpp │ │ ├── infogain_loss_layer.hpp │ │ ├── inner_product_layer.hpp │ │ ├── loss_layer.hpp │ │ ├── lrn_layer.hpp │ │ ├── memory_data_layer.hpp │ │ ├── multinomial_logistic_loss_layer.hpp │ │ ├── mvn_layer.hpp │ │ ├── neuron_layer.hpp │ │ ├── normalize_layer.hpp │ │ ├── pooling_layer.hpp │ │ ├── power_layer.hpp │ │ ├── prelu_layer.hpp │ │ ├── python_layer.hpp │ │ ├── reduction_layer.hpp │ │ ├── relu_layer.hpp │ │ ├── reshape_layer.hpp │ │ ├── scale_layer.hpp │ │ ├── sigmoid_cross_entropy_loss_layer.hpp │ │ ├── sigmoid_layer.hpp │ │ ├── silence_layer.hpp │ │ ├── slice_layer.hpp │ │ ├── softmax_layer.hpp │ │ ├── softmax_loss_layer.hpp │ │ ├── split_layer.hpp │ │ ├── spp_layer.hpp │ │ ├── tanh_layer.hpp │ │ ├── threshold_layer.hpp │ │ ├── tile_layer.hpp │ │ ├── unpooling_layer.hpp │ │ └── window_data_layer.hpp │ │ ├── loc_loss_layer.hpp │ │ ├── loss_layers.hpp │ │ ├── net.hpp │ │ ├── neuron_layers.hpp │ │ ├── parallel.hpp │ │ ├── power_file_layer.hpp │ │ ├── proto │ │ └── caffe.pb.h │ 
│ ├── sgd_solvers.hpp │ │ ├── solver.hpp │ │ ├── solver_factory.hpp │ │ ├── st_layer.hpp │ │ ├── st_loss_layer.hpp │ │ ├── syncedmem.hpp │ │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ │ ├── util │ │ ├── benchmark.hpp │ │ ├── blocking_queue.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── db_leveldb.hpp │ │ ├── db_lmdb.hpp │ │ ├── device_alternate.hpp │ │ ├── format.hpp │ │ ├── gpu_util.cuh │ │ ├── hdf5.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── rng.hpp │ │ ├── signal_handler.h │ │ └── upgrade_proto.hpp │ │ └── vision_layers.hpp ├── matlab │ ├── +caffe │ │ ├── +test │ │ │ ├── test_io.m │ │ │ ├── test_net.m │ │ │ └── test_solver.m │ │ ├── Blob.m │ │ ├── Layer.m │ │ ├── Net.m │ │ ├── Solver.m │ │ ├── get_net.m │ │ ├── get_solver.m │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.mat │ │ ├── io.m │ │ ├── private │ │ │ ├── CHECK.m │ │ │ ├── CHECK_FILE_EXIST.m │ │ │ ├── caffe_.cpp │ │ │ └── is_valid_handle.m │ │ ├── reset_all.m │ │ ├── run_tests.m │ │ ├── set_device.m │ │ ├── set_mode_cpu.m │ │ ├── set_mode_gpu.m │ │ └── version.m │ ├── CMakeLists.txt │ ├── demo │ │ └── classification_demo.m │ └── hdf5creation │ │ ├── .gitignore │ │ ├── demo.m │ │ └── store2hdf5.m ├── python │ ├── CMakeLists.txt │ ├── caffe │ │ ├── __init__.py │ │ ├── _caffe.cpp │ │ ├── classifier.py │ │ ├── detector.py │ │ ├── draw.py │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.npy │ │ ├── io.py │ │ ├── net_spec.py │ │ ├── pycaffe.py │ │ └── test │ │ │ ├── test_io.py │ │ │ ├── test_layer_type_list.py │ │ │ ├── test_net.py │ │ │ ├── test_net_spec.py │ │ │ ├── test_python_layer.py │ │ │ ├── test_python_layer_with_param_str.py │ │ │ └── test_solver.py │ ├── classify.py │ ├── detect.py │ ├── draw_net.py │ └── requirements.txt ├── scripts │ ├── build_docs.sh │ ├── copy_notebook.py │ ├── cpp_lint.py │ ├── deploy_docs.sh │ ├── download_model_binary.py │ ├── download_model_from_gist.sh │ ├── gather_examples.sh │ ├── travis │ │ ├── travis_build_and_test.sh │ │ ├── travis_install.sh │ │ └── travis_setup_makefile_config.sh │ └── upload_model_to_gist.sh ├── src │ ├── caffe │ │ ├── CMakeLists.txt │ │ ├── blob.cpp │ │ ├── common.cpp │ │ ├── data_reader.cpp │ │ ├── data_transformer.cpp │ │ ├── internal_thread.cpp │ │ ├── layer.cpp │ │ ├── layer_factory.cpp │ │ ├── layers │ │ │ ├── absval_layer.cpp │ │ │ ├── absval_layer.cu │ │ │ ├── accuracy_layer.cpp │ │ │ ├── argmax_layer.cpp │ │ │ ├── base_conv_layer.cpp │ │ │ ├── base_data_layer.cpp │ │ │ ├── base_data_layer.cu │ │ │ ├── batch_norm_layer.cpp │ │ │ ├── batch_norm_layer.cu │ │ │ ├── batch_reindex_layer.cpp │ │ │ ├── batch_reindex_layer.cu │ │ │ ├── bias_layer.cpp │ │ │ ├── bias_layer.cu │ │ │ ├── bn_layer.cpp │ │ │ ├── bn_layer.cu │ │ │ ├── bnll_layer.cpp │ │ │ ├── bnll_layer.cu │ │ │ ├── concat_layer.cpp │ │ │ ├── concat_layer.cu │ │ │ ├── contrastive_loss_layer.cpp │ │ │ ├── contrastive_loss_layer.cu │ │ │ ├── conv_layer.cpp │ │ │ ├── conv_layer.cu │ │ │ ├── correlation_layer.cpp │ │ │ ├── correlation_layer.cu │ │ │ ├── crop_layer.cpp │ │ │ ├── crop_layer.cu │ │ │ ├── cudnn_conv_layer.cpp │ │ │ ├── cudnn_conv_layer.cu │ │ │ ├── cudnn_lcn_layer.cpp │ │ │ ├── cudnn_lcn_layer.cu │ │ │ ├── cudnn_lrn_layer.cpp │ │ │ ├── cudnn_lrn_layer.cu │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ ├── cudnn_pooling_layer.cu │ │ │ ├── cudnn_relu_layer.cpp │ │ │ ├── cudnn_relu_layer.cu │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ ├── 
cudnn_softmax_layer.cu │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ ├── cudnn_tanh_layer.cu │ │ │ ├── data_layer.cpp │ │ │ ├── deconv_layer.cpp │ │ │ ├── deconv_layer.cu │ │ │ ├── dropout_layer.cpp │ │ │ ├── dropout_layer.cu │ │ │ ├── dummy_data_layer.cpp │ │ │ ├── eltwise_layer.cpp │ │ │ ├── eltwise_layer.cu │ │ │ ├── elu_layer.cpp │ │ │ ├── elu_layer.cu │ │ │ ├── embed_layer.cpp │ │ │ ├── embed_layer.cu │ │ │ ├── euclidean_loss_layer.cpp │ │ │ ├── euclidean_loss_layer.cu │ │ │ ├── exp_layer.cpp │ │ │ ├── exp_layer.cu │ │ │ ├── filter_layer.cpp │ │ │ ├── filter_layer.cu │ │ │ ├── flatten_layer.cpp │ │ │ ├── focal_loss_layer.cpp │ │ │ ├── focal_loss_layer.cu │ │ │ ├── hdf5_data_layer.cpp │ │ │ ├── hdf5_data_layer.cu │ │ │ ├── hdf5_output_layer.cpp │ │ │ ├── hdf5_output_layer.cu │ │ │ ├── hinge_loss_layer.cpp │ │ │ ├── im2col_layer.cpp │ │ │ ├── im2col_layer.cu │ │ │ ├── image_data_layer.cpp │ │ │ ├── infogain_loss_layer.cpp │ │ │ ├── inner_product_layer.cpp │ │ │ ├── inner_product_layer.cu │ │ │ ├── loc_loss_layer.cpp │ │ │ ├── loc_loss_layer.cu │ │ │ ├── loss_layer.cpp │ │ │ ├── lrn_layer.cpp │ │ │ ├── lrn_layer.cu │ │ │ ├── memory_data_layer.cpp │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ ├── mvn_layer.cpp │ │ │ ├── mvn_layer.cu │ │ │ ├── neuron_layer.cpp │ │ │ ├── normalization_layer.cpp │ │ │ ├── normalization_layer.cu │ │ │ ├── normalize_layer.cpp │ │ │ ├── normalize_layer.cu │ │ │ ├── pooling_layer.cpp │ │ │ ├── pooling_layer.cu │ │ │ ├── power_file_layer.cpp │ │ │ ├── power_file_layer.cu │ │ │ ├── power_layer.cpp │ │ │ ├── power_layer.cu │ │ │ ├── prelu_layer.cpp │ │ │ ├── prelu_layer.cu │ │ │ ├── reduction_layer.cpp │ │ │ ├── reduction_layer.cu │ │ │ ├── relu_layer.cpp │ │ │ ├── relu_layer.cu │ │ │ ├── reshape_layer.cpp │ │ │ ├── scale_layer.cpp │ │ │ ├── scale_layer.cu │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ │ ├── sigmoid_layer.cpp │ │ │ ├── sigmoid_layer.cu │ │ │ ├── silence_layer.cpp │ │ │ ├── silence_layer.cu │ │ │ ├── slice_layer.cpp │ │ │ ├── slice_layer.cu │ │ │ ├── softmax_layer.cpp │ │ │ ├── softmax_layer.cu │ │ │ ├── softmax_loss_layer.cpp │ │ │ ├── softmax_loss_layer.cu │ │ │ ├── split_layer.cpp │ │ │ ├── split_layer.cu │ │ │ ├── spp_layer.cpp │ │ │ ├── st_layer.cpp │ │ │ ├── st_layer.cu │ │ │ ├── st_loss_layer.cpp │ │ │ ├── st_loss_layer.cu │ │ │ ├── tanh_layer.cpp │ │ │ ├── tanh_layer.cu │ │ │ ├── threshold_layer.cpp │ │ │ ├── threshold_layer.cu │ │ │ ├── tile_layer.cpp │ │ │ ├── tile_layer.cu │ │ │ ├── unpooling_layer.cpp │ │ │ ├── unpooling_layer.cu │ │ │ └── window_data_layer.cpp │ │ ├── net.cpp │ │ ├── parallel.cpp │ │ ├── proto │ │ │ ├── caffe.pb.cc │ │ │ └── caffe.proto │ │ ├── solver.cpp │ │ ├── solvers │ │ │ ├── adadelta_solver.cpp │ │ │ ├── adadelta_solver.cu │ │ │ ├── adagrad_solver.cpp │ │ │ ├── adagrad_solver.cu │ │ │ ├── adam_solver.cpp │ │ │ ├── adam_solver.cu │ │ │ ├── nesterov_solver.cpp │ │ │ ├── nesterov_solver.cu │ │ │ ├── rmsprop_solver.cpp │ │ │ ├── rmsprop_solver.cu │ │ │ ├── sgd_solver.cpp │ │ │ └── sgd_solver.cu │ │ ├── syncedmem.cpp │ │ ├── test │ │ │ ├── CMakeLists.txt │ │ │ ├── test_accuracy_layer.cpp │ │ │ ├── test_argmax_layer.cpp │ │ │ ├── test_batch_norm_layer.cpp │ │ │ ├── test_batch_reindex_layer.cpp │ │ │ ├── test_benchmark.cpp │ │ │ ├── test_bias_layer.cpp │ │ │ ├── test_blob.cpp │ │ │ ├── test_caffe_main.cpp │ │ │ ├── test_common.cpp │ │ │ ├── test_concat_layer.cpp │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ ├── test_convolution_layer.cpp │ │ │ ├── test_data │ │ │ │ ├── 
generate_sample_data.py │ │ │ │ ├── sample_data.h5 │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ ├── sample_data_list.txt │ │ │ │ ├── solver_data.h5 │ │ │ │ └── solver_data_list.txt │ │ │ ├── test_data_layer.cpp │ │ │ ├── test_data_transformer.cpp │ │ │ ├── test_db.cpp │ │ │ ├── test_deconvolution_layer.cpp │ │ │ ├── test_dummy_data_layer.cpp │ │ │ ├── test_eltwise_layer.cpp │ │ │ ├── test_embed_layer.cpp │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ ├── test_filler.cpp │ │ │ ├── test_filter_layer.cpp │ │ │ ├── test_flatten_layer.cpp │ │ │ ├── test_gradient_based_solver.cpp │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ ├── test_hdf5data_layer.cpp │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ ├── test_im2col_kernel.cu │ │ │ ├── test_im2col_layer.cpp │ │ │ ├── test_image_data_layer.cpp │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ ├── test_inner_product_layer.cpp │ │ │ ├── test_internal_thread.cpp │ │ │ ├── test_io.cpp │ │ │ ├── test_layer_factory.cpp │ │ │ ├── test_lrn_layer.cpp │ │ │ ├── test_math_functions.cpp │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ ├── test_memory_data_layer.cpp │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ ├── test_mvn_layer.cpp │ │ │ ├── test_net.cpp │ │ │ ├── test_neuron_layer.cpp │ │ │ ├── test_platform.cpp │ │ │ ├── test_pooling_layer.cpp │ │ │ ├── test_power_layer.cpp │ │ │ ├── test_protobuf.cpp │ │ │ ├── test_random_number_generator.cpp │ │ │ ├── test_reduction_layer.cpp │ │ │ ├── test_reshape_layer.cpp │ │ │ ├── test_scale_layer.cpp │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── test_slice_layer.cpp │ │ │ ├── test_softmax_layer.cpp │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ ├── test_solver.cpp │ │ │ ├── test_solver_factory.cpp │ │ │ ├── test_split_layer.cpp │ │ │ ├── test_spp_layer.cpp │ │ │ ├── test_stochastic_pooling.cpp │ │ │ ├── test_syncedmem.cpp │ │ │ ├── test_tanh_layer.cpp │ │ │ ├── test_threshold_layer.cpp │ │ │ ├── test_tile_layer.cpp │ │ │ ├── test_upgrade_proto.cpp │ │ │ └── test_util_blas.cpp │ │ └── util │ │ │ ├── benchmark.cpp │ │ │ ├── blocking_queue.cpp │ │ │ ├── cudnn.cpp │ │ │ ├── db.cpp │ │ │ ├── db_leveldb.cpp │ │ │ ├── db_lmdb.cpp │ │ │ ├── hdf5.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── insert_splits.cpp │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ ├── signal_handler.cpp │ │ │ └── upgrade_proto.cpp │ └── gtest │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc └── tools │ ├── CMakeLists.txt │ ├── caffe.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── device_query.cpp │ ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.py │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py.example │ ├── resize_and_crop_images.py │ └── summarize.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── net_speed_benchmark.cpp │ ├── test_net.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ ├── upgrade_net_proto_text.cpp │ └── upgrade_solver_proto_text.cpp ├── framework.png ├── model ├── finetune_net.caffemodel └── new_vgg_net.caffemodel ├── prototxt ├── fea_net.prototxt ├── fea_net1.prototxt ├── initial_feanet.m ├── otb_fea_net.prototxt ├── refer_im_fea_net.prototxt ├── vgg_se_res_layer.prototxt ├── vgg_se_res_layer_test.prototxt ├── vgg_se_res_solver.prototxt ├── vgg_se_res_solver1.prototxt └── vot_fea_net.prototxt ├── run_DSLT.m ├── util ├── GetMap.m ├── choose_video.m ├── cleanupFun.m ├── collectData.m ├── compute_performance_measures.m ├── crop_bg.m ├── 
ext_roi.m ├── feature_norm.m ├── focal_loss.m ├── gaussian_test.m ├── generate_data_list.m ├── get_crops.m ├── get_scale_im.m ├── get_scale_im1.m ├── get_scale_sample.m ├── get_scale_sample1.m ├── get_scale_sample3.m ├── get_subwindow.m ├── get_subwindow_avg.m ├── get_subwindow_avg1.m ├── impreprocess.m ├── impreprocess1.m ├── init_scale_estimator.m ├── load_video_info.m ├── load_vot_video_info.m ├── localMaximum.m ├── loss_object_grad.m ├── loss_object_grad_focal.m ├── myGetMap.m ├── myGetMap1.m ├── my_data_augmentation.m ├── mygaussian_shaped_labels.m ├── mynewGetMap.m ├── mypca.m ├── mytrain.m ├── plot_focal_loss.m ├── precision_plot.m ├── pretrain_net.m ├── revise_bilinear_layer.m ├── revise_dense_fpn.m ├── revise_fea_prototxt.m ├── revise_fpn.m ├── revise_layer1.m ├── revise_layer2.m ├── revise_prototxt1.m ├── revise_prototxt2.m ├── revise_prototxt3.m ├── revise_se_res_layer.m ├── revise_se_res_layer_test.m ├── revise_three_fpn.m ├── revise_two_layer.m ├── revise_vot_fpn.m ├── set_tracker_param.m └── stntracking_new.m └── vot.m

[Per-frame placeholders from the repository dump: the binary frames under OTB_dataset/MotorRolling/img (0001.jpg, 0002.jpg, ...) are each replaced by a pointer of the form https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/<frame>.jpg]
https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0136.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0137.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0137.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0138.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0138.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0139.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0139.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0140.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0140.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0141.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0141.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0142.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0142.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0143.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0143.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0144.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0144.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0145.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0145.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0146.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0146.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0147.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0147.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0148.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0148.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0149.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0149.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0150.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0150.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0151.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0151.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0152.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0152.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0153.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0153.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0154.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0154.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0155.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0155.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0156.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0156.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0157.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0157.jpg 
-------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0158.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0158.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0159.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0159.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0160.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0160.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0161.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0161.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0162.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0162.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0163.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0163.jpg -------------------------------------------------------------------------------- /OTB_dataset/MotorRolling/img/0164.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/OTB_dataset/MotorRolling/img/0164.jpg -------------------------------------------------------------------------------- /caffe-dslt/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites. 16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /caffe-dslt/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 
5 | 6 | Check the users group in case you need help: 7 | https://groups.google.com/forum/#!forum/caffe-users 8 | -------------------------------------------------------------------------------- /caffe-dslt/caffe.cloc: -------------------------------------------------------------------------------- 1 | Bourne Shell 2 | filter remove_matches ^\s*# 3 | filter remove_inline #.*$ 4 | extension sh 5 | script_exe sh 6 | C 7 | filter remove_matches ^\s*// 8 | filter call_regexp_common C 9 | filter remove_inline //.*$ 10 | extension c 11 | extension ec 12 | extension pgc 13 | C++ 14 | filter remove_matches ^\s*// 15 | filter remove_inline //.*$ 16 | filter call_regexp_common C 17 | extension C 18 | extension cc 19 | extension cpp 20 | extension cxx 21 | extension pcc 22 | C/C++ Header 23 | filter remove_matches ^\s*// 24 | filter call_regexp_common C 25 | filter remove_inline //.*$ 26 | extension H 27 | extension h 28 | extension hh 29 | extension hpp 30 | CUDA 31 | filter remove_matches ^\s*// 32 | filter remove_inline //.*$ 33 | filter call_regexp_common C 34 | extension cu 35 | Python 36 | filter remove_matches ^\s*# 37 | filter docstring_to_C 38 | filter call_regexp_common C 39 | filter remove_inline #.*$ 40 | extension py 41 | make 42 | filter remove_matches ^\s*# 43 | filter remove_inline #.*$ 44 | extension Gnumakefile 45 | extension Makefile 46 | extension am 47 | extension gnumakefile 48 | extension makefile 49 | filename Gnumakefile 50 | filename Makefile 51 | filename gnumakefile 52 | filename makefile 53 | script_exe make 54 | -------------------------------------------------------------------------------- /caffe-dslt/cmake/Modules/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMBD libraries and headers 2 | # LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license. 
10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") 16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) 17 | 18 | include(FindPackageHandleStandardArgs) 19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 20 | 21 | if(LMDB_FOUND) 22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") 23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 24 | 25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h 26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) 27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") 28 | endif() 29 | -------------------------------------------------------------------------------- /caffe-dslt/cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # SNAPPY_FOUND 8 | # Snappy_INCLUDE_DIR 9 | # Snappy_LIBRARIES 10 | 11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h 12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) 13 | 14 | find_library(Snappy_LIBRARIES NAMES snappy 15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) 19 | 20 | if(SNAPPY_FOUND) 21 | message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") 22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) 23 | 24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h 25 | SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) 26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") 27 | endif() 28 | 29 | -------------------------------------------------------------------------------- /caffe-dslt/cmake/Modules/FindvecLib.cmake: -------------------------------------------------------------------------------- 1 | # Find the vecLib libraries as part of Accelerate.framework or as standalon framework 2 | # 3 | # The following are set after configuration is done: 4 | # VECLIB_FOUND 5 | # vecLib_INCLUDE_DIR 6 | # vecLib_LINKER_LIBS 7 | 8 | 9 | if(NOT APPLE) 10 | return() 11 | endif() 12 | 13 | set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") 14 | 15 | find_path(vecLib_INCLUDE_DIR vecLib.h 16 | DOC "vecLib include directory" 17 | PATHS /System/Library/${__veclib_include_suffix} 18 | /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} 19 | /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) 20 | 21 | include(FindPackageHandleStandardArgs) 22 | find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) 23 | 24 | if(VECLIB_FOUND) 25 | if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") 26 | set(vecLib_LINKER_LIBS -lcblas "-framework vecLib") 27 | message(STATUS "Found standalone vecLib.framework") 28 | else() 29 | set(vecLib_LINKER_LIBS -lcblas "-framework 
Accelerate") 30 | message(STATUS "Found vecLib as part of Accelerate.framework") 31 | endif() 32 | 33 | mark_as_advanced(vecLib_INCLUDE_DIR) 34 | endif() 35 | -------------------------------------------------------------------------------- /caffe-dslt/cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /caffe-dslt/cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* NVIDA Cuda */ 8 | #cmakedefine HAVE_CUDA 9 | 10 | /* NVIDA cuDNN */ 11 | #cmakedefine HAVE_CUDNN 12 | #cmakedefine USE_CUDNN 13 | 14 | /* NVIDA cuDNN */ 15 | #cmakedefine CPU_ONLY 16 | 17 | /* Test device */ 18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 19 | 20 | /* Temporary (TODO: remove) */ 21 | #if 1 22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 24 | #define CMAKE_EXT ".gen.cmake" 25 | #else 26 | #define CMAKE_SOURCE_DIR "src/" 27 | #define EXAMPLES_SOURCE_DIR "examples/" 28 | #define CMAKE_EXT "" 29 | #endif 30 | 31 | /* Matlab */ 32 | #cmakedefine HAVE_MATLAB 33 | 34 | /* IO libraries */ 35 | #cmakedefine USE_OPENCV 36 | #cmakedefine USE_LEVELDB 37 | #cmakedefine USE_LMDB 38 | #cmakedefine ALLOW_LMDB_NOLOCK 39 | -------------------------------------------------------------------------------- /caffe-dslt/docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /caffe-dslt/docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 
6 | -------------------------------------------------------------------------------- /caffe-dslt/docs/_config.yml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - 3 | scope: 4 | path: "" # an empty string here means all files in the project 5 | values: 6 | layout: "default" 7 | 8 | -------------------------------------------------------------------------------- /caffe-dslt/docs/images/GitHub-Mark-64px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/images/GitHub-Mark-64px.png -------------------------------------------------------------------------------- /caffe-dslt/docs/images/caffeine-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/images/caffeine-icon.png -------------------------------------------------------------------------------- /caffe-dslt/docs/stylesheets/reset.css: -------------------------------------------------------------------------------- 1 | /* MeyerWeb Reset */ 2 | 3 | html, body, div, span, applet, object, iframe, 4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 5 | a, abbr, acronym, address, big, cite, code, 6 | del, dfn, em, img, ins, kbd, q, s, samp, 7 | small, strike, strong, sub, sup, tt, var, 8 | b, u, i, center, 9 | dl, dt, dd, ol, ul, li, 10 | fieldset, form, label, legend, 11 | table, caption, tbody, tfoot, thead, tr, th, td, 12 | article, aside, canvas, details, embed, 13 | figure, figcaption, footer, header, hgroup, 14 | menu, nav, output, ruby, section, summary, 15 | time, mark, audio, video { 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/convolution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convolution 3 | --- 4 | # Caffeinated Convolution 5 | 6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication. 7 | This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices. 8 | 9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo). 10 | 11 | As it turns out, this same reduction was independently explored in the context of conv. nets by 12 | 13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006. 
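To make the reduction described in the convolution tutorial concrete, here is a minimal NumPy sketch of im2col followed by a single GEMM. This is an editorial illustration, not code from this repository: the function names and array shapes are invented for the example, and, like Caffe, it computes the unflipped (cross-correlation) form over valid positions only.

```python
import numpy as np

def im2col(x, kh, kw):
    # Unroll every kh x kw patch of a single-channel image into one column.
    H, W = x.shape
    out_h, out_w = H - kh + 1, W - kw + 1
    cols = np.empty((kh * kw, out_h * out_w))
    idx = 0
    for i in range(out_h):
        for j in range(out_w):
            cols[:, idx] = x[i:i + kh, j:j + kw].ravel()
            idx += 1
    return cols

def conv_as_gemm(x, filters):
    # filters: (num_filters, kh, kw). One matrix product replaces the sliding window.
    n, kh, kw = filters.shape
    cols = im2col(x, kh, kw)                   # (kh*kw, out_h*out_w)
    weights = filters.reshape(n, kh * kw)      # (num_filters, kh*kw)
    out_h, out_w = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    return (weights @ cols).reshape(n, out_h, out_w)

x = np.random.rand(8, 8)           # toy single-channel image
filters = np.random.rand(3, 3, 3)  # three 3x3 filters
print(conv_as_gemm(x, filters).shape)  # -> (3, 6, 6)
```

The unrolled matrix is exactly what a tuned BLAS call (or cuBLAS on the GPU) consumes, which is where the speed of this strategy comes from.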
14 | -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/fig/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/tutorial/fig/.gitignore -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/fig/backward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/tutorial/fig/backward.jpg -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/fig/forward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/tutorial/fig/forward.jpg -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/fig/forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/tutorial/fig/forward_backward.png -------------------------------------------------------------------------------- /caffe-dslt/docs/tutorial/fig/layer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/docs/tutorial/fig/layer.jpg -------------------------------------------------------------------------------- /caffe-dslt/examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | file(GLOB_RECURSE examples_srcs "${PROJECT_SOURCE_DIR}/examples/*.cpp") 2 | 3 | foreach(source_file ${examples_srcs}) 4 | # get file name 5 | get_filename_component(name ${source_file} NAME_WE) 6 | 7 | # get folder name 8 | get_filename_component(path ${source_file} PATH) 9 | get_filename_component(folder ${path} NAME_WE) 10 | 11 | add_executable(${name} ${source_file}) 12 | target_link_libraries(${name} ${Caffe_LINK}) 13 | caffe_default_properties(${name}) 14 | 15 | # set back RUNTIME_OUTPUT_DIRECTORY 16 | set_target_properties(${name} PROPERTIES 17 | RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/examples/${folder}") 18 | 19 | caffe_set_solution_folder(${name} examples) 20 | 21 | # install 22 | install(TARGETS ${name} DESTINATION bin) 23 | 24 | if(UNIX OR APPLE) 25 | # Funny command to make tutorials work 26 | # TODO: remove in future as soon as naming is standartaized everywhere 27 | set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX}) 28 | add_custom_command(TARGET ${name} POST_BUILD 29 | COMMAND ln -sf "${__outname}" "${__outname}.bin") 30 | endif() 31 | endforeach() 32 | -------------------------------------------------------------------------------- /caffe-dslt/examples/ResNet/ResNet_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/ResNet/ResNet_mean.npy -------------------------------------------------------------------------------- /caffe-dslt/examples/ResNet/bias_init.txt: 
-------------------------------------------------------------------------------- 1 | 1 0 0 0 1 0 2 | -------------------------------------------------------------------------------- /caffe-dslt/examples/ResNet/stn_solver_res.prototxt: -------------------------------------------------------------------------------- 1 | net: "/home/luxiankai/code/deep-residual-networks/caffe/examples/ResNet/ResNet.prototxt" 2 | #test_iter: 100 3 | #test_interval: 2000 4 | display: 1 5 | base_lr: 0.00005#0.001<-#0.0001 6 | lr_policy: "step" 7 | stepsize: 10000 #10000 8 | gamma: 0.8 9 | max_iter: 50000 10 | momentum: 0.9 11 | weight_decay: 0.0005#0.001 12 | snapshot: 1000 13 | snapshot_prefix: "examples/ResNet/pretrain_resnet/" 14 | solver_mode: GPU 15 | -------------------------------------------------------------------------------- /caffe-dslt/examples/ResNet/stn_train_res.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train -solver examples/ResNet/stn_solver_res.prototxt -weights models/ResNet/ResNet-50-model.caffemodel -gpu 0 4 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_full_sigmoid_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_sigmoid_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 200 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 
13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 200 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid_bn" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_full_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 60000 22 | # snapshot intermediate results 23 | snapshot: 10000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_full_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 
13 | base_lr: 0.0001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 65000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_full_solver_lr2.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.00001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 70000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 4000 21 | # snapshot intermediate results 22 | snapshot: 4000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/cifar10_quick_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 
6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.0001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 5000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/create_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the cifar data into leveldb format. 3 | 4 | EXAMPLE=examples/cifar10 5 | DATA=data/cifar10 6 | DBTYPE=lmdb 7 | 8 | echo "Creating $DBTYPE..." 9 | 10 | rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE 11 | 12 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE 13 | 14 | echo "Computing image mean..." 15 | 16 | ./build/tools/compute_image_mean -backend=$DBTYPE \ 17 | $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto 18 | 19 | echo "Done." 20 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/train_full.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \ 11 | --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate.h5 12 | 13 | # reduce learning rate by factor of 10 14 | $TOOLS/caffe train \ 15 | --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \ 16 | --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate.h5 17 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/train_full_sigmoid.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_sigmoid_solver.prototxt 7 | 8 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/train_full_sigmoid_bn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt 7 | 8 | -------------------------------------------------------------------------------- /caffe-dslt/examples/cifar10/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_quick_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 after 8 epochs 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 11 | 
--snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 12 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/deconv_tracking.tar.gz -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/adjust.prototxt: -------------------------------------------------------------------------------- 1 | name: "adjust_layer" 2 | force_backward: true 3 | input: "data" 4 | input_dim: 1 5 | input_dim: 100 6 | input_dim: 29#15 7 | input_dim: 29#15 8 | layer { 9 | name: "adjust" type: "Convolution" bottom: "data" top: "adjust_data" param { 10 | lr_mult: 0 decay_mult: 0 } param { lr_mult: 1 decay_mult: 0 } 11 | convolution_param { num_output: 100 kernel_size: 1 group: 100 12 | weight_filler { type: "constant" value: 1 } 13 | bias_filler { 14 | type: "constant" 15 | value: 0 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/adjust_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: '/home/luxiankai/code/deep-residual-networks/caffe/examples/deconv_tracking/adjust.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 0.001#0.001#0.001#1e-6 #4e-7 # #8e-7 #6e-7 #3e-7 #1e-7 #1~4e-7 #3e-7 #1e-9 #4e-8# 1e-9 #2e-8#5e-8#1e-8 #7e-8 #7e-6 #5e-7 #1.2e-6 #8e-7 7 | gamma: 0.1 8 | stepsize: 120000 9 | display: 10 10 | #max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.0005#0.001 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | #device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | random_seed: 701 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/de-conv4.prototxt: -------------------------------------------------------------------------------- 1 | name: "de-conv3" 2 | force_backward: true 3 | input: "data" 4 | input_dim: 1 5 | input_dim: 1 6 | #input_dim: 232 7 | #input_dim: 232 8 | input_dim: 57 #48 9 | input_dim: 57 #48 10 | #input_dim: 480 11 | #input_dim: 480 12 | layer { 13 | bottom: "data" 14 | type: "Convolution" 15 | top: "score_fr" 16 | param { 17 | lr_mult: 1 18 | decay_mult: 1 19 | } 20 | param { 21 | lr_mult: 2 22 | decay_mult: 0 23 | } 24 | convolution_param { 25 | num_output: 1 26 | pad: 0 27 | kernel_size: 1 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/de-conv_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: '/home/luxiankai/code/deep-residual-networks/caffe/examples/deconv_tracking/train_test1.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 8e-9 #4e-7 #8e-7 #8e-7 #6e-7 #3e-7 #1e-7 #1~4e-7 #3e-7 #1e-9 #4e-8# 1e-9 #2e-8#5e-8#1e-8 #7e-8 #7e-6 #5e-7 #1.2e-6 #8e-7 #1e-9 7 | gamma: 0.1 8 | stepsize: 120000 9 | display: 10 10 | #max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.001 #0.0005 13 | #snapshot: 1000 14 | 
#snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | random_seed: 701 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/de-conv_solver1prototxt: -------------------------------------------------------------------------------- 1 | net: '/home/luxiankai/code/deep-residual-networks/caffe/examples/deconv_tracking/deconv_train_test7.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 0.001#0.001#0.001#0.001#1e-6 #4e-7 # #8e-7 #6e-7 #3e-7 #1e-7 #1~4e-7 #3e-7 #1e-9 #4e-8# 1e-9 #2e-8#5e-8#1e-8 #7e-8 #7e-6 #5e-7 #1.2e-6 #8e-7 7 | gamma: 0.8 8 | stepsize: 30000#12000 9 | display: 10 10 | max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.0005#0.001 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | #device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | random_seed: 701 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/de-conv_solver2.prototxt: -------------------------------------------------------------------------------- 1 | net: '/home/luxiankai/code/deep-residual-networks/caffe/examples/deconv_tracking/deconv_train_test7.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 0.001#0.001#0.001#0.001#1e-6 #4e-7 # #8e-7 #6e-7 #3e-7 #1e-7 #1~4e-7 #3e-7 #1e-9 #4e-8# 1e-9 #2e-8#5e-8#1e-8 #7e-8 #7e-6 #5e-7 #1.2e-6 #8e-7 7 | gamma: 0.8 8 | stepsize: 30000#12000 9 | display: 10 10 | max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.0005#0.001 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | #device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | random_seed: 701 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/loss.prototxt: -------------------------------------------------------------------------------- 1 | # design for scale+loss 2 | name: "CorrelatioMapLoss" 3 | force_backward: true 4 | input: "examplar" 5 | input_dim: 50 6 | input_dim: 96 7 | input_dim: 127 8 | input_dim: 127 9 | 10 | input: "instance" 11 | input_dim: 50 12 | input_dim: 96 13 | input_dim: 239 14 | input_dim: 239 15 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/loss_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: '/home/luxiankai/code/deep-residual-networks/caffe/examples/deconv_tracking/loss.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 8e-9 #4e-7 #8e-7 #8e-7 #6e-7 #3e-7 #1e-7 #1~4e-7 #3e-7 #1e-9 #4e-8# 1e-9 #2e-8#5e-8#1e-8 #7e-8 #7e-6 #5e-7 #1.2e-6 #8e-7 #1e-9 7 | gamma: 0.1 8 | stepsize: 120000 9 | display: 10 10 | #max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.001 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | 
#debug_display: 1000 21 | random_seed: 701 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/deconv_tracking/train_test2.prototxt: -------------------------------------------------------------------------------- 1 | # design for triplet stream network 2 | name: "TwoflowSTN" 3 | force_backward: true 4 | input: "examplar" 5 | input_dim: 1 6 | input_dim: 3 7 | input_dim: 11 8 | input_dim: 11 9 | 10 | input: "instance" 11 | input_dim: 1 12 | input_dim: 3 13 | input_dim: 23 14 | input_dim: 23 15 | 16 | layer { 17 | name: "corr" 18 | type: "Correlation" 19 | bottom: "instance" 20 | bottom: "examplar" 21 | top: "corr" 22 | correlation_param { 23 | pad: 0 24 | kernel_size: 1 25 | max_displacement:5 #63 26 | stride_1: 1 27 | stride_2: 1 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /caffe-dslt/examples/finetune_flickr_style/flickr_style.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/finetune_flickr_style/flickr_style.csv.gz -------------------------------------------------------------------------------- /caffe-dslt/examples/finetune_flickr_style/style_names.txt: -------------------------------------------------------------------------------- 1 | Detailed 2 | Pastel 3 | Melancholy 4 | Noir 5 | HDR 6 | Vintage 7 | Long Exposure 8 | Horror 9 | Sunny 10 | Bright 11 | Hazy 12 | Bokeh 13 | Serene 14 | Texture 15 | Ethereal 16 | Macro 17 | Depth of Field 18 | Geometric Composition 19 | Minimal 20 | Romantic 21 | -------------------------------------------------------------------------------- /caffe-dslt/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt" 2 | test_iter: 100 3 | test_interval: 1000 4 | base_lr: 0.001 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 20000 8 | display: 20 9 | max_iter: 100000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune" 14 | -------------------------------------------------------------------------------- /caffe-dslt/examples/hdf5_classification/nonlinear_auto_test.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/test.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: "InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | 
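The nonlinear_auto_test.prototxt dumped just above (and the matching nonlinear_auto_train.prototxt that follows) differ only in their HDF5 source list; the "_auto_" naming suggests they were generated programmatically, as in Caffe's HDF5 classification example. A minimal sketch, assuming pycaffe is importable and using illustrative output paths, of producing the same two-layer network with caffe.NetSpec:

```python
import caffe
from caffe import layers as L

def nonlinear_net(hdf5_list, batch_size):
    n = caffe.NetSpec()
    # HDF5Data emits two tops: the feature blob and the label blob.
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5_list, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)   # in-place, so ip2 still reads "ip1"
    n.ip2 = L.InnerProduct(n.relu1, num_output=2, weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()

with open('nonlinear_auto_train.prototxt', 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))
with open('nonlinear_auto_test.prototxt', 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))
```

Regenerating both prototxts from one function keeps the train and test definitions consistent when the batch size or layer widths change, instead of hand-editing two files.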
-------------------------------------------------------------------------------- /caffe-dslt/examples/hdf5_classification/nonlinear_auto_train.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/train.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: "InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | -------------------------------------------------------------------------------- /caffe-dslt/examples/hdf5_classification/nonlinear_solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt" 2 | test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt" 3 | test_iter: 250 4 | test_interval: 1000 5 | base_lr: 0.01 6 | lr_policy: "step" 7 | gamma: 0.1 8 | stepsize: 5000 9 | display: 1000 10 | max_iter: 10000 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/hdf5_classification/data/train" 15 | solver_mode: CPU 16 | -------------------------------------------------------------------------------- /caffe-dslt/examples/hdf5_classification/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "examples/hdf5_classification/logreg_auto_train.prototxt" 2 | test_net: "examples/hdf5_classification/logreg_auto_test.prototxt" 3 | test_iter: 250 4 | test_interval: 1000 5 | base_lr: 0.01 6 | lr_policy: "step" 7 | gamma: 0.1 8 | stepsize: 5000 9 | display: 1000 10 | max_iter: 10000 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/hdf5_classification/data/train" 15 | solver_mode: CPU 16 | -------------------------------------------------------------------------------- /caffe-dslt/examples/hdf5_classification/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "LogisticRegressionNet" 2 | layer { 3 | name: "data" 4 | type: "HDF5Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | hdf5_data_param { 11 | source: "examples/hdf5_classification/data/train.txt" 12 | batch_size: 10 13 | } 14 | } 15 | layer { 16 | name: "data" 17 | type: "HDF5Data" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | hdf5_data_param { 24 | source: "examples/hdf5_classification/data/test.txt" 25 | batch_size: 10 26 | } 27 | } 28 | layer { 29 | name: "fc1" 30 | type: "InnerProduct" 31 | bottom: "data" 32 | top: "fc1" 33 | param { 34 | lr_mult: 1 35 | decay_mult: 1 36 | } 37 | param { 38 | lr_mult: 2 39 | decay_mult: 0 40 | } 41 | inner_product_param { 42 | num_output: 2 43 | weight_filler { 44 | 
type: "xavier" 45 | } 46 | bias_filler { 47 | type: "constant" 48 | value: 0 49 | } 50 | } 51 | } 52 | layer { 53 | name: "loss" 54 | type: "SoftmaxWithLoss" 55 | bottom: "fc1" 56 | bottom: "label" 57 | top: "loss" 58 | } 59 | layer { 60 | name: "accuracy" 61 | type: "Accuracy" 62 | bottom: "fc1" 63 | bottom: "label" 64 | top: "accuracy" 65 | include { 66 | phase: TEST 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /caffe-dslt/examples/imagenet/make_imagenet_mean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Compute the mean image from the imagenet training lmdb 3 | # N.B. this is available in data/ilsvrc12 4 | 5 | EXAMPLE=examples/imagenet 6 | DATA=data/ilsvrc12 7 | TOOLS=build/tools 8 | 9 | $TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ 10 | $DATA/imagenet_mean.binaryproto 11 | 12 | echo "Done." 13 | -------------------------------------------------------------------------------- /caffe-dslt/examples/imagenet/resume_training.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt \ 5 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 6 | -------------------------------------------------------------------------------- /caffe-dslt/examples/imagenet/train_caffenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/images/cat.jpg -------------------------------------------------------------------------------- /caffe-dslt/examples/images/cat_gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/images/cat_gray.jpg -------------------------------------------------------------------------------- /caffe-dslt/examples/images/fish-bike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/examples/images/fish-bike.jpg -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/create_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into lmdb/leveldb format, 3 | # depending on the value assigned to $BACKEND. 4 | 5 | EXAMPLE=examples/mnist 6 | DATA=data/mnist 7 | BUILD=build/examples/mnist 8 | 9 | BACKEND="lmdb" 10 | 11 | echo "Creating ${BACKEND}..." 
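# remove any lmdb/leveldb left over from a previous run before regenerating it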
12 | 13 | rm -rf $EXAMPLE/mnist_train_${BACKEND} 14 | rm -rf $EXAMPLE/mnist_test_${BACKEND} 15 | 16 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ 17 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND} 18 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ 19 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND} 20 | 21 | echo "Done." 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_adadelta_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 1.0 11 | lr_policy: "fixed" 12 | momentum: 0.95 13 | weight_decay: 0.0005 14 | # Display every 100 iterations 15 | display: 100 16 | # The maximum number of iterations 17 | max_iter: 10000 18 | # snapshot intermediate results 19 | snapshot: 5000 20 | snapshot_prefix: "examples/mnist/lenet_adadelta" 21 | # solver mode: CPU or GPU 22 | solver_mode: GPU 23 | type: "AdaDelta" 24 | delta: 1e-6 25 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_auto_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | train_net: "examples/mnist/lenet_auto_train.prototxt" 3 | test_net: "examples/mnist/lenet_auto_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # The base learning rate, momentum and the weight decay of the network. 11 | base_lr: 0.01 12 | momentum: 0.9 13 | weight_decay: 0.0005 14 | # The learning rate policy 15 | lr_policy: "inv" 16 | gamma: 0.0001 17 | power: 0.75 18 | # Display every 100 iterations 19 | display: 100 20 | # The maximum number of iterations 21 | max_iter: 10000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "examples/mnist/lenet" 25 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_multistep_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
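# (with the "multistep" policy chosen further down, the learning rate is multiplied by gamma each time the iteration count reaches one of the stepvalue entries)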
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "multistep" 15 | gamma: 0.9 16 | stepvalue: 5000 17 | stepvalue: 7000 18 | stepvalue: 8000 19 | stepvalue: 9000 20 | stepvalue: 9500 21 | # Display every 100 iterations 22 | display: 100 23 | # The maximum number of iterations 24 | max_iter: 10000 25 | # snapshot intermediate results 26 | snapshot: 5000 27 | snapshot_prefix: "examples/mnist/lenet_multistep" 28 | # solver mode: CPU or GPU 29 | solver_mode: GPU 30 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_solver_adam.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | # this follows "ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION" 3 | net: "examples/mnist/lenet_train_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # All parameters are from the cited paper above 11 | base_lr: 0.001 12 | momentum: 0.9 13 | momentum2: 0.999 14 | # since Adam dynamically changes the learning rate, we set the base learning 15 | # rate to a fixed value 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | type: "Adam" 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/lenet_solver_rmsprop.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 
8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.0 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet_rmsprop" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | type: "RMSProp" 27 | rms_decay: 0.98 28 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/mnist_autoencoder_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder" 17 | momentum: 0.9 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/mnist_autoencoder_solver_adadelta.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 1.0 9 | lr_policy: "fixed" 10 | momentum: 0.95 11 | delta: 1e-8 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adadelta_train" 17 | # solver mode: CPU or GPU 18 | solver_mode: GPU 19 | type: "AdaDelta" 20 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "fixed" 10 | display: 100 11 | max_iter: 65000 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train" 15 | # solver mode: CPU or GPU 16 | solver_mode: GPU 17 | type: "AdaGrad" 18 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train" 17 | momentum: 0.95 18 
| # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | type: "Nesterov" 21 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_lenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt 4 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_lenet_adam.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt 4 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_lenet_consolidated.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/lenet_consolidated_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_lenet_rmsprop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_rmsprop.prototxt 4 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_mnist_autoencoder.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_mnist_autoencoder_adadelta.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_mnist_autoencoder_adagrad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/mnist/train_mnist_autoencoder_nesterov.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt 5 | -------------------------------------------------------------------------------- /caffe-dslt/examples/net_surgery/conv.prototxt: -------------------------------------------------------------------------------- 1 | # Simple single-layer network to showcase editing model parameters. 
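# The input is a single 1x1x100x100 blob; the layer below produces 3 output maps from 5x5 kernels with Gaussian-initialized weights (std 0.01) and constant zero bias.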
2 | name: "convolution" 3 | input: "data" 4 | input_shape { 5 | dim: 1 6 | dim: 1 7 | dim: 100 8 | dim: 100 9 | } 10 | layer { 11 | name: "conv" 12 | type: "Convolution" 13 | bottom: "data" 14 | top: "conv" 15 | convolution_param { 16 | num_output: 3 17 | kernel_size: 5 18 | stride: 1 19 | weight_filler { 20 | type: "gaussian" 21 | std: 0.01 22 | } 23 | bias_filler { 24 | type: "constant" 25 | value: 0 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /caffe-dslt/examples/pycaffe/layers/pyloss.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | 4 | 5 | class EuclideanLossLayer(caffe.Layer): 6 | """ 7 | Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer 8 | to demonstrate the class interface for developing layers in Python. 9 | """ 10 | 11 | def setup(self, bottom, top): 12 | # check input pair 13 | if len(bottom) != 2: 14 | raise Exception("Need two inputs to compute distance.") 15 | 16 | def reshape(self, bottom, top): 17 | # check input dimensions match 18 | if bottom[0].count != bottom[1].count: 19 | raise Exception("Inputs must have the same dimension.") 20 | # difference is shape of inputs 21 | self.diff = np.zeros_like(bottom[0].data, dtype=np.float32) 22 | # loss output is scalar 23 | top[0].reshape(1) 24 | 25 | def forward(self, bottom, top): 26 | self.diff[...] = bottom[0].data - bottom[1].data 27 | top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2. 28 | 29 | def backward(self, top, propagate_down, bottom): 30 | for i in range(2): 31 | if not propagate_down[i]: 32 | continue 33 | if i == 0: 34 | sign = 1 35 | else: 36 | sign = -1 37 | bottom[i].diff[...] = sign * self.diff / bottom[i].num 38 | -------------------------------------------------------------------------------- /caffe-dslt/examples/siamese/create_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into leveldb format. 3 | 4 | EXAMPLES=./build/examples/siamese 5 | DATA=./data/mnist 6 | 7 | echo "Creating leveldb..." 8 | 9 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb 10 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb 11 | 12 | $EXAMPLES/convert_mnist_siamese_data.bin \ 13 | $DATA/train-images-idx3-ubyte \ 14 | $DATA/train-labels-idx1-ubyte \ 15 | ./examples/siamese/mnist_siamese_train_leveldb 16 | $EXAMPLES/convert_mnist_siamese_data.bin \ 17 | $DATA/t10k-images-idx3-ubyte \ 18 | $DATA/t10k-labels-idx1-ubyte \ 19 | ./examples/siamese/mnist_siamese_test_leveldb 20 | 21 | echo "Done." 22 | -------------------------------------------------------------------------------- /caffe-dslt/examples/siamese/mnist_siamese_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/siamese/mnist_siamese_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
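# (the "inv" policy used below decays the rate as base_lr * (1 + gamma * iter)^(-power))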
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0000 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 50000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/siamese/mnist_siamese" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe-dslt/examples/siamese/train_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt 6 | -------------------------------------------------------------------------------- /caffe-dslt/examples/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Aug 1 22:05:35 2016 4 | 5 | @author: luxiankai 6 | """ 7 | import numpy as np 8 | import matplotlib.pyplot as plt 9 | #%matplotlib inline 10 | 11 | # Make sure that caffe is on the python path: 12 | caffe_root = '../' # this file is expected to be in {caffe_root}/examples 13 | import sys 14 | sys.path.insert(0, caffe_root + 'python') 15 | 16 | import caffe 17 | 18 | plt.rcParams['figure.figsize'] = (10, 10) 19 | plt.rcParams['image.interpolation'] = 'nearest' 20 | plt.rcParams['image.cmap'] = 'gray' 21 | 22 | import os 23 | MEAN_FILE=caffe_root+'examples/ResNet/ResNet_mean.binaryproto' 24 | mean_blob = caffe.proto.caffe_pb2.BlobProto() 25 | mean_blob.ParseFromString(open(MEAN_FILE, 'rb').read()) 26 | 27 | # convert the mean blob into a numpy.array 28 | mean_npy = caffe.io.blobproto_to_array(mean_blob) 29 | print(mean_npy.shape) 30 | 31 | -------------------------------------------------------------------------------- /caffe-dslt/examples/web_demo/exifutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script handles the skimage exif problem. 3 | """ 4 | 5 | from PIL import Image 6 | import numpy as np 7 | 8 | ORIENTATIONS = { # used in apply_orientation 9 | 2: (Image.FLIP_LEFT_RIGHT,), 10 | 3: (Image.ROTATE_180,), 11 | 4: (Image.FLIP_TOP_BOTTOM,), 12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90), 13 | 6: (Image.ROTATE_270,), 14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270), 15 | 8: (Image.ROTATE_90,) 16 | } 17 | 18 | 19 | def open_oriented_im(im_path): 20 | im = Image.open(im_path) 21 | if hasattr(im, '_getexif'): 22 | exif = im._getexif() 23 | if exif is not None and 274 in exif: 24 | orientation = exif[274] 25 | im = apply_orientation(im, orientation) 26 | img = np.asarray(im).astype(np.float32) / 255.
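    # scale to float32 in [0, 1]; the branches below add a channel axis for grayscale images and drop the alpha channel from RGBA images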
27 | if img.ndim == 2: 28 | img = img[:, :, np.newaxis] 29 | img = np.tile(img, (1, 1, 3)) 30 | elif img.shape[2] == 4: 31 | img = img[:, :, :3] 32 | return img 33 | 34 | 35 | def apply_orientation(im, orientation): 36 | if orientation in ORIENTATIONS: 37 | for method in ORIENTATIONS[orientation]: 38 | im = im.transpose(method) 39 | return im 40 | -------------------------------------------------------------------------------- /caffe-dslt/examples/web_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | werkzeug 2 | flask 3 | tornado 4 | numpy 5 | pandas 6 | pillow 7 | pyyaml 8 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/parallel.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | #include "caffe/solver.hpp" 16 | #include "caffe/sgd_solvers.hpp" 17 | #include "caffe/solver_factory.hpp" 18 | #include "caffe/util/benchmark.hpp" 19 | #include "caffe/util/io.hpp" 20 | #include "caffe/util/upgrade_proto.hpp" 21 | 22 | #endif // CAFFE_CAFFE_HPP_ 23 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/gpu_util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_GPU_UTIL_H_ 2 | #define CAFFE_UTIL_GPU_UTIL_H_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); 8 | 9 | template <> 10 | inline __device__ 11 | float caffe_gpu_atomic_add(const float val, float* address) { 12 | return atomicAdd(address, val); 13 | } 14 | 15 | // double atomicAdd implementation taken from: 16 | // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG 17 | template <> 18 | inline __device__ 19 | double caffe_gpu_atomic_add(const double val, double* address) { 20 | unsigned long long int* address_as_ull = // NOLINT(runtime/int) 21 | // NOLINT_NEXT_LINE(runtime/int) 22 | reinterpret_cast(address); 23 | unsigned long long int old = *address_as_ull; // NOLINT(runtime/int) 24 | unsigned long long int assumed; // NOLINT(runtime/int) 25 | do { 26 | assumed = old; 27 | old = atomicCAS(address_as_ull, assumed, 28 | __double_as_longlong(val + __longlong_as_double(assumed))); 29 | } while (assumed != old); 30 | return __longlong_as_double(old); 31 | } 32 | 33 | } // namespace caffe 34 | 35 | #endif // CAFFE_UTIL_GPU_UTIL_H_ 36 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/cudnn_lrn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LRN_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | template 16 | class CuDNNLRNLayer : public LRNLayer { 17 | 
public: 18 | explicit CuDNNLRNLayer(const LayerParameter& param) 19 | : LRNLayer(param), handles_setup_(false) {} 20 | virtual void LayerSetUp(const vector*>& bottom, 21 | const vector*>& top); 22 | virtual void Reshape(const vector*>& bottom, 23 | const vector*>& top); 24 | virtual ~CuDNNLRNLayer(); 25 | 26 | protected: 27 | virtual void Forward_gpu(const vector*>& bottom, 28 | const vector*>& top); 29 | virtual void Backward_gpu(const vector*>& top, 30 | const vector& propagate_down, const vector*>& bottom); 31 | 32 | bool handles_setup_; 33 | cudnnHandle_t handle_; 34 | cudnnLRNDescriptor_t norm_desc_; 35 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 36 | 37 | int size_; 38 | Dtype alpha_, beta_, k_; 39 | }; 40 | #endif 41 | 42 | } // namespace caffe 43 | 44 | #endif // CAFFE_CUDNN_LRN_LAYER_HPP_ 45 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/cudnn_relu_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ 2 | #define CAFFE_CUDNN_RELU_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/relu_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of ReLULayer. 18 | */ 19 | template 20 | class CuDNNReLULayer : public ReLULayer { 21 | public: 22 | explicit CuDNNReLULayer(const LayerParameter& param) 23 | : ReLULayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNReLULayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_RELU_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/cudnn_sigmoid_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/sigmoid_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of SigmoidLayer. 
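 * Only compiled when USE_CUDNN is defined (see the #ifdef guard above).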
18 | */ 19 | template 20 | class CuDNNSigmoidLayer : public SigmoidLayer { 21 | public: 22 | explicit CuDNNSigmoidLayer(const LayerParameter& param) 23 | : SigmoidLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNSigmoidLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/cudnn_softmax_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/softmax_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | /** 16 | * @brief cuDNN implementation of SoftmaxLayer. 17 | * Fallback to SoftmaxLayer for CPU mode. 18 | */ 19 | template 20 | class CuDNNSoftmaxLayer : public SoftmaxLayer { 21 | public: 22 | explicit CuDNNSoftmaxLayer(const LayerParameter& param) 23 | : SoftmaxLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNSoftmaxLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/cudnn_tanh_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ 2 | #define CAFFE_CUDNN_TANH_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/tanh_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of TanHLayer. 
18 | */ 19 | template 20 | class CuDNNTanHLayer : public TanHLayer { 21 | public: 22 | explicit CuDNNTanHLayer(const LayerParameter& param) 23 | : TanHLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNTanHLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_TANH_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_LAYER_HPP_ 2 | #define CAFFE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/data_reader.hpp" 8 | #include "caffe/data_transformer.hpp" 9 | #include "caffe/internal_thread.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layers/base_data_layer.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/util/db.hpp" 14 | 15 | namespace caffe { 16 | 17 | template 18 | class DataLayer : public BasePrefetchingDataLayer { 19 | public: 20 | explicit DataLayer(const LayerParameter& param); 21 | virtual ~DataLayer(); 22 | virtual void DataLayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | // DataLayer uses DataReader instead for sharing for parallelism 25 | virtual inline bool ShareInParallel() const { return false; } 26 | virtual inline const char* type() const { return "Data"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 0; } 28 | virtual inline int MinTopBlobs() const { return 1; } 29 | virtual inline int MaxTopBlobs() const { return 2; } 30 | 31 | protected: 32 | virtual void load_batch(Batch* batch); 33 | 34 | DataReader reader_; 35 | }; 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_DATA_LAYER_HPP_ 40 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/layers/neuron_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_NEURON_LAYER_HPP_ 2 | #define CAFFE_NEURON_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief An interface for layers that take one blob as input (@f$ x @f$) 14 | * and produce one equally-sized blob as output (@f$ y @f$), where 15 | * each element of the output depends only on the corresponding input 16 | * element. 
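 * ReLU, Sigmoid and TanH layers are typical implementations of this interface.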
17 | */ 18 | template 19 | class NeuronLayer : public Layer { 20 | public: 21 | explicit NeuronLayer(const LayerParameter& param) 22 | : Layer(param) {} 23 | virtual void Reshape(const vector*>& bottom, 24 | const vector*>& top); 25 | 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | }; 29 | 30 | } // namespace caffe 31 | 32 | #endif // CAFFE_NEURON_LAYER_HPP_ 33 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 | 8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | virtual void Start(); 15 | virtual void Stop(); 16 | virtual float MilliSeconds(); 17 | virtual float MicroSeconds(); 18 | virtual float Seconds(); 19 | 20 | inline bool initted() { return initted_; } 21 | inline bool running() { return running_; } 22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 23 | 24 | protected: 25 | void Init(); 26 | 27 | bool initted_; 28 | bool running_; 29 | bool has_run_at_least_once_; 30 | #ifndef CPU_ONLY 31 | cudaEvent_t start_gpu_; 32 | cudaEvent_t stop_gpu_; 33 | #endif 34 | boost::posix_time::ptime start_cpu_; 35 | boost::posix_time::ptime stop_cpu_; 36 | float elapsed_milliseconds_; 37 | float elapsed_microseconds_; 38 | }; 39 | 40 | class CPUTimer : public Timer { 41 | public: 42 | explicit CPUTimer(); 43 | virtual ~CPUTimer() {} 44 | virtual void Start(); 45 | virtual void Stop(); 46 | virtual float MilliSeconds(); 47 | virtual float MicroSeconds(); 48 | }; 49 | 50 | } // namespace caffe 51 | 52 | #endif // CAFFE_UTIL_BENCHMARK_H_ 53 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/blocking_queue.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 2 | #define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | namespace caffe { 8 | 9 | template 10 | class BlockingQueue { 11 | public: 12 | explicit BlockingQueue(); 13 | 14 | void push(const T& t); 15 | 16 | bool try_pop(T* t); 17 | 18 | // This logs a message if the threads needs to be blocked 19 | // useful for detecting e.g. when data feeding is too slow 20 | T pop(const string& log_on_wait = ""); 21 | 22 | bool try_peek(T* t); 23 | 24 | // Return element without removing it 25 | T peek(); 26 | 27 | size_t size() const; 28 | 29 | protected: 30 | /** 31 | Move synchronization fields out instead of including boost/thread.hpp 32 | to avoid a boost/NVCC issues (#1009, #1010) on OSX. Also fails on 33 | Linux CUDA 7.0.18. 
34 | */ 35 | class sync; 36 | 37 | std::queue queue_; 38 | shared_ptr sync_; 39 | 40 | DISABLE_COPY_AND_ASSIGN(BlockingQueue); 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/db.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_DB_HPP 2 | #define CAFFE_UTIL_DB_HPP 3 | 4 | #include 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/proto/caffe.pb.h" 8 | 9 | namespace caffe { namespace db { 10 | 11 | enum Mode { READ, WRITE, NEW }; 12 | 13 | class Cursor { 14 | public: 15 | Cursor() { } 16 | virtual ~Cursor() { } 17 | virtual void SeekToFirst() = 0; 18 | virtual void Next() = 0; 19 | virtual string key() = 0; 20 | virtual string value() = 0; 21 | virtual bool valid() = 0; 22 | 23 | DISABLE_COPY_AND_ASSIGN(Cursor); 24 | }; 25 | 26 | class Transaction { 27 | public: 28 | Transaction() { } 29 | virtual ~Transaction() { } 30 | virtual void Put(const string& key, const string& value) = 0; 31 | virtual void Commit() = 0; 32 | 33 | DISABLE_COPY_AND_ASSIGN(Transaction); 34 | }; 35 | 36 | class DB { 37 | public: 38 | DB() { } 39 | virtual ~DB() { } 40 | virtual void Open(const string& source, Mode mode) = 0; 41 | virtual void Close() = 0; 42 | virtual Cursor* NewCursor() = 0; 43 | virtual Transaction* NewTransaction() = 0; 44 | 45 | DISABLE_COPY_AND_ASSIGN(DB); 46 | }; 47 | 48 | DB* GetDB(DataParameter::DB backend); 49 | DB* GetDB(const string& backend); 50 | 51 | } // namespace db 52 | } // namespace caffe 53 | 54 | #endif // CAFFE_UTIL_DB_HPP 55 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/format.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_FORMAT_H_ 2 | #define CAFFE_UTIL_FORMAT_H_ 3 | 4 | #include // NOLINT(readability/streams) 5 | #include // NOLINT(readability/streams) 6 | #include 7 | 8 | namespace caffe { 9 | 10 | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) { 11 | std::ostringstream s; 12 | s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n; 13 | return s.str(); 14 | } 15 | 16 | } 17 | 18 | #endif // CAFFE_UTIL_FORMAT_H_ 19 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/gpu_util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_GPU_UTIL_H_ 2 | #define CAFFE_UTIL_GPU_UTIL_H_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); 8 | 9 | template <> 10 | inline __device__ 11 | float caffe_gpu_atomic_add(const float val, float* address) { 12 | return atomicAdd(address, val); 13 | } 14 | 15 | // double atomicAdd implementation taken from: 16 | // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG 17 | template <> 18 | inline __device__ 19 | double caffe_gpu_atomic_add(const double val, double* address) { 20 | unsigned long long int* address_as_ull = // NOLINT(runtime/int) 21 | // NOLINT_NEXT_LINE(runtime/int) 22 | reinterpret_cast(address); 23 | unsigned long long int old = *address_as_ull; // NOLINT(runtime/int) 24 | unsigned long long int assumed; // NOLINT(runtime/int) 25 | do { 26 | assumed = old; 27 | old = atomicCAS(address_as_ull, assumed, 28 | __double_as_longlong(val + __longlong_as_double(assumed))); 29 | } while (assumed != 
old); 30 | return __longlong_as_double(old); 31 | } 32 | 33 | } // namespace caffe 34 | 35 | #endif // CAFFE_UTIL_GPU_UTIL_H_ 36 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/hdf5.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_HDF5_H_ 2 | #define CAFFE_UTIL_HDF5_H_ 3 | 4 | #include 5 | 6 | #include "hdf5.h" 7 | #include "hdf5_hl.h" 8 | 9 | #include "caffe/blob.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void hdf5_load_nd_dataset_helper( 15 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 16 | Blob* blob); 17 | 18 | template 19 | void hdf5_load_nd_dataset( 20 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 21 | Blob* blob); 22 | 23 | template 24 | void hdf5_save_nd_dataset( 25 | const hid_t file_id, const string& dataset_name, const Blob& blob, 26 | bool write_diff = false); 27 | 28 | int hdf5_load_int(hid_t loc_id, const string& dataset_name); 29 | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); 30 | string hdf5_load_string(hid_t loc_id, const string& dataset_name); 31 | void hdf5_save_string(hid_t loc_id, const string& dataset_name, 32 | const string& s); 33 | 34 | int hdf5_get_num_links(hid_t loc_id); 35 | string hdf5_get_name_by_idx(hid_t loc_id, int idx); 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_UTIL_HDF5_H_ 40 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer. 
12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | } // namespace caffe 25 | 26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/rng.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_RNG_HPP_ 44 | -------------------------------------------------------------------------------- /caffe-dslt/include/caffe/util/signal_handler.h: -------------------------------------------------------------------------------- 1 | #ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 2 | #define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 3 | 4 | #include "caffe/proto/caffe.pb.h" 5 | #include "caffe/solver.hpp" 6 | 7 | namespace caffe { 8 | 9 | class SignalHandler { 10 | public: 11 | // Contructor. Specify what action to take when a signal is received. 
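// (the possible actions are NONE, STOP and SNAPSHOT)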
12 | SignalHandler(SolverAction::Enum SIGINT_action, 13 | SolverAction::Enum SIGHUP_action); 14 | ~SignalHandler(); 15 | ActionCallback GetActionFunction(); 16 | private: 17 | SolverAction::Enum CheckForSignals() const; 18 | SolverAction::Enum SIGINT_action_; 19 | SolverAction::Enum SIGHUP_action_; 20 | }; 21 | 22 | } // namespace caffe 23 | 24 | #endif // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 25 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/+test/test_io.m: -------------------------------------------------------------------------------- 1 | classdef test_io < matlab.unittest.TestCase 2 | methods (Test) 3 | function test_read_write_mean(self) 4 | % randomly generate mean data 5 | width = 200; 6 | height = 300; 7 | channels = 3; 8 | mean_data_write = 255 * rand(width, height, channels, 'single'); 9 | % write mean data to binary proto 10 | mean_proto_file = tempname(); 11 | caffe.io.write_mean(mean_data_write, mean_proto_file); 12 | % read mean data from saved binary proto and test whether they are equal 13 | mean_data_read = caffe.io.read_mean(mean_proto_file); 14 | self.verifyEqual(mean_data_write, mean_data_read) 15 | delete(mean_proto_file); 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/Layer.m: -------------------------------------------------------------------------------- 1 | classdef Layer < handle 2 | % Wrapper class of caffe::Layer in matlab 3 | 4 | properties (Access = private) 5 | hLayer_self 6 | attributes 7 | % attributes fields: 8 | % hBlob_blobs 9 | end 10 | properties (SetAccess = private) 11 | params 12 | end 13 | 14 | methods 15 | function self = Layer(hLayer_layer) 16 | CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle'); 17 | 18 | % setup self handle and attributes 19 | self.hLayer_self = hLayer_layer; 20 | self.attributes = caffe_('layer_get_attr', self.hLayer_self); 21 | 22 | % setup weights 23 | self.params = caffe.Blob.empty(); 24 | for n = 1:length(self.attributes.hBlob_blobs) 25 | self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n)); 26 | end 27 | end 28 | function layer_type = type(self) 29 | layer_type = caffe_('layer_get_type', self.hLayer_self); 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/get_net.m: -------------------------------------------------------------------------------- 1 | function net = get_net(varargin) 2 | % net = get_net(model_file, phase_name) or 3 | % net = get_net(model_file, weights_file, phase_name) 4 | % Construct a net from model_file, and load weights from weights_file 5 | % phase_name can only be 'train' or 'test' 6 | 7 | CHECK(nargin == 2 || nargin == 3, ['usage: ' ... 8 | 'net = get_net(model_file, phase_name) or ' ... 9 | 'net = get_net(model_file, weights_file, phase_name)']); 10 | if nargin == 3 11 | model_file = varargin{1}; 12 | weights_file = varargin{2}; 13 | phase_name = varargin{3}; 14 | elseif nargin == 2 15 | model_file = varargin{1}; 16 | phase_name = varargin{2}; 17 | end 18 | 19 | CHECK(ischar(model_file), 'model_file must be a string'); 20 | CHECK(ischar(phase_name), 'phase_name must be a string'); 21 | CHECK_FILE_EXIST(model_file); 22 | CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ... 23 | sprintf('phase_name can only be %strain%s or %stest%s', ... 
24 | char(39), char(39), char(39), char(39))); 25 | 26 | % construct caffe net from model_file 27 | hNet = caffe_('get_net', model_file, phase_name); 28 | net = caffe.Net(hNet); 29 | 30 | % load weights from weights_file 31 | if nargin == 3 32 | CHECK(ischar(weights_file), 'weights_file must be a string'); 33 | CHECK_FILE_EXIST(weights_file); 34 | net.copy_from(weights_file); 35 | end 36 | 37 | end 38 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/get_solver.m: -------------------------------------------------------------------------------- 1 | function solver = get_solver(solver_file) 2 | % solver = get_solver(solver_file) 3 | % Construct a Solver object from solver_file 4 | 5 | CHECK(ischar(solver_file), 'solver_file must be a string'); 6 | CHECK_FILE_EXIST(solver_file); 7 | pSolver = caffe_('get_solver', solver_file); 8 | solver = caffe.Solver(pSolver); 9 | 10 | end 11 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/private/CHECK.m: -------------------------------------------------------------------------------- 1 | function CHECK(expr, error_msg) 2 | 3 | if ~expr 4 | error(error_msg); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/private/CHECK_FILE_EXIST.m: -------------------------------------------------------------------------------- 1 | function CHECK_FILE_EXIST(filename) 2 | 3 | if exist(filename, 'file') == 0 4 | error('%s does not exist', filename); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/private/is_valid_handle.m: -------------------------------------------------------------------------------- 1 | function valid = is_valid_handle(hObj) 2 | % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key') 3 | % Check if a handle is valid (has the right data type and init_key matches) 4 | % Use is_valid_handle('get_new_init_key') to get new init_key from C++; 5 | 6 | % a handle is a struct array with the following fields 7 | % (uint64) ptr : the pointer to the C++ object 8 | % (double) init_key : caffe initialization key 9 | 10 | persistent init_key; 11 | if isempty(init_key) 12 | init_key = caffe_('get_init_key'); 13 | end 14 | 15 | % is_valid_handle('get_new_init_key') to get new init_key from C++; 16 | if ischar(hObj) && strcmp(hObj, 'get_new_init_key') 17 | init_key = caffe_('get_init_key'); 18 | return 19 | else 20 | % check whether data types are correct and init_key matches 21 | valid = isstruct(hObj) ... 22 | && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ... 23 | && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ... 
24 | && hObj.init_key == init_key; 25 | end 26 | 27 | end 28 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/reset_all.m: -------------------------------------------------------------------------------- 1 | function reset_all() 2 | % reset_all() 3 | % clear all solvers and stand-alone nets and reset Caffe to initial status 4 | 5 | caffe_('reset'); 6 | is_valid_handle('get_new_init_key'); 7 | 8 | end 9 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/run_tests.m: -------------------------------------------------------------------------------- 1 | function results = run_tests() 2 | % results = run_tests() 3 | % run all tests in this caffe matlab wrapper package 4 | 5 | % use CPU for testing 6 | caffe.set_mode_cpu(); 7 | 8 | % reset caffe before testing 9 | caffe.reset_all(); 10 | 11 | % put all test cases here 12 | results = [... 13 | run(caffe.test.test_net) ... 14 | run(caffe.test.test_solver) ... 15 | run(caffe.test.test_io) ]; 16 | 17 | % reset caffe after testing 18 | caffe.reset_all(); 19 | 20 | end 21 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/set_device.m: -------------------------------------------------------------------------------- 1 | function set_device(device_id) 2 | % set_device(device_id) 3 | % set Caffe's GPU device ID 4 | 5 | CHECK(isscalar(device_id) && device_id >= 0, ... 6 | 'device_id must be non-negative integer'); 7 | device_id = double(device_id); 8 | 9 | caffe_('set_device', device_id); 10 | 11 | end 12 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/set_mode_cpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_cpu() 2 | % set_mode_cpu() 3 | % set Caffe to CPU mode 4 | 5 | caffe_('set_mode_cpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/set_mode_gpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_gpu() 2 | % set_mode_gpu() 3 | % set Caffe to GPU mode 4 | 5 | caffe_('set_mode_gpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/+caffe/version.m: -------------------------------------------------------------------------------- 1 | function version_str = version() 2 | % version() 3 | % show Caffe's version. 4 | 5 | version_str = caffe_('version'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe-dslt/matlab/hdf5creation/.gitignore: -------------------------------------------------------------------------------- 1 | *.h5 2 | list.txt 3 | -------------------------------------------------------------------------------- /caffe-dslt/python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver 2 | from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list 3 | from ._caffe import __version__ 4 | from .proto.caffe_pb2 import TRAIN, TEST 5 | from .classifier import Classifier 6 | from .detector import Detector 7 | from . 
import io 8 | from .net_spec import layers, params, NetSpec, to_proto 9 | -------------------------------------------------------------------------------- /caffe-dslt/python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /caffe-dslt/python/caffe/test/test_io.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | 4 | import caffe 5 | 6 | class TestBlobProtoToArray(unittest.TestCase): 7 | 8 | def test_old_format(self): 9 | data = np.zeros((10,10)) 10 | blob = caffe.proto.caffe_pb2.BlobProto() 11 | blob.data.extend(list(data.flatten())) 12 | shape = (1,1,10,10) 13 | blob.num, blob.channels, blob.height, blob.width = shape 14 | 15 | arr = caffe.io.blobproto_to_array(blob) 16 | self.assertEqual(arr.shape, shape) 17 | 18 | def test_new_format(self): 19 | data = np.zeros((10,10)) 20 | blob = caffe.proto.caffe_pb2.BlobProto() 21 | blob.data.extend(list(data.flatten())) 22 | blob.shape.dim.extend(list(data.shape)) 23 | 24 | arr = caffe.io.blobproto_to_array(blob) 25 | self.assertEqual(arr.shape, data.shape) 26 | 27 | def test_no_shape(self): 28 | data = np.zeros((10,10)) 29 | blob = caffe.proto.caffe_pb2.BlobProto() 30 | blob.data.extend(list(data.flatten())) 31 | 32 | with self.assertRaises(ValueError): 33 | caffe.io.blobproto_to_array(blob) 34 | 35 | def test_scalar(self): 36 | data = np.ones((1)) * 123 37 | blob = caffe.proto.caffe_pb2.BlobProto() 38 | blob.data.extend(list(data.flatten())) 39 | 40 | arr = caffe.io.blobproto_to_array(blob) 41 | self.assertEqual(arr, 123) 42 | -------------------------------------------------------------------------------- /caffe-dslt/python/caffe/test/test_layer_type_list.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import caffe 4 | 5 | class TestLayerTypeList(unittest.TestCase): 6 | 7 | def test_standard_types(self): 8 | #removing 'Data' from list 9 | for type_name in ['Data', 'Convolution', 'InnerProduct']: 10 | self.assertIn(type_name, caffe.layer_type_list(), 11 | '%s not in layer_type_list()' % type_name) 12 | -------------------------------------------------------------------------------- /caffe-dslt/python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | matplotlib>=1.3.1 6 | ipython>=3.0.0 7 | h5py>=2.2.0 8 | leveldb>=0.191 9 | networkx>=1.8.1 10 | nose>=1.3.0 11 | pandas>=0.12.0 12 | python-dateutil>=1.4,<2 13 | protobuf>=2.5.0 14 | python-gflags>=2.0 15 | pyyaml>=3.10 16 | Pillow>=2.3.0 17 | six>=1.1.0 -------------------------------------------------------------------------------- /caffe-dslt/scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build documentation for display in web browser. 3 | 4 | PORT=${1:-4000} 5 | 6 | echo "usage: build_docs.sh [port]" 7 | 8 | # Find the docs dir, no matter where the script is called 9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 10 | cd $ROOT_DIR 11 | 12 | # Gather docs. 13 | scripts/gather_examples.sh 14 | 15 | # Generate developer docs. 
16 | make docs 17 | 18 | # Display docs using web server. 19 | cd docs 20 | jekyll serve -w -s . -d _site --port=$PORT 21 | -------------------------------------------------------------------------------- /caffe-dslt/scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /caffe-dslt/scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh " 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip 22 | unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR 23 | rm $MODEL_DIR/gist.zip 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /caffe-dslt/scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 
25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /caffe-dslt/scripts/travis/travis_setup_makefile_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mv Makefile.config.example Makefile.config 6 | 7 | if $WITH_CUDA; then 8 | # Only generate compute_50. 9 | GENCODE="-gencode arch=compute_50,code=sm_50" 10 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" 11 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config 12 | fi 13 | 14 | # Remove IO library settings from Makefile.config 15 | # to avoid conflicts with CI configuration 16 | sed -i -e '/USE_LMDB/d' Makefile.config 17 | sed -i -e '/USE_LEVELDB/d' Makefile.config 18 | sed -i -e '/USE_OPENCV/d' Makefile.config 19 | 20 | cat << 'EOF' >> Makefile.config 21 | # Travis' nvcc doesn't like newer boost versions 22 | NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used 23 | ANACONDA_HOME := $(CONDA_DIR) 24 | PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ 25 | $(ANACONDA_HOME)/include/python2.7 \ 26 | $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include 27 | PYTHON_LIB := $(ANACONDA_HOME)/lib 28 | INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include 29 | LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib 30 | WITH_PYTHON_LAYER := 1 31 | EOF 32 | -------------------------------------------------------------------------------- /caffe-dslt/scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! -f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh " 7 | echo " /readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " /readme.md must contain name field in the front-matter." 18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your /readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!" 
38 | fi 39 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # generate protobuf sources 2 | file(GLOB proto_files proto/*.proto) 3 | caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) 4 | 5 | # include python files either to force generation 6 | add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) 7 | set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 8 | caffe_default_properties(proto) 9 | 10 | # --[ Caffe library 11 | 12 | # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists 13 | caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) 14 | 15 | if(HAVE_CUDA) 16 | caffe_cuda_compile(cuda_objs ${cuda}) 17 | list(APPEND srcs ${cuda_objs} ${cuda}) 18 | endif() 19 | 20 | add_library(caffe ${srcs}) 21 | target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) 22 | caffe_default_properties(caffe) 23 | set_target_properties(caffe PROPERTIES 24 | VERSION ${CAFFE_TARGET_VERSION} 25 | SOVERSION ${CAFFE_TARGET_SOVERSION} 26 | ) 27 | 28 | # ---[ Tests 29 | add_subdirectory(test) 30 | 31 | # ---[ Install 32 | install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) 33 | install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) 34 | install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) 35 | 36 | file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) 37 | list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) 38 | install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) 39 | 40 | 41 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "caffe/layer.hpp" 3 | 4 | namespace caffe { 5 | 6 | template 7 | void Layer::InitMutex() { 8 | forward_mutex_.reset(new boost::mutex()); 9 | } 10 | 11 | template 12 | void Layer::Lock() { 13 | if (IsShared()) { 14 | forward_mutex_->lock(); 15 | } 16 | } 17 | 18 | template 19 | void Layer::Unlock() { 20 | if (IsShared()) { 21 | forward_mutex_->unlock(); 22 | } 23 | } 24 | 25 | INSTANTIATE_CLASS(Layer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/absval_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/absval_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void AbsValLayer::Forward_gpu( 10 | const vector*>& bottom, const vector*>& top) { 11 | const int count = top[0]->count(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 14 | } 15 | 16 | template 17 | void AbsValLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | const int count = top[0]->count(); 20 | const Dtype* top_diff = top[0]->gpu_diff(); 21 | if (propagate_down[0]) { 22 | const Dtype* bottom_data = bottom[0]->gpu_data(); 23 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 24 | caffe_gpu_sign(count, bottom_data, bottom_diff); 25 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 26 | } 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); 30 | 31 | 32 | } // namespace caffe 33 | 
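For reference, the Backward_gpu pass of the AbsVal layer above multiplies the incoming gradient by the sign of the input (caffe_gpu_sign followed by caffe_gpu_mul). A minimal MATLAB sketch of the same element-wise math, using illustrative variable names that are not part of this repository:

% Sketch of the AbsVal forward/backward math shown above (illustrative names only).
x    = randn(4, 4, 'single');   % stands in for the bottom blob data
top  = abs(x);                  % Forward: caffe_gpu_abs
dtop = randn(4, 4, 'single');   % gradient arriving at the top blob
dbot = sign(x) .* dtop;         % Backward: caffe_gpu_sign, then caffe_gpu_mul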
-------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/base_data_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void BasePrefetchingDataLayer::Forward_gpu( 9 | const vector*>& bottom, const vector*>& top) { 10 | Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); 11 | // Reshape to loaded data. 12 | top[0]->ReshapeLike(batch->data_); 13 | // Copy the data 14 | caffe_copy(batch->data_.count(), batch->data_.gpu_data(), 15 | top[0]->mutable_gpu_data()); 16 | if (this->output_labels_) { 17 | // Reshape to loaded labels. 18 | top[1]->ReshapeLike(batch->label_); 19 | // Copy the labels. 20 | caffe_copy(batch->label_.count(), batch->label_.gpu_data(), 21 | top[1]->mutable_gpu_data()); 22 | } 23 | // Ensure the copy is synchronous wrt the host, so that the next batch isn't 24 | // copied in meanwhile. 25 | CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); 26 | prefetch_free_.push(batch); 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/cudnn_relu_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_relu_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | ReLULayer::LayerSetUp(bottom, top); 12 | // initialize cuDNN 13 | CUDNN_CHECK(cudnnCreate(&handle_)); 14 | cudnn::createTensor4dDesc(&bottom_desc_); 15 | cudnn::createTensor4dDesc(&top_desc_); 16 | handles_setup_ = true; 17 | } 18 | 19 | template 20 | void CuDNNReLULayer::Reshape(const vector*>& bottom, 21 | const vector*>& top) { 22 | ReLULayer::Reshape(bottom, top); 23 | const int N = bottom[0]->num(); 24 | const int K = bottom[0]->channels(); 25 | const int H = bottom[0]->height(); 26 | const int W = bottom[0]->width(); 27 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 28 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 29 | } 30 | 31 | template 32 | CuDNNReLULayer::~CuDNNReLULayer() { 33 | // Check that handles have been setup before destroying. 
34 | if (!handles_setup_) { return; } 35 | 36 | cudnnDestroyTensorDescriptor(this->bottom_desc_); 37 | cudnnDestroyTensorDescriptor(this->top_desc_); 38 | cudnnDestroy(this->handle_); 39 | } 40 | 41 | INSTANTIATE_CLASS(CuDNNReLULayer); 42 | 43 | } // namespace caffe 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/cudnn_tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_tanh_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | TanHLayer::LayerSetUp(bottom, top); 12 | // initialize cuDNN 13 | CUDNN_CHECK(cudnnCreate(&handle_)); 14 | cudnn::createTensor4dDesc(&bottom_desc_); 15 | cudnn::createTensor4dDesc(&top_desc_); 16 | handles_setup_ = true; 17 | } 18 | 19 | template 20 | void CuDNNTanHLayer::Reshape(const vector*>& bottom, 21 | const vector*>& top) { 22 | TanHLayer::Reshape(bottom, top); 23 | const int N = bottom[0]->num(); 24 | const int K = bottom[0]->channels(); 25 | const int H = bottom[0]->height(); 26 | const int W = bottom[0]->width(); 27 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 28 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 29 | } 30 | 31 | template 32 | CuDNNTanHLayer::~CuDNNTanHLayer() { 33 | // Check that handles have been setup before destroying. 34 | if (!handles_setup_) { return; } 35 | 36 | cudnnDestroyTensorDescriptor(this->bottom_desc_); 37 | cudnnDestroyTensorDescriptor(this->top_desc_); 38 | cudnnDestroy(this->handle_); 39 | } 40 | 41 | INSTANTIATE_CLASS(CuDNNTanHLayer); 42 | 43 | } // namespace caffe 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/exp_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/exp_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void ExpLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const int count = bottom[0]->count(); 12 | const Dtype* bottom_data = bottom[0]->gpu_data(); 13 | Dtype* top_data = top[0]->mutable_gpu_data(); 14 | if (inner_scale_ == Dtype(1)) { 15 | caffe_gpu_exp(count, bottom_data, top_data); 16 | } else { 17 | caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); 18 | caffe_gpu_exp(count, top_data, top_data); 19 | } 20 | if (outer_scale_ != Dtype(1)) { 21 | caffe_gpu_scal(count, outer_scale_, top_data); 22 | } 23 | } 24 | 25 | template 26 | void ExpLayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, const vector*>& bottom) { 28 | if (!propagate_down[0]) { return; } 29 | const int count = bottom[0]->count(); 30 | const Dtype* top_data = top[0]->gpu_data(); 31 | const Dtype* top_diff = top[0]->gpu_diff(); 32 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 33 | caffe_gpu_mul(count, top_data, top_diff, bottom_diff); 34 | if (inner_scale_ != Dtype(1)) { 35 | caffe_gpu_scal(count, inner_scale_, bottom_diff); 36 | } 37 | } 38 | 39 | INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer); 40 | 41 | 42 | } // namespace caffe 43 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 
"caffe/layers/loss_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void LossLayer::LayerSetUp( 9 | const vector*>& bottom, const vector*>& top) { 10 | // LossLayers have a non-zero (1) loss by default. 11 | if (this->layer_param_.loss_weight_size() == 0) { 12 | this->layer_param_.add_loss_weight(Dtype(1)); 13 | } 14 | } 15 | 16 | template 17 | void LossLayer::Reshape( 18 | const vector*>& bottom, const vector*>& top) { 19 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()) 20 | << "The data and label should have the same number."; 21 | vector loss_shape(0); // Loss layers output a scalar; 0 axes. 22 | top[0]->Reshape(loss_shape); 23 | } 24 | 25 | INSTANTIATE_CLASS(LossLayer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/neuron_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/neuron_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void NeuronLayer::Reshape(const vector*>& bottom, 9 | const vector*>& top) { 10 | top[0]->ReshapeLike(*bottom[0]); 11 | } 12 | 13 | INSTANTIATE_CLASS(NeuronLayer); 14 | 15 | } // namespace caffe 16 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SigmoidCrossEntropyLossLayer::Backward_gpu( 10 | const vector*>& top, const vector& propagate_down, 11 | const vector*>& bottom) { 12 | if (propagate_down[1]) { 13 | LOG(FATAL) << this->type() 14 | << " Layer cannot backpropagate to label inputs."; 15 | } 16 | if (propagate_down[0]) { 17 | // First, compute the diff 18 | const int count = bottom[0]->count(); 19 | const int num = bottom[0]->num(); 20 | const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); 21 | const Dtype* target = bottom[1]->gpu_data(); 22 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 23 | caffe_copy(count, sigmoid_output_data, bottom_diff); 24 | caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); 25 | // Scale down gradient 26 | const Dtype loss_weight = top[0]->cpu_diff()[0]; 27 | caffe_gpu_scal(count, loss_weight / num, bottom_diff); 28 | } 29 | } 30 | 31 | INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); 32 | 33 | 34 | } // namespace caffe 35 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layers/sigmoid_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | inline Dtype sigmoid(Dtype x) { 10 | return 1. / (1. 
+ exp(-x)); 11 | } 12 | 13 | template 14 | void SigmoidLayer::Forward_cpu(const vector*>& bottom, 15 | const vector*>& top) { 16 | const Dtype* bottom_data = bottom[0]->cpu_data(); 17 | Dtype* top_data = top[0]->mutable_cpu_data(); 18 | const int count = bottom[0]->count(); 19 | for (int i = 0; i < count; ++i) { 20 | top_data[i] = sigmoid(bottom_data[i]); 21 | } 22 | } 23 | 24 | template 25 | void SigmoidLayer::Backward_cpu(const vector*>& top, 26 | const vector& propagate_down, 27 | const vector*>& bottom) { 28 | if (propagate_down[0]) { 29 | const Dtype* top_data = top[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 32 | const int count = bottom[0]->count(); 33 | for (int i = 0; i < count; ++i) { 34 | const Dtype sigmoid_x = top_data[i]; 35 | bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); 36 | } 37 | } 38 | } 39 | 40 | #ifdef CPU_ONLY 41 | STUB_GPU(SigmoidLayer); 42 | #endif 43 | 44 | INSTANTIATE_CLASS(SigmoidLayer); 45 | 46 | 47 | } // namespace caffe 48 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/silence_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SilenceLayer::Backward_cpu(const vector*>& top, 10 | const vector& propagate_down, const vector*>& bottom) { 11 | for (int i = 0; i < bottom.size(); ++i) { 12 | if (propagate_down[i]) { 13 | caffe_set(bottom[i]->count(), Dtype(0), 14 | bottom[i]->mutable_cpu_diff()); 15 | } 16 | } 17 | } 18 | 19 | #ifdef CPU_ONLY 20 | STUB_GPU(SilenceLayer); 21 | #endif 22 | 23 | INSTANTIATE_CLASS(SilenceLayer); 24 | REGISTER_LAYER_CLASS(Silence); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/silence_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SilenceLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | // Do nothing. 
12 | } 13 | 14 | template 15 | void SilenceLayer::Backward_gpu(const vector*>& top, 16 | const vector& propagate_down, const vector*>& bottom) { 17 | for (int i = 0; i < bottom.size(); ++i) { 18 | if (propagate_down[i]) { 19 | caffe_gpu_set(bottom[i]->count(), Dtype(0), 20 | bottom[i]->mutable_gpu_diff()); 21 | } 22 | } 23 | } 24 | 25 | INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/split_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/split_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SplitLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | for (int i = 0; i < top.size(); ++i) { 12 | top[i]->ShareData(*bottom[0]); 13 | } 14 | } 15 | 16 | template 17 | void SplitLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | if (!propagate_down[0]) { return; } 20 | if (top.size() == 1) { 21 | caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); 22 | return; 23 | } 24 | caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), 25 | bottom[0]->mutable_gpu_diff()); 26 | // Add remaining top blob diffs. 27 | for (int i = 2; i < top.size(); ++i) { 28 | const Dtype* top_diff = top[i]->gpu_diff(); 29 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 30 | caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); 31 | } 32 | } 33 | 34 | 35 | INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); 36 | 37 | } // namespace caffe 38 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | // TanH neuron activation function layer. 
2 | // Adapted from ReLU layer code written by Yangqing Jia 3 | 4 | #include 5 | 6 | #include "caffe/layers/tanh_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void TanHLayer::Forward_cpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | const Dtype* bottom_data = bottom[0]->cpu_data(); 14 | Dtype* top_data = top[0]->mutable_cpu_data(); 15 | const int count = bottom[0]->count(); 16 | for (int i = 0; i < count; ++i) { 17 | top_data[i] = tanh(bottom_data[i]); 18 | } 19 | } 20 | 21 | template 22 | void TanHLayer::Backward_cpu(const vector*>& top, 23 | const vector& propagate_down, 24 | const vector*>& bottom) { 25 | if (propagate_down[0]) { 26 | const Dtype* top_data = top[0]->cpu_data(); 27 | const Dtype* top_diff = top[0]->cpu_diff(); 28 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 29 | const int count = bottom[0]->count(); 30 | Dtype tanhx; 31 | for (int i = 0; i < count; ++i) { 32 | tanhx = top_data[i]; 33 | bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); 34 | } 35 | } 36 | } 37 | 38 | #ifdef CPU_ONLY 39 | STUB_GPU(TanHLayer); 40 | #endif 41 | 42 | INSTANTIATE_CLASS(TanHLayer); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/threshold_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void ThresholdLayer::LayerSetUp(const vector*>& bottom, 9 | const vector*>& top) { 10 | NeuronLayer::LayerSetUp(bottom, top); 11 | threshold_ = this->layer_param_.threshold_param().threshold(); 12 | } 13 | 14 | template 15 | void ThresholdLayer::Forward_cpu(const vector*>& bottom, 16 | const vector*>& top) { 17 | const Dtype* bottom_data = bottom[0]->cpu_data(); 18 | Dtype* top_data = top[0]->mutable_cpu_data(); 19 | const int count = bottom[0]->count(); 20 | for (int i = 0; i < count; ++i) { 21 | top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); 22 | } 23 | } 24 | 25 | #ifdef CPU_ONLY 26 | STUB_GPU_FORWARD(ThresholdLayer, Forward); 27 | #endif 28 | 29 | INSTANTIATE_CLASS(ThresholdLayer); 30 | REGISTER_LAYER_CLASS(Threshold); 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/layers/threshold_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | __global__ void ThresholdForward(const int n, const Dtype threshold, 9 | const Dtype* in, Dtype* out) { 10 | CUDA_KERNEL_LOOP(index, n) { 11 | out[index] = in[index] > threshold ? 
1 : 0; 12 | } 13 | } 14 | 15 | template 16 | void ThresholdLayer::Forward_gpu(const vector*>& bottom, 17 | const vector*>& top) { 18 | const Dtype* bottom_data = bottom[0]->gpu_data(); 19 | Dtype* top_data = top[0]->mutable_gpu_data(); 20 | const int count = bottom[0]->count(); 21 | // NOLINT_NEXT_LINE(whitespace/operators) 22 | ThresholdForward<<>>( 23 | count, threshold_, bottom_data, top_data); 24 | CUDA_POST_KERNEL_CHECK; 25 | } 26 | 27 | 28 | INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); 29 | 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/adadelta_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2, 8 | Dtype momentum, Dtype delta, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi; 12 | gi = gi * sqrt((h2[i] + delta) / (hi + delta)); 13 | h2[i] = momentum * h2[i] + (1-momentum) * gi * gi; 14 | g[i] = local_rate * gi; 15 | } 16 | } 17 | template 18 | void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum, 19 | Dtype delta, Dtype local_rate) { 20 | AdaDeltaUpdate // NOLINT_NEXT_LINE(whitespace/operators) 21 | <<>>( 22 | N, g, h, h2, momentum, delta, local_rate); 23 | CUDA_POST_KERNEL_CHECK; 24 | } 25 | template void adadelta_update_gpu(int , float*, float*, float*, 26 | float, float, float); 27 | template void adadelta_update_gpu(int, double*, double*, double*, 28 | double, double, double); 29 | 30 | } // namespace caffe 31 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/adagrad_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta, 8 | Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = h[i] + gi*gi; 12 | g[i] = local_rate * gi / (sqrt(hi) + delta); 13 | } 14 | } 15 | template 16 | void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta, 17 | Dtype local_rate) { 18 | AdaGradUpdate // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<>>( 20 | N, g, h, delta, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void adagrad_update_gpu(int, float*, float*, float, float); 24 | template void adagrad_update_gpu(int, double*, double*, double, double); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/adam_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, 8 | Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float mi = m[i] = m[i]*beta1 + gi*(1-beta1); 12 | float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); 13 | g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); 14 | } 15 | } 16 | template 17 | void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, 18 | Dtype beta2, Dtype eps_hat, 
Dtype corrected_local_rate) { 19 | AdamUpdate // NOLINT_NEXT_LINE(whitespace/operators) 20 | <<>>( 21 | N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); 22 | CUDA_POST_KERNEL_CHECK; 23 | } 24 | template void adam_update_gpu(int, float*, float*, float*, 25 | float, float, float, float); 26 | template void adam_update_gpu(int, double*, double*, double*, 27 | double, double, double, double); 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/nesterov_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void NesterovUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype momentum, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float hi = h[i]; 11 | float hi_new = h[i] = momentum * hi + local_rate * g[i]; 12 | g[i] = (1+momentum) * hi_new - momentum * hi; 13 | } 14 | } 15 | template 16 | void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, 17 | Dtype local_rate) { 18 | NesterovUpdate // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<>>( 20 | N, g, h, momentum, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void nesterov_update_gpu(int, float*, float*, float, float); 24 | template void nesterov_update_gpu(int, double*, double*, double, 25 | double); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/rmsprop_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype rms_decay, Dtype delta, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi; 12 | g[i] = local_rate * g[i] / (sqrt(hi) + delta); 13 | } 14 | } 15 | template 16 | void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay, 17 | Dtype delta, Dtype local_rate) { 18 | RMSPropUpdate // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<>>( 20 | N, g, h, rms_decay, delta, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void rmsprop_update_gpu(int, float*, float*, float, float, 24 | float); 25 | template void rmsprop_update_gpu(int, double*, double*, double, double, 26 | double); 27 | 28 | } // namespace caffe 29 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/solvers/sgd_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype momentum, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | g[i] = h[i] = momentum*h[i] + local_rate*g[i]; 11 | } 12 | } 13 | template 14 | void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, 15 | Dtype local_rate) { 16 | SGDUpdate // NOLINT_NEXT_LINE(whitespace/operators) 17 | <<>>( 18 | N, g, h, momentum, local_rate); 19 | CUDA_POST_KERNEL_CHECK; 20 | } 21 | template void sgd_update_gpu(int, float*, float*, float, float); 22 | template void sgd_update_gpu(int, double*, double*, double, double); 23 | 24 | } // namespace caffe 25 | 
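Each solver kernel above rewrites the gradient g in place using its history h. The per-element rule of SGDUpdate, g[i] = h[i] = momentum*h[i] + local_rate*g[i], can be checked with a minimal MATLAB sketch; the variable names below are illustrative and not taken from this repository:

% Momentum SGD update as computed per element by SGDUpdate above (sketch only).
momentum   = 0.9;
local_rate = 0.01;
gradient   = randn(10, 1);      % stands in for g
history    = zeros(10, 1);      % stands in for h
history    = momentum * history + local_rate * gradient;   % h update
gradient   = history;           % g <- h; the solver then applies g as the parameter update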
-------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_caffe_main.cpp: -------------------------------------------------------------------------------- 1 | // The main caffe test code. Your test cpp code should include this hpp 2 | // to allow a main function to be compiled into the binary. 3 | 4 | #include "caffe/caffe.hpp" 5 | #include "caffe/test/test_caffe_main.hpp" 6 | 7 | namespace caffe { 8 | #ifndef CPU_ONLY 9 | cudaDeviceProp CAFFE_TEST_CUDA_PROP; 10 | #endif 11 | } 12 | 13 | #ifndef CPU_ONLY 14 | using caffe::CAFFE_TEST_CUDA_PROP; 15 | #endif 16 | 17 | int main(int argc, char** argv) { 18 | ::testing::InitGoogleTest(&argc, argv); 19 | caffe::GlobalInit(&argc, &argv); 20 | #ifndef CPU_ONLY 21 | // Before starting testing, let's first print out a few cuda defice info. 22 | int device; 23 | cudaGetDeviceCount(&device); 24 | cout << "Cuda number of devices: " << device << endl; 25 | if (argc > 1) { 26 | // Use the given device 27 | device = atoi(argv[1]); 28 | cudaSetDevice(device); 29 | cout << "Setting to use device " << device << endl; 30 | } else if (CUDA_TEST_DEVICE >= 0) { 31 | // Use the device assigned in build configuration; but with a lower priority 32 | device = CUDA_TEST_DEVICE; 33 | } 34 | cudaGetDevice(&device); 35 | cout << "Current device id: " << device << endl; 36 | cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); 37 | cout << "Current device name: " << CAFFE_TEST_CUDA_PROP.name << endl; 38 | #endif 39 | // invoke the test. 40 | return RUN_ALL_TESTS(); 41 | } 42 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_data/sample_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/src/caffe/test/test_data/sample_data.h5 -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_data/sample_data_2_gzip.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/src/caffe/test/test_data/sample_data_2_gzip.h5 -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_data/sample_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/sample_data.h5 2 | src/caffe/test/test_data/sample_data_2_gzip.h5 3 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_data/solver_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/caffe-dslt/src/caffe/test/test_data/solver_data.h5 -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_data/solver_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/solver_data.h5 2 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include "glog/logging.h" 2 | #include "gtest/gtest.h" 3 | 4 | #include 
"caffe/internal_thread.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | #include "caffe/test/test_caffe_main.hpp" 8 | 9 | namespace caffe { 10 | 11 | 12 | class InternalThreadTest : public ::testing::Test {}; 13 | 14 | TEST_F(InternalThreadTest, TestStartAndExit) { 15 | InternalThread thread; 16 | EXPECT_FALSE(thread.is_started()); 17 | thread.StartInternalThread(); 18 | EXPECT_TRUE(thread.is_started()); 19 | thread.StopInternalThread(); 20 | EXPECT_FALSE(thread.is_started()); 21 | } 22 | 23 | class TestThreadA : public InternalThread { 24 | void InternalThreadEntry() { 25 | EXPECT_EQ(4244559767, caffe_rng_rand()); 26 | } 27 | }; 28 | 29 | class TestThreadB : public InternalThread { 30 | void InternalThreadEntry() { 31 | EXPECT_EQ(1726478280, caffe_rng_rand()); 32 | } 33 | }; 34 | 35 | TEST_F(InternalThreadTest, TestRandomSeed) { 36 | TestThreadA t1; 37 | Caffe::set_random_seed(9658361); 38 | t1.StartInternalThread(); 39 | t1.StopInternalThread(); 40 | 41 | TestThreadA t2; 42 | Caffe::set_random_seed(9658361); 43 | t2.StartInternalThread(); 44 | t2.StopInternalThread(); 45 | 46 | TestThreadB t3; 47 | Caffe::set_random_seed(3435563); 48 | t3.StartInternalThread(); 49 | t3.StopInternalThread(); 50 | } 51 | 52 | } // namespace caffe 53 | 54 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/test/test_protobuf.cpp: -------------------------------------------------------------------------------- 1 | // This is simply a script that tries serializing protocol buffer in text 2 | // format. Nothing special here and no actual code is being tested. 3 | #include 4 | 5 | #include "google/protobuf/text_format.h" 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/test/test_caffe_main.hpp" 11 | 12 | namespace caffe { 13 | 14 | class ProtoTest : public ::testing::Test {}; 15 | 16 | TEST_F(ProtoTest, TestSerialization) { 17 | LayerParameter param; 18 | param.set_name("test"); 19 | param.set_type("Test"); 20 | std::cout << "Printing in binary format." << std::endl; 21 | std::cout << param.SerializeAsString() << std::endl; 22 | std::cout << "Printing in text format." 
<< std::endl; 23 | std::string str; 24 | google::protobuf::TextFormat::PrintToString(param, &str); 25 | std::cout << str << std::endl; 26 | EXPECT_TRUE(true); 27 | } 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/util/cudnn.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include "caffe/util/cudnn.hpp" 3 | 4 | namespace caffe { 5 | namespace cudnn { 6 | 7 | float dataType::oneval = 1.0; 8 | float dataType::zeroval = 0.0; 9 | const void* dataType::one = 10 | static_cast(&dataType::oneval); 11 | const void* dataType::zero = 12 | static_cast(&dataType::zeroval); 13 | 14 | double dataType::oneval = 1.0; 15 | double dataType::zeroval = 0.0; 16 | const void* dataType::one = 17 | static_cast(&dataType::oneval); 18 | const void* dataType::zero = 19 | static_cast(&dataType::zeroval); 20 | 21 | } // namespace cudnn 22 | } // namespace caffe 23 | #endif 24 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/util/db.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/util/db.hpp" 2 | #include "caffe/util/db_leveldb.hpp" 3 | #include "caffe/util/db_lmdb.hpp" 4 | 5 | #include 6 | 7 | namespace caffe { namespace db { 8 | 9 | DB* GetDB(DataParameter::DB backend) { 10 | switch (backend) { 11 | #ifdef USE_LEVELDB 12 | case DataParameter_DB_LEVELDB: 13 | return new LevelDB(); 14 | #endif // USE_LEVELDB 15 | #ifdef USE_LMDB 16 | case DataParameter_DB_LMDB: 17 | return new LMDB(); 18 | #endif // USE_LMDB 19 | default: 20 | LOG(FATAL) << "Unknown database backend"; 21 | return NULL; 22 | } 23 | } 24 | 25 | DB* GetDB(const string& backend) { 26 | #ifdef USE_LEVELDB 27 | if (backend == "leveldb") { 28 | return new LevelDB(); 29 | } 30 | #endif // USE_LEVELDB 31 | #ifdef USE_LMDB 32 | if (backend == "lmdb") { 33 | return new LMDB(); 34 | } 35 | #endif // USE_LMDB 36 | LOG(FATAL) << "Unknown database backend"; 37 | return NULL; 38 | } 39 | 40 | } // namespace db 41 | } // namespace caffe 42 | -------------------------------------------------------------------------------- /caffe-dslt/src/caffe/util/db_leveldb.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_LEVELDB 2 | #include "caffe/util/db_leveldb.hpp" 3 | 4 | #include 5 | 6 | namespace caffe { namespace db { 7 | 8 | void LevelDB::Open(const string& source, Mode mode) { 9 | leveldb::Options options; 10 | options.block_size = 65536; 11 | options.write_buffer_size = 268435456; 12 | options.max_open_files = 100; 13 | options.error_if_exists = mode == NEW; 14 | options.create_if_missing = mode != READ; 15 | leveldb::Status status = leveldb::DB::Open(options, source, &db_); 16 | CHECK(status.ok()) << "Failed to open leveldb " << source 17 | << std::endl << status.ToString(); 18 | LOG(INFO) << "Opened leveldb " << source; 19 | } 20 | 21 | } // namespace db 22 | } // namespace caffe 23 | #endif // USE_LEVELDB 24 | -------------------------------------------------------------------------------- /caffe-dslt/src/gtest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) 2 | caffe_default_properties(gtest) 3 | 4 | #add_library(gtest_main gtest_main.cc) 5 | #target_link_libraries(gtest_main gtest) 6 | 
-------------------------------------------------------------------------------- /caffe-dslt/tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Collect source files 2 | file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 3 | 4 | # Build each source file independently 5 | foreach(source ${srcs}) 6 | get_filename_component(name ${source} NAME_WE) 7 | 8 | # caffe target already exits 9 | if(name MATCHES "caffe") 10 | set(name ${name}.bin) 11 | endif() 12 | 13 | # target 14 | add_executable(${name} ${source}) 15 | target_link_libraries(${name} ${Caffe_LINK}) 16 | caffe_default_properties(${name}) 17 | 18 | # set back RUNTIME_OUTPUT_DIRECTORY 19 | caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools") 20 | caffe_set_solution_folder(${name} tools) 21 | 22 | # restore output name without suffix 23 | if(name MATCHES "caffe.bin") 24 | set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe) 25 | endif() 26 | 27 | # Install 28 | install(TARGETS ${name} DESTINATION bin) 29 | endforeach(source) 30 | -------------------------------------------------------------------------------- /caffe-dslt/tools/device_query.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/common.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe device_query " 5 | "[--device_id=0] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-dslt/tools/finetune_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--weights=...] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-dslt/tools/net_speed_benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-dslt/tools/test_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-dslt/tools/train_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--snapshot=...] 
instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/framework.png -------------------------------------------------------------------------------- /model/finetune_net.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/model/finetune_net.caffemodel -------------------------------------------------------------------------------- /model/new_vgg_net.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoma99/DSLT/3264da5413f29c6523ae6f02b0632b8956354545/model/new_vgg_net.caffemodel -------------------------------------------------------------------------------- /prototxt/initial_feanet.m: -------------------------------------------------------------------------------- 1 | function initial_feanet(caffe, ) 2 | solver_def_file = 'fea_solver.prototxt'; %de-conv_solver1.prototxt is designed for vgg_deconv 3 | model_file = '/home/machao/luxiankai/caffe/models/vgg_16layers_fc6/VGG_ILSVRC_16_layers.caffemodel';% 4 | %'new_vgg_net1.caffemodel'; 5 | % %'/home/luxiankai/code/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'; 6 | % %new_alex_net 7 | fsolver = caffe.Solver(solver_def_file); 8 | fsolver.net.copy_from(model_file); 9 | 10 | -------------------------------------------------------------------------------- /prototxt/vgg_se_res_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: 'vgg_layer1.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 2e-8#8e-7#1e-6#1e-9#0.001#2e-7# 7 | gamma: 0.1 8 | stepsize: 40000 9 | display: 10 10 | #max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.00051 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | #device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | type: "Adam" 22 | random_seed: 701 -------------------------------------------------------------------------------- /prototxt/vgg_se_res_solver1.prototxt: -------------------------------------------------------------------------------- 1 | net: 'vgg_layer2.prototxt' 2 | #test_iter: 1000 3 | #test_interval: 1000 4 | ##update_interval: 2 5 | lr_policy: "step" 6 | base_lr: 1e-8#2e-9#5e-9#1e-8#5e-9#1e-7#5e-9#4e-7#4e-7#5e-8# 7 | gamma: 0.1 8 | stepsize: 40000 9 | display: 10 10 | #max_iter: 450001 11 | momentum: 0.6 #0.7 #0.6 #0.6 #0.5 #0.3 12 | weight_decay: 0.00051 #0.0005 13 | #snapshot: 1000 14 | #snapshot_prefix: "model5/track" 15 | solver_mode: GPU 16 | #use_mpi: false 17 | #device_id: 2 18 | #test_initialization: false 19 | #debug_info: false 20 | #debug_display: 1000 21 | type: "Adam" 22 | random_seed: 701 -------------------------------------------------------------------------------- /util/choose_video.m: -------------------------------------------------------------------------------- 1 | function video_name = choose_video(base_path) 2 | %CHOOSE_VIDEO 3 | % Allows the user to choose a video (sub-folder in the given path). 
4 | % 5 | 6 | %process path to make sure it's uniform 7 | if ispc(), base_path = strrep(base_path, '\', '/'); end 8 | if base_path(end) ~= '/', base_path(end+1) = '/'; end 9 | 10 | %list all sub-folders 11 | contents = dir(base_path); 12 | names = {}; 13 | for k = 1:numel(contents), 14 | name = contents(k).name; 15 | if isdir([base_path name]) && ~any(strcmp(name, {'.', '..'})), 16 | names{end+1} = name; %#ok 17 | end 18 | end 19 | 20 | names(strcmpi('Jogging', names)) = []; 21 | names(end+1:end+2) = {'Jogging.1', 'Jogging.2'}; 22 | 23 | %no sub-folders found 24 | if isempty(names), video_name = []; return; end 25 | 26 | %choice GUI 27 | choice = listdlg('ListString',names, 'Name','Choose video', 'SelectionMode','single'); 28 | 29 | if isempty(choice), %user cancelled 30 | video_name = []; 31 | else 32 | video_name = names{choice}; 33 | end 34 | 35 | end 36 | 37 | -------------------------------------------------------------------------------- /util/cleanupFun.m: -------------------------------------------------------------------------------- 1 | function cleanupFun 2 | % fprintf('Runing clean up function\n'); 3 | caffe.reset_all(); 4 | %caffe.delete_solver([5,4]); 5 | -------------------------------------------------------------------------------- /util/collectData.m: -------------------------------------------------------------------------------- 1 | clc 2 | % path = 'cnn_res/'; 3 | num = 384; 4 | path = ['select_cnn_res_cn' num2str(num) '/']; 5 | 6 | dataSet = dir(path); 7 | 8 | 9 | % dataSet.name = 'Basketball'; 10 | % resPath = ['select_cnn' num2str(num) '_TBres/']; 11 | resPath = ['fcn7_restore_' num2str(num) '_TBres/']; 12 | 13 | if ~isdir(resPath) 14 | mkdir(resPath); 15 | end 16 | 17 | for idSeq = 1:length(dataSet) 18 | if ~isdir([path dataSet(idSeq).name]) || strcmp(dataSet(idSeq).name,'.') || strcmp(dataSet(idSeq).name,'..') 19 | continue; 20 | end 21 | load([path dataSet(idSeq).name '/position.mat']); 22 | results=cell(1); 23 | results{1}.res = position(:,1:end)'; 24 | results{1}.type = 'ivtAff'; 25 | results{1}.tmplsize = [64,64]; 26 | results{1}.startFame = 1; 27 | results{1}.annoBegin = 1; 28 | results{1}.len = length(results{1}.res); 29 | results{1}.property = 'geom'; 30 | 31 | 32 | if strcmp(dataSet(idSeq).name, 'Tiger1') 33 | results{1}.startFame = 6; 34 | results{1}.res = results{1}.res(6:end,:); 35 | results{1}.len = length(results{1}.res); 36 | end 37 | % save([resPath lower(dataSet(idSeq).name) '_select_cnn' num2str(num) '-7.mat'], 'results') 38 | save([resPath lower(dataSet(idSeq).name) '_fcn7_restore_' num2str(num) '.mat'], 'results') 39 | 40 | end -------------------------------------------------------------------------------- /util/crop_bg.m: -------------------------------------------------------------------------------- 1 | function I = crop_bg(im, GT, mean_pix) 2 | [im_h, im_w, ~] = size(im); 3 | win_w = GT(3); 4 | win_h = GT(4); 5 | win_lt_x = max(GT(1), 1); 6 | win_lt_x = min(im_w, win_lt_x); 7 | win_lt_y = max(GT(2), 1); 8 | win_lt_y = min(im_h, win_lt_y); 9 | 10 | win_rb_x = max(win_lt_x+win_w-1, 1); 11 | win_rb_x = min(im_w, win_rb_x); 12 | win_rb_y = max(win_lt_y+win_h-1, 1); 13 | win_rb_y = min(im_h, win_rb_y); 14 | 15 | I = zeros(im_h, im_w, 3); 16 | I(:,:,1) = mean_pix(3); 17 | I(:,:,2) = mean_pix(2); 18 | I(:,:,3) = mean_pix(1); 19 | I(win_lt_y:win_rb_y, win_lt_x:win_rb_x, :) = im(win_lt_y:win_rb_y, win_lt_x:win_rb_x, :); 20 | end -------------------------------------------------------------------------------- /util/ext_roi.m: 
-------------------------------------------------------------------------------- 1 | function [roi, roi_pos, preim, pad] = ext_roi(im, GT, l_off, roi_size, r_w_scale) 2 | [h, w, ~] = size(im); 3 | win_w = GT(3); 4 | win_h = GT(4); 5 | win_lt_x = GT(1); 6 | win_lt_y = GT(2); 7 | win_cx = round(win_lt_x+win_w/2+l_off(1)); 8 | win_cy = round(win_lt_y+win_h/2+l_off(2)); 9 | roi_w = r_w_scale(1)*win_w; 10 | roi_h = r_w_scale(2)*win_h; 11 | x1 = win_cx-round(roi_w/2); 12 | y1 = win_cy-round(roi_h/2); 13 | x2 = win_cx+round(roi_w/2); 14 | y2 = win_cy+round(roi_h/2); 15 | 16 | im = double(im); 17 | clip = min([x1,y1,h-y2, w-x2]); 18 | pad = 0; 19 | if clip<=0 20 | pad = abs(clip)+1; 21 | im = padarray(im, [pad, pad]); 22 | x1 = x1+pad; 23 | x2 = x2+pad; 24 | y1 = y1+pad; 25 | y2 = y2+pad; 26 | end 27 | roi = imresize(im(y1:y2, x1:x2, :), [roi_size(1), roi_size(2)]); 28 | preim = zeros(size(im,1), size(im,2)); % new padded image size 29 | roi_pos = [x1, y1, x2-x1+1, y2-y1+1];% location in the padded image 30 | % marginl = floor((roi_warp_size-roi_size)/2); 31 | % marginr = roi_warp_size-roi_size-marginl; 32 | 33 | % roi = roi(marginl+1:end-marginr, marginl+1:end-marginr, :); 34 | % roi = imresize(roi, [roi_size, roi_size]); 35 | end -------------------------------------------------------------------------------- /util/feature_norm.m: -------------------------------------------------------------------------------- 1 | function feature_map_norm = feature_norm(x) 2 | feature_map_norm = bsxfun(@times, x, ... 3 | sqrt((size(x,1)*size(x,2))^1* size(x,3)^1 ./ ... 4 | (sum(reshape(x, [], 1, 1, size(x,4)).^2, 1) + eps))); -------------------------------------------------------------------------------- /util/gaussian_test.m: -------------------------------------------------------------------------------- 1 | clear; 2 | close all; 3 | sz= 40; 4 | [rs] = 0:0.01:sz;%ndgrid((1:sz) - floor(sz/2)); 5 | sigma = 0.001; 6 | labels = gaussmf(rs, [0.5 5]); 7 | %labels = exp(-0.5 / sigma^2 * (rs.^2 )); 8 | figure,plot(rs,labels,'b'); 9 | labels1 = exp(labels); 10 | hold on,plot(rs,labels1.*labels,'r'); -------------------------------------------------------------------------------- /util/generate_data_list.m: -------------------------------------------------------------------------------- 1 | data_list = cell(4,2); 2 | num = zeros(4, 2); 3 | for i = iter_num : -1 : 1 4 | data = load(['data/' num2str(i) '.mat']); 5 | l_id = double(data.reward_t>0.5) + 1; 6 | data_list{data.action_id_t, l_id} = [data_list{data.action_id_t, l_id} i]; 7 | num(data.action_id_t, l_id) = num(data.action_id_t, l_id)+1; 8 | if mod(i,100)==0 fprintf('i = %d\n',i); end 9 | if min(num(:)) > buffer_sz 10 | break; 11 | end 12 | 13 | end -------------------------------------------------------------------------------- /util/get_crops.m: -------------------------------------------------------------------------------- 1 | function [im_crop_z, padded_im, roi_pos, left_pad_z, top_pad_z, right_pad_z, bottom_pad_z] = get_crops(im, object, context_amount) 2 | % ------------------------------------------------------------------------------------------------------------------- 3 | %% Get exemplar sample 4 | % take bbox with context for the exemplar 5 | 6 | bbox = object; 7 | [cx, cy, w, h] = deal(bbox(1)+bbox(3)/2, bbox(2)+bbox(4)/2, bbox(3), bbox(4)); 8 | wc_z = w + context_amount(1)*(w); 9 | hc_z = h + context_amount(2)*(h); 10 | [im_crop_z, padded_im,roi_pos,left_pad_z, top_pad_z, right_pad_z, bottom_pad_z] = get_subwindow_avg(im, [cy cx], ([hc_z wc_z])); 
%pos win 11 | % [im_crop_z, padded_im,roi_pos,left_pad_z, top_pad_z, right_pad_z, bottom_pad_z] =... 12 | % get_subwindow_avg1(im, [bbox(1) bbox(2) w h], [0 0], context_amount+1); %pos win 13 | 14 | 15 | end -------------------------------------------------------------------------------- /util/get_scale_im.m: -------------------------------------------------------------------------------- 1 | function scale_sample = get_scale_im(im, location, roi_sz, scaleFactors, context) 2 | 3 | num = length(scaleFactors); 4 | fea_sz = roi_sz; 5 | scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), num)); 6 | for scale_id = 1:length(scaleFactors) 7 | %location([3,4]) = location([3,4])*scaleFactors(scale_id); 8 | re_sz = 2 * floor(fea_sz(1:2) / 2 * scaleFactors(scale_id)); 9 | roi1 = get_crops(im, location, context*scaleFactors(scale_id)); 10 | fea_resized = imresize(roi1, re_sz); 11 | xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1; 12 | ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1; 13 | xs_end = re_sz(2) - xs_start + 1; 14 | ys_end = re_sz(1) - ys_start + 1; 15 | 16 | xt_start = max((fea_sz(2) - re_sz(2))/2, 0) + 1; 17 | yt_start = max((fea_sz(1) - re_sz(1))/2, 0) + 1; 18 | xt_end = fea_sz(2) - xt_start + 1; 19 | yt_end = fea_sz(1) - yt_start + 1; 20 | 21 | scale_sample(yt_start:yt_end, xt_start:xt_end ,:, scale_id) = fea_resized(ys_start:ys_end, xs_start:xs_end, :); 22 | end -------------------------------------------------------------------------------- /util/get_scale_im1.m: -------------------------------------------------------------------------------- 1 | function scale_sample = get_scale_im1(im, location, roi_sz, scaleFactors, context) 2 | 3 | num = length(scaleFactors); 4 | fea_sz = roi_sz; 5 | scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), num,num)); 6 | for scale_id = 1:length(scaleFactors) 7 | for scale_id1 = 1:length(scaleFactors) 8 | %location([3,4]) = location([3,4])*scaleFactors(scale_id); 9 | re_sz = 2 * floor([fea_sz(1) / 2 * scaleFactors(scale_id) fea_sz(2) / 2 * scaleFactors(scale_id1)]); 10 | roi1 = get_crops(im, location, context.*[scaleFactors(scale_id) scaleFactors(scale_id1)]); 11 | fea_resized = imresize(roi1, re_sz); 12 | xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1; 13 | ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1; 14 | xs_end = re_sz(2) - xs_start + 1; 15 | ys_end = re_sz(1) - ys_start + 1; 16 | 17 | xt_start = max((fea_sz(2) - re_sz(2))/2, 0) + 1; 18 | yt_start = max((fea_sz(1) - re_sz(1))/2, 0) + 1; 19 | xt_end = fea_sz(2) - xt_start + 1; 20 | yt_end = fea_sz(1) - yt_start + 1; 21 | 22 | scale_sample(yt_start:yt_end, xt_start:xt_end ,:,scale_id, scale_id1) = fea_resized(ys_start:ys_end, xs_start:xs_end, :); 23 | end 24 | end -------------------------------------------------------------------------------- /util/get_scale_sample.m: -------------------------------------------------------------------------------- 1 | function scale_sample = get_scale_sample(fea, scaleFactors, tmp_target_sz) 2 | num = length(scaleFactors); 3 | num1 = 31; 4 | scale_window_train = single(hann(num1)); 5 | scale_window = scale_window_train((num1 - num)/2 + 1: (num1 + num)/2); 6 | fea_sz = size(fea); 7 | scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), num)); 8 | for scale_id = 1:length(scaleFactors) 9 | re_sz = 2 * floor(fea_sz(1:2) / 2 * scaleFactors(scale_id)); 10 | fea_resized = imresize(fea, re_sz); 11 | xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1; 12 | ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1; 13 | xs_end = re_sz(2) - xs_start + 1; 14 | 
--------------------------------------------------------------------------------
/util/get_scale_sample.m:
--------------------------------------------------------------------------------
function scale_sample = get_scale_sample(fea, scaleFactors, tmp_target_sz)
% Build a scale pyramid of the feature map fea, each scale weighted by the
% central taps of a 31-point Hann window. (tmp_target_sz is currently unused.)
num = length(scaleFactors);
num1 = 31;
scale_window_train = single(hann(num1));
scale_window = scale_window_train((num1 - num)/2 + 1 : (num1 + num)/2);
fea_sz = size(fea);
scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), num));
for scale_id = 1:length(scaleFactors)
    re_sz = 2 * floor(fea_sz(1:2) / 2 * scaleFactors(scale_id));
    fea_resized = imresize(fea, re_sz);
    xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1;
    ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1;
    xs_end = re_sz(2) - xs_start + 1;
    ys_end = re_sz(1) - ys_start + 1;

    xt_start = max((fea_sz(2) - re_sz(2))/2, 0) + 1;
    yt_start = max((fea_sz(1) - re_sz(1))/2, 0) + 1;
    xt_end = fea_sz(2) - xt_start + 1;
    yt_end = fea_sz(1) - yt_start + 1;

    scale_sample(yt_start:yt_end, xt_start:xt_end, :, scale_id) = fea_resized(ys_start:ys_end, xs_start:xs_end, :) * scale_window(scale_id);
end
--------------------------------------------------------------------------------
/util/get_scale_sample1.m:
--------------------------------------------------------------------------------
function scale_sample = get_scale_sample1(fea, scaleFactors, scale_window)
% Like get_scale_sample, but with independent width/height scales and an
% externally supplied scale window.
fea_sz = size(fea);
scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), length(scaleFactors), length(scaleFactors)));
for scale_id = 1:length(scaleFactors)
    for scale_id1 = 1:length(scaleFactors)
        re_sz = 2 * floor([fea_sz(1) / 2 * scaleFactors(scale_id) fea_sz(2) / 2 * scaleFactors(scale_id1)]);
        fea_resized = imresize(fea, re_sz);
        xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1;
        ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1;
        xs_end = re_sz(2) - xs_start + 1;
        ys_end = re_sz(1) - ys_start + 1;

        xt_start = max((fea_sz(2) - re_sz(2))/2, 0) + 1;
        yt_start = max((fea_sz(1) - re_sz(1))/2, 0) + 1;
        xt_end = fea_sz(2) - xt_start + 1;
        yt_end = fea_sz(1) - yt_start + 1;

        scale_sample(yt_start:yt_end, xt_start:xt_end, :, scale_id, scale_id1) = fea_resized(ys_start:ys_end, xs_start:xs_end, :) * scale_window(scale_id);
    end
end
--------------------------------------------------------------------------------
/util/get_scale_sample3.m:
--------------------------------------------------------------------------------
function scale_sample = get_scale_sample3(fea, scaleFactors, target_sz, context)
% Variant of get_scale_sample that zeroes the top-left and bottom-right
% corners of the feature map lying beyond ~1.5*target_sz from the centre
% whenever the context differs from [2, 1.5].
num = length(scaleFactors);
num1 = 31;
scale_window_train = single(hann(num1));
scale_window = scale_window_train((num1 - num)/2 + 1 : (num1 + num)/2);
fea_sz = size(fea);
scale_sample = single(zeros(fea_sz(1), fea_sz(2), fea_sz(3), num));
if ~isequal(context, [2, 1.5])
    fea(1:(floor(fea_sz(1)./2 - 1.5*target_sz(1))-1), 1:(floor(fea_sz(2)./2 - 1.5*target_sz(2))-1), :) = 0;
    fea((floor(fea_sz(1)./2 + 1.5*target_sz(1))-1:end), (floor(fea_sz(2)./2 + 1.5*target_sz(2))-1:end), :) = 0;
end
for scale_id = 1:length(scaleFactors)
    re_sz = 2 * floor(fea_sz(1:2) / 2 * scaleFactors(scale_id));
    fea_resized = imresize(fea, re_sz);
    xs_start = max((re_sz(2) - fea_sz(2))/2, 0) + 1;
    ys_start = max((re_sz(1) - fea_sz(1))/2, 0) + 1;
    xs_end = re_sz(2) - xs_start + 1;
    ys_end = re_sz(1) - ys_start + 1;

    xt_start = max((fea_sz(2) - re_sz(2))/2, 0) + 1;
    yt_start = max((fea_sz(1) - re_sz(1))/2, 0) + 1;
    xt_end = fea_sz(2) - xt_start + 1;
    yt_end = fea_sz(1) - yt_start + 1;

    scale_sample(yt_start:yt_end, xt_start:xt_end, :, scale_id) = fea_resized(ys_start:ys_end, xs_start:xs_end, :) * scale_window(scale_id);
end
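% A minimal sketch of the scale window used by get_scale_sample and
% get_scale_sample3: the central num taps of a 31-point Hann window
% (hann needs the Signal Processing Toolbox); num = 9 is illustrative:
num = 9; num1 = 31;
w_all  = single(hann(num1));
w_test = w_all((num1 - num)/2 + 1 : (num1 + num)/2);  % 9 taps peaking at the middle scale
plot(w_test, 'o-');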
--------------------------------------------------------------------------------
/util/get_subwindow.m:
--------------------------------------------------------------------------------
function out = get_subwindow(im, pos, sz)
%GET_SUBWINDOW Obtain sub-window from image, with replication-padding.
%   Returns sub-window of image IM centered at POS ([y, x] coordinates),
%   with size SZ ([height, width]). If any pixels are outside of the image,
%   they will replicate the values at the borders.
%
%   Joao F. Henriques, 2014
%   http://www.isr.uc.pt/~henriques/

if isscalar(sz)  % square sub-window
    sz = [sz, sz];
end

xs = floor(pos(2)) + (1:sz(2)) - floor(sz(2)/2);
ys = floor(pos(1)) + (1:sz(1)) - floor(sz(1)/2);

% check for out-of-bounds coordinates, and set them to the values at
% the borders
xs(xs < 1) = 1;
ys(ys < 1) = 1;
xs(xs > size(im,2)) = size(im,2);
ys(ys > size(im,1)) = size(im,1);

% extract image
out = im(ys, xs, :);

end
--------------------------------------------------------------------------------
/util/impreprocess.m:
--------------------------------------------------------------------------------
function I = impreprocess(im)
% Convert an RGB frame to Caffe's input convention: transpose to width-major,
% flip the channels to BGR and subtract the ImageNet channel means.
mean_pix = [103.939, 116.779, 123.68];  % BGR
im = permute(im, [2,1,3]);
im = im(:,:,3:-1:1);
I(:,:,1) = im(:,:,1) - mean_pix(1);  % subtract mean
I(:,:,2) = im(:,:,2) - mean_pix(2);
I(:,:,3) = im(:,:,3) - mean_pix(3);
% I = I*0.017;
end
--------------------------------------------------------------------------------
/util/impreprocess1.m:
--------------------------------------------------------------------------------
function I = impreprocess1(im, ratio)
% Same as impreprocess, but first resizes the frame by the given ratio.
im = imresize(im, floor([ratio*size(im,1) ratio*size(im,2)]));
mean_pix = [103.939, 116.779, 123.68];  % BGR
im = permute(im, [2,1,3]);
im = im(:,:,3:-1:1);
I(:,:,1) = im(:,:,1) - mean_pix(1);  % subtract mean
I(:,:,2) = im(:,:,2) - mean_pix(2);
I(:,:,3) = im(:,:,3) - mean_pix(3);
% I = I*0.017;
end
--------------------------------------------------------------------------------
/util/init_scale_estimator.m:
--------------------------------------------------------------------------------
function [scale_param] = init_scale_estimator
% Parameters of the scale search: a 33-scale training sweep and a 9-scale test
% sweep with step 1.02, plus the Gaussian regression target and Hann windows
% applied over the scale dimension.
scale_param.scale_thr = 0.05;
scale_param.scale_sigma_factor = 1/4;
scale_param.number_of_scales_test = 9;
scale_param.number_of_scales_train = 33;
scale_param.scale_step = 1.02;
scale_param.scale_sigma = sqrt(scale_param.number_of_scales_train) * scale_param.scale_sigma_factor;
ss = (1:scale_param.number_of_scales_train) - ceil(scale_param.number_of_scales_train/2);
ys = exp(-0.5 * (ss.^2) / scale_param.scale_sigma^2);
scale_param.y = single(ys);
scale_param.scale_window_train = single(hann(scale_param.number_of_scales_train));
scale_param.scale_window_test = scale_param.scale_window_train((scale_param.number_of_scales_train - scale_param.number_of_scales_test)/2 + 1 : (scale_param.number_of_scales_train + scale_param.number_of_scales_test)/2);
ss = 1:scale_param.number_of_scales_train;
scale_param.scaleFactors_train = scale_param.scale_step.^(ceil(scale_param.number_of_scales_train/2) - ss);
ss = 1:scale_param.number_of_scales_test;
scale_param.scaleFactors_test = scale_param.scale_step.^(ceil(scale_param.number_of_scales_test/2) - ss);
end
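% Minimal sketch of what init_scale_estimator returns (the values follow
% directly from the constants above; hann needs the Signal Processing Toolbox):
scale_param = init_scale_estimator();
disp(scale_param.scaleFactors_test);   % 1.02.^(4:-1:-4): about 1.082 down to 0.924
disp(scale_param.scaleFactors_train);  % 1.02.^(16:-1:-16): the wider 33-scale sweep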
--------------------------------------------------------------------------------
/util/loss_object_grad.m:
--------------------------------------------------------------------------------
function [loss, delta] = loss_object_grad(pred, label)
%% return loss and gradient
% Shrinkage-style regression loss: the squared residual is re-weighted by
% (i) the soft label (pixels near the target centre count more) and
% (ii) a sigmoid modulation that suppresses easy samples whose residual is
% smaller than c.

label_exp = exp(1.6*label);
labels = label_exp.*label;
labels = labels./max(labels(:));
diff = abs(pred - label);
a = 10;
c = 0.2;
a1 = labels.^2;
a2 = diff.^2;
a3 = 1.0./(1 + exp(a.*(c - abs(diff))));
loss = a1.*a2.*a3;
delta = -labels.^2.*(2.*diff./(exp(a.*(c-diff))+1) + ...
    a.*diff.^2.*exp(a.*(c-diff))./((exp(a.*(c-diff))+1).^2));
% delta(delta>0)=0;
%%
--------------------------------------------------------------------------------
/util/loss_object_grad_focal.m:
--------------------------------------------------------------------------------
function [loss, delta] = loss_object_grad_focal(pred, label)
% Experimental focal-style variants of the regression loss; only the last
% block is active, the earlier attempts are kept commented out.
%%
% loss1 = abs(pred - label);  % pred_label (0.9-1.1 -> 0-1)
% loss2 = pred - label;       % (-1,0)
% loss = -loss1.^2.*log10((1-(label-pred)));
% %loss(ind)=0;
%
% %loss2(ind) = 0;
% delta = log10((pred-label)).*(-loss2)-(loss1.^2).*(log(10)./(1+pred-label));  % for logistic loss
%%
% soft_max1 = (6./(1+exp(7.*(0.9-label)))+1);
% label_exp = soft_max1/(min(soft_max1(:)));
% labels = label_exp.*label;
% labels = labels./max(labels(:));
% diff = abs(pred - label);
% exp_tmp = exp(label);
% a = 6;
% factor_exp = exp_tmp;  %./max(exp_tmp(:));
% loss = factor_exp.*diff.^2/(1+exp(a.*(0.8-diff)));
% delta = -factor_exp.*(2.*diff./(exp(a.*(0.8-diff))+1)+ ...
%     a.*diff.^2.*exp(a.*(0.8-diff))./((exp(a.*(0.8-diff))+1).^2));
% %loss(ind)=0;
% loss2 = pred-label;
% %loss2(ind) = 0;
%%
label_exp = exp(1.6*label);
labels = label_exp.*label;
labels = labels./max(labels(:));
loss1 = abs(pred - label);
ind = find(loss1 < 0.1);   % used only by the commented-out filtering below
loss = loss1.^3.*labels.^2;
% loss(ind) = 0;
loss2 = pred - label;
% loss2(ind) = 0;
delta = -labels.^2.*(loss2.^2);   % for logistic loss
% delta = labels.^2.*(loss2);     % for logistic loss
--------------------------------------------------------------------------------
/util/myGetMap1.m:
--------------------------------------------------------------------------------
function map = myGetMap1(im_sz, fea_sz, roi_sz, location, l_off, output_sigma_factor)
% Gaussian regression label over the ROI, resized to the feature-map size and
% normalised to [0, 1]. (im_sz, location and l_off are currently unused.)
sz = roi_sz([2,1]);
[rs, cs] = ndgrid((0.5:sz(1)-0.5) - (sz(1)/2), (0.5:sz(2)-0.5) - (sz(2)/2));
% output_sigma = sqrt(prod(location([3,4]))) * output_sigma_factor;
sigma = output_sigma_factor;
map = exp(-0.5 / sigma^2 * (rs.^2 + cs.^2));
%%map = map(sz(1)/2-fea_sz(1)/2: sz(1)/2+fea_sz(1)/2, sz(2)/2-fea_sz(2)/2: sz(2)/2+fea_sz(2)/2, :);
%map1 = zeros(roi_sz([2,1]));
%map1(roi_sz(2)/2-roi_sz(1)/2: roi_sz(2)/2-sz(1)/2+sz(1)-1, roi_sz(1)/2-sz(2)/2: roi_sz(1)/2-sz(2)/2+sz(2)-1) = map;
map = imresize(map(:,:,1), [fea_sz(1), fea_sz(2)], 'bilinear');
map = (map - min(map(:))) / (max(map(:)) - min(map(:)) + eps);
% mask = exp(-0.5 * (((rs.^2 + cs.^2) / output_sigma^2)));
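% Toy run of loss_object_grad on a Gaussian label map of the kind myGetMap1
% produces (the 46x46 size and the noise level are hypothetical):
[rs, cs] = ndgrid(linspace(-1, 1, 46), linspace(-1, 1, 46));
label = exp(-(rs.^2 + cs.^2) / (2*0.2^2));   % soft regression target, ~1 at the centre
pred  = label + 0.05*randn(size(label));     % stand-in for a network prediction
[loss, delta] = loss_object_grad(pred, label);
% The 1./(1+exp(a*(c-|pred-label|))) factor shrinks the contribution of easy
% pixels whose residual is below c = 0.2, while the squared label weight
% emphasises the region around the target centre.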
--------------------------------------------------------------------------------
/util/my_data_augmentation.m:
--------------------------------------------------------------------------------
function imgs = my_data_augmentation(img)
% Returns a cell array of augmented copies of img: four Gaussian-blurred
% versions followed by the horizontal flip repeated three times.
% shifting_value = [100, 0, -100, 0; 0, 100, 0, -100];
% center_img = center_crop(original_img, input_width, input_height, shifting_value(1,i), shifting_value(2,i));
% img = imread('/home/ying/1.jpg');
% rotation_angle = [-45, -30,-20,-10,10,20,30,45];
imgs = cell(1,1);  % 6 + flip + gaussian
idx = 0;
% for i = 1:size(rotation_angle,2)
%     idx = idx+1;
%     img1 = imrotate(img, rotation_angle(i), 'bicubic', 'crop');
%     imgs{idx,1} = img1;
%     % figure, imshow(mat2gray(img1))
% end

g_sigma = [10, 5, 1, 0.1];

for i = 1:size(g_sigma, 2)
    idx = idx + 1;
    w = fspecial('gaussian', [5 5], g_sigma(i));
    im2 = imfilter(img, w);
    imgs{idx,1} = im2;
    % figure, imshow(mat2gray(im2))
end

im3 = fliplr(img);
for i = 1:3
    idx = idx + 1;
    imgs{idx,1} = im3;
end
% figure, imshow(mat2gray(im3))
--------------------------------------------------------------------------------
/util/mynewGetMap.m:
--------------------------------------------------------------------------------
function map = mynewGetMap(fea_sz, res_sz, l_off, output_sigma_factor)
% Isotropic Gaussian regression label over a fea_sz grid, resized to res_sz
% and normalised to [0, 1]. (l_off and the second sigma are currently unused.)
% sz = fea_sz([2,1]);
% [rs, cs] = ndgrid((0.5:sz(1)-0.5) - (sz(1)/2), (0.5:sz(2)-0.5) - (sz(2)/2));
% %output_sigma = sqrt(prod(location([3,4]))) * output_sigma_factor;
% sigma = [15 6]*0.6;  % output_sigma_factor([2,1]);
% map = exp(-0.5 / sigma(1)^2 * (rs.^2)).*exp(-0.5 / sigma(2)^2 * (cs.^2));
% %%map = map(sz(1)/2-fea_sz(1)/2: sz(1)/2+fea_sz(1)/2, sz(2)/2-fea_sz(2)/2: sz(2)/2+fea_sz(2)/2, :);
% %map1 = zeros(roi_sz([2,1]));
% %map1(roi_sz(2)/2-roi_sz(1)/2: roi_sz(2)/2-sz(1)/2+sz(1)-1, roi_sz(1)/2-sz(2)/2: roi_sz(1)/2-sz(2)/2+sz(2)-1) = map;
% map = imresize(map(:,:,1), [fea_sz(1), fea_sz(2)], 'bilinear');
% map = (map - min(map(:))) / (max(map(:)) - min(map(:)) + eps);

sz = fea_sz([2,1]);
[rs, cs] = ndgrid((0.5:sz(1)-0.5) - (sz(1)/2), (0.5:sz(2)-0.5) - (sz(2)/2));
% output_sigma = sqrt(prod(location([3,4]))) * output_sigma_factor;
sigma = output_sigma_factor([2,1]);
map = exp(-0.5 * ((rs.^2 + cs.^2) ./ sigma(1)^2));  % + (cs.^2)./sigma(2)^2));
%%map = map(sz(1)/2-fea_sz(1)/2: sz(1)/2+fea_sz(1)/2, sz(2)/2-fea_sz(2)/2: sz(2)/2+fea_sz(2)/2, :);
%map1 = zeros(roi_sz([2,1]));
%map1(roi_sz(2)/2-roi_sz(1)/2: roi_sz(2)/2-sz(1)/2+sz(1)-1, roi_sz(1)/2-sz(2)/2: roi_sz(1)/2-sz(2)/2+sz(2)-1) = map;
map = imresize(map(:,:,1), [res_sz(1), res_sz(2)], 'bilinear');
map = (map - min(map(:))) / (max(map(:)) - min(map(:)) + eps);
--------------------------------------------------------------------------------
/util/mypca.m:
--------------------------------------------------------------------------------
function [newfea_map, eigenVectors, scores] = mypca(fea_map, num)
% Reduce the channel dimension of an HxWxC feature map to num channels via PCA
% (relies on pca from the Statistics Toolbox and an external pcaApply helper).
[a, b, c] = size(fea_map);
fea_map = reshape(fea_map, [a*b, c]);  % num*dim
[eigenVectors, scores, eigenValues] = pca(fea_map');  % ,'econ'
[Yk, X, avsq] = pcaApply(fea_map', eigenVectors, scores, num);
% fea_map = scores(:,1:num);
newfea_map = reshape(Yk', [a, b, num]);
--------------------------------------------------------------------------------
/util/plot_focal_loss.m:
--------------------------------------------------------------------------------
% Scratch script: plot the cross-entropy / focal-loss family and a thresholded
% L2 loss for comparison.
close all;
% for cross entropy loss
x = 0.001:0.001:1;
y = -log(x);
figure(1), plot(x, y, 'b-');
y1 = -(1-x).^(0.5).*log(x);
y2 = -(1-x).*log(x);
y3 = -(1-x).^2.*log(x);
hold on,
plot(x, y1, 'y-');
plot(x, y2, 'g-');
plot(x, y3, 'k-');
% for L2 loss
x1 = zeros(size(x));
th = 0.1;
for i = 1:size(x,2)
    if x(1,i) >= th
        x1(1,i) = x(1,i);
    end
end
figure(2),
plot(x, x.^2, 'r-')
hold on,
plot(x, x1.^2, 'b-')
% y4 =
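% Minimal sketch of generating a regression label with mynewGetMap above
% (the 46x46 size and sigma value are hypothetical; imresize needs the
% Image Processing Toolbox):
fea_sz = [46 46]; res_sz = [46 46];
map = mynewGetMap(fea_sz, res_sz, [0 0], [5 5]);  % 46x46 Gaussian, peak scaled to 1
figure, imagesc(map), axis image, colorbar;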
--------------------------------------------------------------------------------
/util/revise_bilinear_layer.m:
--------------------------------------------------------------------------------
function revise_bilinear_layer(prototext_file, fea_sz, tmp_target_sz)
% Rewrite a bilinear-layer prototxt template: patch the input height/width on
% lines 7-8 and the kernel height/width on lines 10-11, writing the result to
% vgg_bilinear_layer1.prototxt.
fidin1 = fopen(prototext_file);    % fopen('fea_net1.prototxt');
fidout1 = fopen('vgg_bilinear_layer1.prototxt', 'w');  % fopen([video '_layer.prototxt'],'w');
i = 0;
im_w = fea_sz(1);
im_h = fea_sz(2);
kernel_h = tmp_target_sz(1);
kernel_w = tmp_target_sz(2);
while ~feof(fidin1)
    i = i + 1;
    tline = fgetl(fidin1);
    if i == 7
        len = length(num2str(im_w));
        tline(12:12+len-1) = num2str(im_w);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 8
        len = length(num2str(im_h));
        tline(12:12+len-1) = num2str(im_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 10
        len = length(num2str(kernel_h));
        tline(11:11+len-1) = num2str(kernel_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 11
        len = length(num2str(kernel_w));
        tline(11:11+len-1) = num2str(kernel_w);
        fprintf(fidout1, '%s\n', tline);
    else
        fprintf(fidout1, '%s\n', tline);
    end
end
fclose(fidin1);
fclose(fidout1);
--------------------------------------------------------------------------------
/util/revise_fea_prototxt.m:
--------------------------------------------------------------------------------
function revise_fea_prototxt(model_file, win_sz)
% NOTE: unfinished helper -- `input` and `layers` are expected to exist in the
% caller's workspace and the layer-writing loop body was never filled in.

fid = fopen(model_file, 'w');

fprintf(fid, '#--------input---------\n');
fprintf(fid, 'name: "%s"\n', input.name);
fprintf(fid, 'inputs: "%s"\n', input.inputs);
fprintf(fid, 'input_dim: %d\n', input.dim(1));
fprintf(fid, 'input_dim: %d\n', input.dim(2));
fprintf(fid, 'input_dim: %d\n', input.dim(3));
fprintf(fid, 'input_dim: %d\n', input.dim(4));

for i = 1 : length(layers)

end

fclose(fid);
--------------------------------------------------------------------------------
/util/revise_layer2.m:
--------------------------------------------------------------------------------
function revise_layer2(video, prototext_file, fea_sz1, red_dim, tmp_target_sz)
% Rewrite a regression-layer prototxt template: patch the channel count on
% line 6, the input height/width on lines 7-8 and the kernel height/width on
% lines 12-13, writing the result to res_reg_layer12.prototxt.
fidin1 = fopen(prototext_file);    % fopen('fea_net1.prototxt');
fidout1 = fopen('res_reg_layer12.prototxt', 'w');  % fopen([video '_layer.prototxt'],'w');
i = 0;
im_w = fea_sz1(1);
im_h = fea_sz1(2);
kernel_h = tmp_target_sz(1);
kernel_w = tmp_target_sz(2);
while ~feof(fidin1)
    i = i + 1;
    tline = fgetl(fidin1);
    if i == 7
        len = length(num2str(im_w));
        tline(12:12+len-1) = num2str(im_w);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 6  % || i == 13
        len = length(num2str(red_dim));
        tline(12:12+len-1) = num2str(red_dim);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 8
        len = length(num2str(im_h));
        tline(12:12+len-1) = num2str(im_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 12
        len = length(num2str(kernel_h));
        tline(11:11+len-1) = num2str(kernel_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 13
        len = length(num2str(kernel_w));
        tline(11:11+len-1) = num2str(kernel_w);
        fprintf(fidout1, '%s\n', tline);
    else
        fprintf(fidout1, '%s\n', tline);
    end
end
fclose(fidin1);
fclose(fidout1);
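% Hypothetical call of revise_layer2 above (the template filename and sizes are
% made up; the template prototxt is assumed to carry the channel/input_dim and
% kernel fields on lines 6-8 and 12-13, and the video argument is unused
% because the output name is hard-coded):
fea_sz1 = [46 46]; red_dim = 64; tmp_target_sz = [7 11];
revise_layer2('MotorRolling', 'res_reg_layer12_template.prototxt', fea_sz1, red_dim, tmp_target_sz);
% Result is written to res_reg_layer12.prototxt in the current directory.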
--------------------------------------------------------------------------------
/util/revise_prototxt1.m:
--------------------------------------------------------------------------------
function revise_prototxt1(video, prototext_file, im_size)
% Rewrite a feature-net prototxt template: patch the input width/height on
% lines 7-8 and write the result to <video>_fea_net.prototxt.
fidin = fopen(prototext_file);
fidout = fopen([video '_fea_net.prototxt'], 'w');
i = 0;
im_w = im_size(1);
im_h = im_size(2);
while ~feof(fidin)
    i = i + 1;
    tline = fgetl(fidin);
    if i == 7
        len = length(num2str(im_w));
        tline(12:12+len-1) = num2str(im_w);
        fprintf(fidout, '%s\n', tline);
    elseif i == 8
        len = length(num2str(im_h));
        tline(12:12+len-1) = num2str(im_h);
        fprintf(fidout, '%s\n', tline);
    else
        fprintf(fidout, '%s\n', tline);
    end
end
fclose(fidin);
fclose(fidout);
--------------------------------------------------------------------------------
/util/revise_prototxt2.m:
--------------------------------------------------------------------------------
function revise_prototxt2(video, prototext_file, fea_sz, tmp_target_sz, red_dim)
% Same template patching as revise_layer2, but writing to vgg_layer1.prototxt.
fidin1 = fopen(prototext_file);    % fopen('fea_net1.prototxt');
fidout1 = fopen('vgg_layer1.prototxt', 'w');  % fopen([video '_layer.prototxt'],'w');
i = 0;
im_w = fea_sz(1);
im_h = fea_sz(2);
kernel_h = tmp_target_sz(1);
kernel_w = tmp_target_sz(2);
while ~feof(fidin1)
    i = i + 1;
    tline = fgetl(fidin1);
    if i == 7
        len = length(num2str(im_w));
        tline(12:12+len-1) = num2str(im_w);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 6  % || i == 13
        len = length(num2str(red_dim));
        tline(12:12+len-1) = num2str(red_dim);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 8
        len = length(num2str(im_h));
        tline(12:12+len-1) = num2str(im_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 12
        len = length(num2str(kernel_h));
        tline(11:11+len-1) = num2str(kernel_h);
        fprintf(fidout1, '%s\n', tline);
    elseif i == 13
        len = length(num2str(kernel_w));
        tline(11:11+len-1) = num2str(kernel_w);
        fprintf(fidout1, '%s\n', tline);
    else
        fprintf(fidout1, '%s\n', tline);
    end
end
fclose(fidin1);
fclose(fidout1);
--------------------------------------------------------------------------------