├── 000791_BBoxCandiates.jpg ├── README.md ├── caffe_densebox ├── CMakeLists.txt ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config ├── Makefile.config.example ├── README.md ├── caffe.cloc ├── cmake │ ├── ConfigGen.cmake │ ├── Cuda.cmake │ ├── Dependencies.cmake │ ├── External │ │ ├── gflags.cmake │ │ └── glog.cmake │ ├── Misc.cmake │ ├── Modules │ │ ├── FindAtlas.cmake │ │ ├── FindGFlags.cmake │ │ ├── FindGlog.cmake │ │ ├── FindLAPACK.cmake │ │ ├── FindLMDB.cmake │ │ ├── FindLevelDB.cmake │ │ ├── FindMKL.cmake │ │ ├── FindMatlabMex.cmake │ │ ├── FindNumPy.cmake │ │ ├── FindOpenBLAS.cmake │ │ ├── FindSnappy.cmake │ │ └── FindvecLib.cmake │ ├── ProtoBuf.cmake │ ├── Summary.cmake │ ├── Targets.cmake │ ├── Templates │ │ ├── CaffeConfig.cmake.in │ │ ├── CaffeConfigVersion.cmake.in │ │ └── caffe_config.h.in │ ├── Utils.cmake │ └── lint.cmake ├── data │ ├── cifar10 │ │ └── get_cifar10.sh │ ├── ilsvrc12 │ │ └── get_ilsvrc_aux.sh │ └── mnist │ │ └── get_mnist.sh ├── docker │ ├── Makefile │ ├── README.md │ ├── standalone │ │ ├── cpu │ │ │ └── Dockerfile │ │ └── gpu │ │ │ └── Dockerfile │ └── templates │ │ └── Dockerfile.template ├── docs │ ├── CMakeLists.txt │ ├── CNAME │ ├── README.md │ ├── _config.yml │ ├── _layouts │ │ └── default.html │ ├── development.md │ ├── images │ │ ├── GitHub-Mark-64px.png │ │ └── caffeine-icon.png │ ├── index.md │ ├── install_apt.md │ ├── install_osx.md │ ├── install_yum.md │ ├── installation.md │ ├── model_zoo.md │ ├── multigpu.md │ ├── performance_hardware.md │ ├── stylesheets │ │ ├── pygment_trac.css │ │ ├── reset.css │ │ └── styles.css │ └── tutorial │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig │ │ ├── .gitignore │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ ├── forward_backward.png │ │ └── layer.jpg │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md ├── examples │ ├── 00-classification.ipynb │ ├── 01-learning-lenet.ipynb │ ├── 02-fine-tuning.ipynb │ ├── CMakeLists.txt │ ├── brewing-logreg.ipynb │ ├── cifar10 │ │ ├── cifar10_full.prototxt │ │ ├── cifar10_full_sigmoid_solver.prototxt │ │ ├── cifar10_full_sigmoid_solver_bn.prototxt │ │ ├── cifar10_full_sigmoid_train_test.prototxt │ │ ├── cifar10_full_sigmoid_train_test_bn.prototxt │ │ ├── cifar10_full_solver.prototxt │ │ ├── cifar10_full_solver_lr1.prototxt │ │ ├── cifar10_full_solver_lr2.prototxt │ │ ├── cifar10_full_train_test.prototxt │ │ ├── cifar10_quick.prototxt │ │ ├── cifar10_quick_solver.prototxt │ │ ├── cifar10_quick_solver_lr1.prototxt │ │ ├── cifar10_quick_train_test.prototxt │ │ ├── convert_cifar_data.cpp │ │ ├── create_cifar10.sh │ │ ├── readme.md │ │ ├── train_full.sh │ │ ├── train_full_sigmoid.sh │ │ ├── train_full_sigmoid_bn.sh │ │ └── train_quick.sh │ ├── cpp_classification │ │ ├── classification.cpp.back │ │ └── readme.md │ ├── detection.ipynb │ ├── feature_extraction │ │ ├── imagenet_val.prototxt │ │ └── readme.md │ ├── finetune_flickr_style │ │ ├── assemble_data.py │ │ ├── readme.md │ │ └── style_names.txt │ ├── finetune_pascal_detection │ │ ├── pascal_finetune_solver.prototxt │ │ └── pascal_finetune_trainval_test.prototxt │ ├── hdf5_classification │ │ ├── nonlinear_auto_test.prototxt │ │ ├── nonlinear_auto_train.prototxt │ │ ├── nonlinear_train_val.prototxt │ │ └── train_val.prototxt │ ├── imagenet │ │ ├── create_imagenet.sh │ │ ├── make_imagenet_mean.sh │ │ ├── readme.md │ │ ├── resume_training.sh │ │ └── train_caffenet.sh │ ├── images │ │ ├── 
cat.jpg │ │ ├── cat_gray.jpg │ │ └── fish-bike.jpg │ ├── mnist │ │ ├── convert_mnist_data.cpp │ │ ├── create_mnist.sh │ │ ├── lenet.prototxt │ │ ├── lenet_adadelta_solver.prototxt │ │ ├── lenet_auto_solver.prototxt │ │ ├── lenet_consolidated_solver.prototxt │ │ ├── lenet_multistep_solver.prototxt │ │ ├── lenet_solver.prototxt │ │ ├── lenet_solver_adam.prototxt │ │ ├── lenet_solver_rmsprop.prototxt │ │ ├── lenet_train_test.prototxt │ │ ├── mnist_autoencoder.prototxt │ │ ├── mnist_autoencoder_solver.prototxt │ │ ├── mnist_autoencoder_solver_adadelta.prototxt │ │ ├── mnist_autoencoder_solver_adagrad.prototxt │ │ ├── mnist_autoencoder_solver_nesterov.prototxt │ │ ├── readme.md │ │ ├── train_lenet.sh │ │ ├── train_lenet_adam.sh │ │ ├── train_lenet_consolidated.sh │ │ ├── train_lenet_docker.sh │ │ ├── train_lenet_rmsprop.sh │ │ ├── train_mnist_autoencoder.sh │ │ ├── train_mnist_autoencoder_adadelta.sh │ │ ├── train_mnist_autoencoder_adagrad.sh │ │ └── train_mnist_autoencoder_nesterov.sh │ ├── net_surgery.ipynb │ ├── net_surgery │ │ ├── bvlc_caffenet_full_conv.prototxt │ │ └── conv.prototxt │ ├── pascal-multilabel-with-datalayer.ipynb │ ├── pycaffe │ │ ├── caffenet.py │ │ ├── layers │ │ │ ├── pascal_multilabel_datalayers.py │ │ │ └── pyloss.py │ │ ├── linreg.prototxt │ │ └── tools.py │ ├── siamese │ │ ├── convert_mnist_siamese_data.cpp │ │ ├── create_mnist_siamese.sh │ │ ├── mnist_siamese.ipynb │ │ ├── mnist_siamese.prototxt │ │ ├── mnist_siamese_solver.prototxt │ │ ├── mnist_siamese_train_test.prototxt │ │ ├── readme.md │ │ └── train_mnist_siamese.sh │ └── web_demo │ │ ├── app.py │ │ ├── exifutil.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ └── templates │ │ └── index.html ├── include │ ├── boost │ │ ├── threadpool.hpp │ │ └── threadpool │ │ │ ├── detail │ │ │ ├── future.hpp │ │ │ ├── locking_ptr.hpp │ │ │ ├── pool_core.hpp │ │ │ ├── scope_guard.hpp │ │ │ └── worker_thread.hpp │ │ │ ├── future.hpp │ │ │ ├── pool.hpp │ │ │ ├── pool_adaptors.hpp │ │ │ ├── scheduling_policies.hpp │ │ │ ├── shutdown_policies.hpp │ │ │ ├── size_policies.hpp │ │ │ └── task_adaptors.hpp │ └── caffe │ │ ├── blob.hpp │ │ ├── blob_transform.hpp │ │ ├── caffe.hpp │ │ ├── caffe_wrapper.hpp │ │ ├── caffe_wrapper_common.hpp │ │ ├── common.hpp │ │ ├── data_reader.hpp │ │ ├── data_transformer.hpp │ │ ├── filler.hpp │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── layer_factory.hpp │ │ ├── layers │ │ ├── absval_layer.hpp │ │ ├── accuracy_layer.hpp │ │ ├── argmax_layer.hpp │ │ ├── base_conv_layer.hpp │ │ ├── base_data_layer.hpp │ │ ├── batch_norm_layer.hpp │ │ ├── batch_reindex_layer.hpp │ │ ├── bnll_layer.hpp │ │ ├── color_aug_layer.hpp │ │ ├── concat_layer.hpp │ │ ├── contrastive_loss_layer.hpp │ │ ├── conv_layer.hpp │ │ ├── cudnn_conv_layer.hpp │ │ ├── cudnn_lcn_layer.hpp │ │ ├── cudnn_lrn_layer.hpp │ │ ├── cudnn_pooling_layer.hpp │ │ ├── cudnn_relu_layer.hpp │ │ ├── cudnn_sigmoid_layer.hpp │ │ ├── cudnn_softmax_layer.hpp │ │ ├── cudnn_tanh_layer.hpp │ │ ├── data_layer.hpp │ │ ├── deconv_layer.hpp │ │ ├── dropout_layer.hpp │ │ ├── dummy_data_layer.hpp │ │ ├── element_hinge_loss_layer.hpp │ │ ├── eltwise_layer.hpp │ │ ├── embed_layer.hpp │ │ ├── euclidean_loss_layer.hpp │ │ ├── exp_layer.hpp │ │ ├── fcn_data_layers.hpp │ │ ├── filter_layer.hpp │ │ ├── fixed_conv_layer.hpp │ │ ├── flatten_layer.hpp │ │ ├── hdf5_data_layer.hpp │ │ ├── hdf5_output_layer.hpp │ │ ├── hinge_loss_layer.hpp │ │ ├── im2col_layer.hpp │ │ ├── image_data_layer.hpp │ │ ├── infogain_loss_layer.hpp │ │ ├── inner_product_layer.hpp │ │ ├── 
label_convert_layer.hpp │ │ ├── landmark_detection_layers.hpp │ │ ├── loss_layer.hpp │ │ ├── lrn_layer.hpp │ │ ├── memory_data_layer.hpp │ │ ├── multinomial_logistic_loss_layer.hpp │ │ ├── mvn_layer.hpp │ │ ├── neuron_layer.hpp │ │ ├── pooling_layer.hpp │ │ ├── power_layer.hpp │ │ ├── prelu_layer.hpp │ │ ├── pyramid_data_layers.hpp │ │ ├── python_layer.hpp │ │ ├── reduction_layer.hpp │ │ ├── relu_layer.hpp │ │ ├── reshape_layer.hpp │ │ ├── resize_layer.hpp │ │ ├── roi_data_layers.hpp │ │ ├── sigmoid_cross_entropy_loss_layer.hpp │ │ ├── sigmoid_layer.hpp │ │ ├── silence_layer.hpp │ │ ├── slice_layer.hpp │ │ ├── softmax_layer.hpp │ │ ├── softmax_loss_layer.hpp │ │ ├── split_layer.hpp │ │ ├── spp_layer.hpp │ │ ├── tanh_layer.hpp │ │ ├── threshold_layer.hpp │ │ ├── tile_layer.hpp │ │ └── window_data_layer.hpp │ │ ├── net.hpp │ │ ├── parallel.hpp │ │ ├── sgd_solvers.hpp │ │ ├── solver.hpp │ │ ├── solver_factory.hpp │ │ ├── syncedmem.hpp │ │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ │ └── util │ │ ├── RectMap.hpp │ │ ├── benchmark.hpp │ │ ├── blocking_queue.hpp │ │ ├── buffered_reader.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── db_leveldb.hpp │ │ ├── db_lmdb.hpp │ │ ├── device_alternate.hpp │ │ ├── format.hpp │ │ ├── gpu_util.cuh │ │ ├── hdf5.hpp │ │ ├── im2col.hpp │ │ ├── insert_inceptions.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── rng.hpp │ │ ├── signal_handler.h │ │ ├── upgrade_proto.hpp │ │ ├── util_img.hpp │ │ └── util_others.hpp ├── matlab │ ├── +caffe │ │ ├── +test │ │ │ ├── test_io.m │ │ │ ├── test_net.m │ │ │ └── test_solver.m │ │ ├── Blob.m │ │ ├── Layer.m │ │ ├── Net.m │ │ ├── Solver.m │ │ ├── get_net.m │ │ ├── get_solver.m │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.mat │ │ ├── io.m │ │ ├── private │ │ │ ├── CHECK.m │ │ │ ├── CHECK_FILE_EXIST.m │ │ │ ├── caffe_.cpp │ │ │ └── is_valid_handle.m │ │ ├── reset_all.m │ │ ├── run_tests.m │ │ ├── set_device.m │ │ ├── set_mode_cpu.m │ │ ├── set_mode_gpu.m │ │ └── version.m │ ├── CMakeLists.txt │ ├── demo │ │ └── classification_demo.m │ └── hdf5creation │ │ ├── .gitignore │ │ ├── demo.m │ │ └── store2hdf5.m ├── python │ ├── CMakeLists.txt │ ├── caffe │ │ ├── __init__.py │ │ ├── _caffe.cpp │ │ ├── classifier.py │ │ ├── coord_map.py │ │ ├── detector.py │ │ ├── draw.py │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.npy │ │ ├── io.py │ │ ├── net_spec.py │ │ ├── pycaffe.py │ │ └── test │ │ │ ├── test_coord_map.py │ │ │ ├── test_io.py │ │ │ ├── test_layer_type_list.py │ │ │ ├── test_net.py │ │ │ ├── test_net_spec.py │ │ │ ├── test_python_layer.py │ │ │ ├── test_python_layer_with_param_str.py │ │ │ └── test_solver.py │ ├── classify.py │ ├── detect.py │ ├── draw_net.py │ └── requirements.txt ├── scripts │ ├── copy_notebook.py │ ├── cpp_lint.py │ ├── deploy_docs.sh │ ├── download_model_binary.py │ ├── download_model_from_gist.sh │ ├── gather_examples.sh │ ├── travis │ │ ├── travis_build_and_test.sh │ │ ├── travis_install.sh │ │ └── travis_setup_makefile_config.sh │ └── upload_model_to_gist.sh ├── src │ ├── caffe │ │ ├── CMakeLists.txt │ │ ├── blob.cpp │ │ ├── caffe_wrapper.cpp │ │ ├── common.cpp │ │ ├── data_reader.cpp │ │ ├── data_transformer.cpp │ │ ├── internal_thread.cpp │ │ ├── layer.cpp │ │ ├── layer_factory.cpp │ │ ├── layers │ │ │ ├── absval_layer.cpp │ │ │ ├── absval_layer.cu │ │ │ ├── accuracy_layer.cpp │ │ │ ├── argmax_layer.cpp │ │ │ ├── base_conv_layer.cpp │ │ │ ├── base_data_layer.cpp │ │ │ ├── base_data_layer.cu │ │ │ ├── batch_norm_layer.cpp 
│ │ │ ├── batch_norm_layer.cu │ │ │ ├── batch_reindex_layer.cpp │ │ │ ├── batch_reindex_layer.cu │ │ │ ├── bgr_2_gray_layer.cpp │ │ │ ├── bgr_2_gray_layer.cu │ │ │ ├── bnll_layer.cpp │ │ │ ├── bnll_layer.cu │ │ │ ├── color_aug_layer.cpp │ │ │ ├── color_aug_layer.cu │ │ │ ├── concat_layer.cpp │ │ │ ├── concat_layer.cu │ │ │ ├── contrastive_loss_layer.cpp │ │ │ ├── contrastive_loss_layer.cu │ │ │ ├── conv_layer.cpp │ │ │ ├── conv_layer.cu │ │ │ ├── cudnn_conv_layer.cpp │ │ │ ├── cudnn_conv_layer.cu │ │ │ ├── cudnn_lcn_layer.cpp │ │ │ ├── cudnn_lcn_layer.cu │ │ │ ├── cudnn_lrn_layer.cpp │ │ │ ├── cudnn_lrn_layer.cu │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ ├── cudnn_pooling_layer.cu │ │ │ ├── cudnn_relu_layer.cpp │ │ │ ├── cudnn_relu_layer.cu │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ ├── cudnn_softmax_layer.cu │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ ├── cudnn_tanh_layer.cu │ │ │ ├── data_layer.cpp │ │ │ ├── deconv_layer.cpp │ │ │ ├── deconv_layer.cu │ │ │ ├── detection_output_layer.cpp │ │ │ ├── dropout_layer.cpp │ │ │ ├── dropout_layer.cu │ │ │ ├── dummy_data_layer.cpp │ │ │ ├── element_hinge_loss_layer.cpp │ │ │ ├── element_hinge_loss_layer.cu │ │ │ ├── eltwise_layer.cpp │ │ │ ├── eltwise_layer.cu │ │ │ ├── embed_layer.cpp │ │ │ ├── embed_layer.cu │ │ │ ├── euclidean_loss_layer.cpp │ │ │ ├── euclidean_loss_layer.cu │ │ │ ├── exp_layer.cpp │ │ │ ├── exp_layer.cu │ │ │ ├── fcn_base_image_data_layer.cpp │ │ │ ├── fcn_image_buffered_data_reader_layer.cpp │ │ │ ├── fcn_image_data_detection_box_layer.cpp │ │ │ ├── fcn_image_data_ignore_box_layer.cpp │ │ │ ├── fcn_image_data_key_point_layer.cpp │ │ │ ├── fcn_image_data_layer.cpp │ │ │ ├── fcn_image_data_processor_layer.cpp │ │ │ ├── fcn_image_data_reader_layer.cpp │ │ │ ├── fcn_image_data_source_provider_layer.cpp │ │ │ ├── filter_layer.cpp │ │ │ ├── filter_layer.cu │ │ │ ├── fixed_conv_layer.cpp │ │ │ ├── fixed_conv_layer.cu │ │ │ ├── flatten_layer.cpp │ │ │ ├── hdf5_data_layer.cpp │ │ │ ├── hdf5_data_layer.cu │ │ │ ├── hdf5_output_layer.cpp │ │ │ ├── hdf5_output_layer.cu │ │ │ ├── hinge_loss_layer.cpp │ │ │ ├── im2col_layer.cpp │ │ │ ├── im2col_layer.cu │ │ │ ├── image_data_layer.cpp │ │ │ ├── infogain_loss_layer.cpp │ │ │ ├── inner_product_layer.cpp │ │ │ ├── inner_product_layer.cu │ │ │ ├── label_convert_layer.cpp │ │ │ ├── label_related_dropout_layer.cpp │ │ │ ├── label_related_dropout_layer.cu │ │ │ ├── landmark_detection_layer.cpp │ │ │ ├── loss_layer.cpp │ │ │ ├── lrn_layer.cpp │ │ │ ├── lrn_layer.cu │ │ │ ├── memory_data_layer.cpp │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ ├── mvn_layer.cpp │ │ │ ├── mvn_layer.cu │ │ │ ├── neuron_layer.cpp │ │ │ ├── pooling_layer.cpp │ │ │ ├── pooling_layer.cu │ │ │ ├── power_layer.cpp │ │ │ ├── power_layer.cu │ │ │ ├── prelu_layer.cpp │ │ │ ├── prelu_layer.cu │ │ │ ├── pyramid_image_data_layer.cpp │ │ │ ├── pyramid_image_data_layer.cu │ │ │ ├── pyramid_image_online_data_layer.cpp │ │ │ ├── pyramid_image_online_data_layer.cu │ │ │ ├── reduction_layer.cpp │ │ │ ├── reduction_layer.cu │ │ │ ├── relu_layer.cpp │ │ │ ├── relu_layer.cu │ │ │ ├── reshape_layer.cpp │ │ │ ├── resize_layer.cpp │ │ │ ├── resize_layer.cu │ │ │ ├── roi_2_heatmap_layer.cpp │ │ │ ├── roi_data_layer.cpp │ │ │ ├── roi_output_layer.cpp │ │ │ ├── roi_refine_layer.cpp │ │ │ ├── roi_show_layer.cpp │ │ │ ├── show_image_pair_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ │ ├── sigmoid_layer.cpp │ │ │ ├── 
sigmoid_layer.cu │ │ │ ├── silence_layer.cpp │ │ │ ├── silence_layer.cu │ │ │ ├── slice_layer.cpp │ │ │ ├── slice_layer.cu │ │ │ ├── softmax_layer.cpp │ │ │ ├── softmax_layer.cu │ │ │ ├── softmax_loss_layer.cpp │ │ │ ├── softmax_loss_layer.cu │ │ │ ├── split_layer.cpp │ │ │ ├── split_layer.cu │ │ │ ├── spp_layer.cpp │ │ │ ├── tanh_layer.cpp │ │ │ ├── tanh_layer.cu │ │ │ ├── threshold_layer.cpp │ │ │ ├── threshold_layer.cu │ │ │ ├── tile_layer.cpp │ │ │ ├── tile_layer.cu │ │ │ └── window_data_layer.cpp │ │ ├── net.cpp │ │ ├── parallel.cpp │ │ ├── proto │ │ │ ├── caffe.proto │ │ │ └── caffe_fcn_data_layer.proto │ │ ├── solver.cpp │ │ ├── solvers │ │ │ ├── adadelta_solver.cpp │ │ │ ├── adagrad_solver.cpp │ │ │ ├── adam_solver.cpp │ │ │ ├── nesterov_solver.cpp │ │ │ ├── rmsprop_solver.cpp │ │ │ └── sgd_solver.cpp │ │ ├── syncedmem.cpp │ │ ├── test │ │ │ ├── CMakeLists.txt │ │ │ ├── test_accuracy_layer.cpp │ │ │ ├── test_argmax_layer.cpp │ │ │ ├── test_batch_norm_layer.cpp │ │ │ ├── test_batch_reindex_layer.cpp │ │ │ ├── test_benchmark.cpp │ │ │ ├── test_blob.cpp │ │ │ ├── test_blob_trans.cpp │ │ │ ├── test_caffe_main.cpp │ │ │ ├── test_common.cpp │ │ │ ├── test_concat_layer.cpp │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ ├── test_convolution_layer.cpp │ │ │ ├── test_data │ │ │ │ ├── generate_sample_data.py │ │ │ │ ├── sample_data.h5 │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ ├── sample_data_list.txt │ │ │ │ ├── solver_data.h5 │ │ │ │ └── solver_data_list.txt │ │ │ ├── test_data_layer.cpp │ │ │ ├── test_data_transformer.cpp │ │ │ ├── test_db.cpp │ │ │ ├── test_deconvolution_layer.cpp │ │ │ ├── test_dummy_data_layer.cpp │ │ │ ├── test_eltwise_layer.cpp │ │ │ ├── test_embed_layer.cpp │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ ├── test_filler.cpp │ │ │ ├── test_filter_layer.cpp │ │ │ ├── test_flatten_layer.cpp │ │ │ ├── test_gradient_based_solver.cpp │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ ├── test_hdf5data_layer.cpp │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ ├── test_im2col_kernel.cu │ │ │ ├── test_im2col_layer.cpp │ │ │ ├── test_image_data_layer.cpp │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ ├── test_inner_product_layer.cpp │ │ │ ├── test_internal_thread.cpp │ │ │ ├── test_io.cpp │ │ │ ├── test_layer_factory.cpp │ │ │ ├── test_lrn_layer.cpp │ │ │ ├── test_math_functions.cpp │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ ├── test_memory_data_layer.cpp │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ ├── test_mvn_layer.cpp │ │ │ ├── test_net.cpp │ │ │ ├── test_neuron_layer.cpp │ │ │ ├── test_platform.cpp │ │ │ ├── test_pooling_layer.cpp │ │ │ ├── test_power_layer.cpp │ │ │ ├── test_protobuf.cpp │ │ │ ├── test_random_number_generator.cpp │ │ │ ├── test_reduction_layer.cpp │ │ │ ├── test_reshape_layer.cpp │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── test_slice_layer.cpp │ │ │ ├── test_softmax_layer.cpp │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ ├── test_solver.cpp │ │ │ ├── test_solver_factory.cpp │ │ │ ├── test_split_layer.cpp │ │ │ ├── test_spp_layer.cpp │ │ │ ├── test_stochastic_pooling.cpp │ │ │ ├── test_syncedmem.cpp │ │ │ ├── test_tanh_layer.cpp │ │ │ ├── test_threshold_layer.cpp │ │ │ ├── test_tile_layer.cpp │ │ │ ├── test_upgrade_proto.cpp │ │ │ └── test_util_blas.cpp │ │ └── util │ │ │ ├── RectMap.cpp │ │ │ ├── benchmark.cpp │ │ │ ├── blocking_queue.cpp │ │ │ ├── buffered_reader.cpp │ │ │ ├── cudnn.cpp │ │ │ ├── db.cpp │ │ │ ├── db_leveldb.cpp │ │ │ ├── db_lmdb.cpp │ │ │ ├── hdf5.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── 
insert_inceptions.cpp │ │ │ ├── insert_splits.cpp │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ ├── signal_handler.cpp │ │ │ ├── upgrade_proto.cpp │ │ │ ├── util_img.cpp │ │ │ ├── util_img.cu │ │ │ └── util_others.cpp │ └── gtest │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc └── tools │ ├── BBox_voting_FDDB.cpp │ ├── CMakeLists.txt │ ├── FDDB2FG.cpp │ ├── FDDB2KITTI.cpp │ ├── FDDB2VOC.cpp │ ├── ModelConvert.cpp │ ├── RectMapTest.cpp │ ├── avi2jpg.cpp │ ├── caffe.cpp │ ├── caffe_densebox_test.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── device_query.cpp │ ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.py │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py │ ├── plot_training_log.py.example │ └── resize_and_crop_images.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── forward_get_attribute.cpp │ ├── generate_random_number.cpp │ ├── get_curve_FDDB.cpp │ ├── landmark_test.cpp │ ├── make_ref_result.cpp │ ├── net_speed_benchmark.cpp │ ├── pyramid_online_datalayer_test.cpp │ ├── pyramid_test.cpp │ ├── select_model_pyramid_test.cpp │ ├── select_model_pyramid_test_face.cpp │ ├── select_model_pyramid_test_heads.cpp │ ├── select_model_pyramid_test_no_curve.cpp │ ├── select_model_pyramid_test_no_curve_avi.cpp │ ├── show_class_color.cpp │ ├── show_output.cpp │ ├── show_response_field.cpp │ ├── system_test.cpp │ ├── test_bufferedImgVideoReader.cpp │ ├── test_net.cpp │ ├── time_test.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ ├── upgrade_net_proto_text.cpp │ └── upgrade_solver_proto_text.cpp ├── experiment ├── baidu_car │ └── ssd │ │ └── create_baidu_data.py ├── baidu_common │ ├── __init__.py │ └── config.py ├── kitti │ ├── README.md │ ├── densebox │ │ ├── dense_kitti.py │ │ ├── prepare │ │ │ ├── create_data.sh │ │ │ ├── create_list.py │ │ │ ├── labelmap_kitti.prototxt │ │ │ ├── labelmap_kitti_car.prototxt │ │ │ ├── test.txt │ │ │ └── trainval.txt │ │ └── ssd │ │ │ ├── cur_net.prototxt │ │ │ ├── prepare │ │ │ ├── create_data.sh │ │ │ ├── create_list.py │ │ │ ├── create_list.sh │ │ │ ├── labelmap_voc.prototxt │ │ │ ├── test.txt │ │ │ ├── test_name_size.txt │ │ │ └── trainval.txt │ │ │ ├── score_ssd_pascal.py │ │ │ ├── ssd_pascal.py │ │ │ └── ssd_pascal_webcam.py │ ├── draw_net.sh │ ├── multiscale-sgnet-03-solver.prototxt │ ├── multiscale-sgnet-03.prototxt │ ├── old_densebox │ │ ├── README.md │ │ ├── cur_net.prototxt │ │ ├── multiscale-sgnet-03-solver.prototxt │ │ ├── multiscale-sgnet-03.prototxt │ │ ├── plot.py │ │ ├── prepare_data.sh │ │ ├── prepare_data │ │ │ ├── README.md │ │ │ ├── matlab │ │ │ │ ├── .gitignore │ │ │ │ ├── filter_gt.m │ │ │ │ ├── kitti_to_mine.m │ │ │ │ ├── objectKITTITrain.mat │ │ │ │ ├── plotbox.m │ │ │ │ ├── preprocess_jpg.m │ │ │ │ └── val_idx.mat │ │ │ └── python │ │ │ │ ├── .gitignore │ │ │ │ └── prepare.py │ │ ├── show_multiscale-sgnet-03.prototxt │ │ ├── show_result.sh │ │ ├── test_model.sh │ │ ├── test_multiscale-sgnet-03.prototxt │ │ ├── train.sh │ │ └── val_gt_file_list.txt │ ├── old_densebox_python │ │ ├── cur_net.prototxt │ │ ├── dense_kitti.py │ │ └── multiscale-sgnet-03.prototxt │ ├── prepare_data.sh │ ├── prepare_data │ │ ├── README.md │ │ ├── matlab │ │ │ ├── .gitignore │ │ │ ├── filter_gt.m │ │ │ ├── kitti_to_mine.m │ │ │ ├── objectKITTITrain.mat │ │ │ ├── plotbox.m │ │ │ ├── preprocess_jpg.m │ │ │ └── val_idx.mat │ │ └── python │ │ │ ├── .gitignore │ │ │ └── prepare.py │ ├── 
show_multiscale-sgnet-03.prototxt │ ├── show_result.sh │ ├── snapshot │ │ └── sgnet03 │ │ │ ├── sgnet03_iter_597000.caffemodel │ │ │ └── sgnet03_iter_597000.solverstate │ ├── ssd │ │ ├── .gitignore │ │ ├── create_kitti_data.py │ │ ├── eval_ssd.sh │ │ ├── inception │ │ │ └── 552x552 │ │ │ │ ├── deploy.prototxt │ │ │ │ ├── eval_solver.prototxt │ │ │ │ ├── run_test.prototxt │ │ │ │ ├── solver.prototxt │ │ │ │ ├── test.prototxt │ │ │ │ └── train.prototxt │ │ ├── labelmap.prototxt │ │ ├── ssd_inception_v3_test_one_path.prototxt │ │ ├── ssd_kitti_inception.py │ │ ├── ssd_kitti_inception_multi_scale.py │ │ ├── ssd_kitti_inception_multi_scale_0.7.py │ │ └── ssd_kitti_vgg.py │ ├── test_model.sh │ ├── test_multiscale-sgnet-03.prototxt │ ├── train.sh │ └── val_gt_file_list.txt └── voc │ └── old_densebox │ ├── dense_v2_inception │ ├── dense_v2_inception-solver.prototxt │ ├── dense_v2_inception.prototxt │ ├── gt │ │ ├── _test_gt.txt │ │ ├── aeroplane_test_gt.txt │ │ ├── bicycle_test_gt.txt │ │ ├── bird_test_gt.txt │ │ ├── boat_test_gt.txt │ │ ├── bottle_test_gt.txt │ │ ├── bus_test_gt.txt │ │ ├── car_test_gt.txt │ │ ├── cat_test_gt.txt │ │ ├── chair_test_gt.txt │ │ ├── cow_test_gt.txt │ │ ├── diningtable_test_gt.txt │ │ ├── dog_test_gt.txt │ │ ├── horse_test_gt.txt │ │ ├── motorbike_test_gt.txt │ │ ├── person_test_gt.txt │ │ ├── pottedplant_test_gt.txt │ │ ├── sheep_test_gt.txt │ │ ├── sofa_test_gt.txt │ │ ├── train_test_gt.txt │ │ └── tvmonitor_test_gt.txt │ ├── select.sh │ ├── test.sh │ ├── test_dense_v2_inception.prototxt │ ├── train.sh │ └── val_gt_file_list.txt │ ├── multiscale-vgg-16 │ ├── gt │ │ ├── _test_gt.txt │ │ ├── aeroplane_test_gt.txt │ │ ├── bicycle_test_gt.txt │ │ ├── bird_test_gt.txt │ │ ├── boat_test_gt.txt │ │ ├── bottle_test_gt.txt │ │ ├── bus_test_gt.txt │ │ ├── car_test_gt.txt │ │ ├── cat_test_gt.txt │ │ ├── chair_test_gt.txt │ │ ├── cow_test_gt.txt │ │ ├── diningtable_test_gt.txt │ │ ├── dog_test_gt.txt │ │ ├── horse_test_gt.txt │ │ ├── motorbike_test_gt.txt │ │ ├── person_test_gt.txt │ │ ├── pottedplant_test_gt.txt │ │ ├── sheep_test_gt.txt │ │ ├── sofa_test_gt.txt │ │ ├── train_test_gt.txt │ │ └── tvmonitor_test_gt.txt │ ├── multi-scale-vgg-16-solver.prototxt │ ├── multi-scale-vgg-16.prototxt │ ├── select.sh │ ├── show_map.sh │ ├── show_multi-scale-vgg-16.prototxt │ ├── test.sh │ ├── test_multiscale-sgnet-03.prototxt │ ├── train.sh │ └── val_gt_file_list.txt │ └── prepare_data │ ├── VOC2007 │ └── Annotations │ │ ├── 000001.xml │ │ ├── 000002.xml │ │ ├── 000003.xml │ │ ├── 000004.xml │ │ ├── 000005.xml │ │ ├── 000006.xml │ │ └── 000007.xml │ ├── class_list.txt │ ├── gen_test_gt_files_list.py │ ├── gt │ ├── aeroplane_test_gt.txt │ ├── bicycle_test_gt.txt │ ├── bird_test_gt.txt │ ├── boat_test_gt.txt │ ├── bottle_test_gt.txt │ ├── bus_test_gt.txt │ ├── car_test_gt.txt │ ├── cat_test_gt.txt │ ├── chair_test_gt.txt │ ├── cow_test_gt.txt │ ├── diningtable_test_gt.txt │ ├── dog_test_gt.txt │ ├── horse_test_gt.txt │ ├── motorbike_test_gt.txt │ ├── person_test_gt.txt │ ├── pottedplant_test_gt.txt │ ├── sheep_test_gt.txt │ ├── sofa_test_gt.txt │ ├── train_test_gt.txt │ └── tvmonitor_test_gt.txt │ ├── gt_val.txt │ ├── prepare.py │ ├── test.txt │ ├── test_name_size.txt │ └── trainval.txt └── paper └── DenseBoxPaper ├── DenseBoxV1_alan.pdf ├── DenseBoxV1_alan.tex ├── Makefile ├── README.md ├── figures ├── MALF_1-eps-converted-to.pdf ├── MALF_2-eps-converted-to.pdf ├── figure1-crop.pdf ├── figure2-crop.pdf ├── figure3-crop.pdf ├── figure4-crop.pdf ├── figure5-crop.pdf └── 
figure_landmark-crop.pdf ├── nips15submit_e.sty └── section ├── abstract.tex ├── experiments.tex ├── introduction.tex ├── model.tex ├── my_abstract.tex ├── references.bib └── related_work.tex /000791_BBoxCandiates.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/000791_BBoxCandiates.jpg -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DenseBox: A Fully Convolutional Neural Net Framework for Object Detection # 2 | 3 | 4 | ### Introduction ### 5 | 6 | DenseBox is a unified framework for object detection with a single network. You can use the code to train/evaluate a network for the object detection task. For more details, please refer to our [arXiv paper](http://arxiv.org/abs/1509.04874). 7 | 8 |

9 | DenseBox Example 10 |

11 | 12 | ### Directory Explanation ### 13 | 14 | * caffe_densebox: The caffe code for running densebox detection. This folder is similar to caffe, with only core cpp detection code added. 15 | * experiment: All the matlab/python/linux shell scripts for running experiments. If you want a start, please refer to experiment/kitti. 16 | * paper: latex files for writing the densebox paper. 17 | -------------------------------------------------------------------------------- /caffe_densebox/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites. 16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /caffe_densebox/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 5 | 6 | Check the users group in case you need help: 7 | https://groups.google.com/forum/#!forum/caffe-users 8 | -------------------------------------------------------------------------------- /caffe_densebox/caffe.cloc: -------------------------------------------------------------------------------- 1 | Bourne Shell 2 | filter remove_matches ^\s*# 3 | filter remove_inline #.*$ 4 | extension sh 5 | script_exe sh 6 | C 7 | filter remove_matches ^\s*// 8 | filter call_regexp_common C 9 | filter remove_inline //.*$ 10 | extension c 11 | extension ec 12 | extension pgc 13 | C++ 14 | filter remove_matches ^\s*// 15 | filter remove_inline //.*$ 16 | filter call_regexp_common C 17 | extension C 18 | extension cc 19 | extension cpp 20 | extension cxx 21 | extension pcc 22 | C/C++ Header 23 | filter remove_matches ^\s*// 24 | filter call_regexp_common C 25 | filter remove_inline //.*$ 26 | extension H 27 | extension h 28 | extension hh 29 | extension hpp 30 | CUDA 31 | filter remove_matches ^\s*// 32 | filter remove_inline //.*$ 33 | filter call_regexp_common C 34 | extension cu 35 | Python 36 | filter remove_matches ^\s*# 37 | filter docstring_to_C 38 | filter call_regexp_common C 39 | filter remove_inline #.*$ 40 | extension py 41 | make 42 | filter remove_matches ^\s*# 43 | filter remove_inline #.*$ 44 | extension Gnumakefile 45 | extension Makefile 46 | extension am 47 | extension gnumakefile 48 | extension makefile 49 | filename Gnumakefile 50 | filename Makefile 51 | filename gnumakefile 52 | filename makefile 53 | script_exe make 54 | -------------------------------------------------------------------------------- /caffe_densebox/cmake/Modules/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMBD libraries and headers 2 | # LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use 
LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license. 10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") 16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) 17 | 18 | include(FindPackageHandleStandardArgs) 19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 20 | 21 | if(LMDB_FOUND) 22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") 23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 24 | 25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h 26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) 27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") 28 | endif() 29 | -------------------------------------------------------------------------------- /caffe_densebox/cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # SNAPPY_FOUND 8 | # Snappy_INCLUDE_DIR 9 | # Snappy_LIBRARIES 10 | 11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h 12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) 13 | 14 | find_library(Snappy_LIBRARIES NAMES snappy 15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) 19 | 20 | if(SNAPPY_FOUND) 21 | message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") 22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) 23 | 24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h 25 | SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) 26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") 27 | endif() 28 | 29 | -------------------------------------------------------------------------------- /caffe_densebox/cmake/Modules/FindvecLib.cmake: -------------------------------------------------------------------------------- 1 | # Find the vecLib libraries as part of Accelerate.framework or as standalon framework 2 | # 3 | # The following are set after configuration is done: 4 | # VECLIB_FOUND 5 | # vecLib_INCLUDE_DIR 6 | # vecLib_LINKER_LIBS 7 | 8 | 9 | if(NOT APPLE) 10 | return() 11 | endif() 12 | 13 | set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") 14 | 15 | find_path(vecLib_INCLUDE_DIR vecLib.h 16 | DOC "vecLib include directory" 17 | PATHS /System/Library/${__veclib_include_suffix} 18 | /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} 19 | /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) 20 | 21 | include(FindPackageHandleStandardArgs) 22 | find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) 23 | 24 | if(VECLIB_FOUND) 25 | if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") 26 
| set(vecLib_LINKER_LIBS -lcblas "-framework vecLib") 27 | message(STATUS "Found standalone vecLib.framework") 28 | else() 29 | set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate") 30 | message(STATUS "Found vecLib as part of Accelerate.framework") 31 | endif() 32 | 33 | mark_as_advanced(vecLib_INCLUDE_DIR) 34 | endif() 35 | -------------------------------------------------------------------------------- /caffe_densebox/cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /caffe_densebox/cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* NVIDA Cuda */ 8 | #cmakedefine HAVE_CUDA 9 | 10 | /* NVIDA cuDNN */ 11 | #cmakedefine HAVE_CUDNN 12 | #cmakedefine USE_CUDNN 13 | 14 | /* NVIDA cuDNN */ 15 | #cmakedefine CPU_ONLY 16 | 17 | /* Test device */ 18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 19 | 20 | /* Temporary (TODO: remove) */ 21 | #if 1 22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 24 | #define CMAKE_EXT ".gen.cmake" 25 | #else 26 | #define CMAKE_SOURCE_DIR "src/" 27 | #define EXAMPLES_SOURCE_DIR "examples/" 28 | #define CMAKE_EXT "" 29 | #endif 30 | 31 | /* Matlab */ 32 | #cmakedefine HAVE_MATLAB 33 | 34 | /* IO libraries */ 35 | #cmakedefine USE_OPENCV 36 | #cmakedefine USE_LEVELDB 37 | #cmakedefine USE_LMDB 38 | #cmakedefine ALLOW_LMDB_NOLOCK 39 | -------------------------------------------------------------------------------- /caffe_densebox/data/cifar10/get_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This scripts downloads the CIFAR10 (binary version) data and unzips it. 3 | 4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 5 | cd $DIR 6 | 7 | echo "Downloading..." 8 | 9 | wget --no-check-certificate http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz 10 | 11 | echo "Unzipping..." 12 | 13 | tar -xf cifar-10-binary.tar.gz && rm -f cifar-10-binary.tar.gz 14 | mv cifar-10-batches-bin/* . && rm -rf cifar-10-batches-bin 15 | 16 | # Creation is split out because leveldb sometimes causes segfault 17 | # and needs to be re-created. 18 | 19 | echo "Done." 20 | -------------------------------------------------------------------------------- /caffe_densebox/data/ilsvrc12/get_ilsvrc_aux.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # 3 | # N.B. This does not download the ilsvrcC12 data set, as it is gargantuan. 
4 | # This script downloads the imagenet example auxiliary files including: 5 | # - the ilsvrc12 image mean, binaryproto 6 | # - synset ids and words 7 | # - Python pickle-format data of ImageNet graph structure and relative infogain 8 | # - the training splits with labels 9 | 10 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 11 | cd $DIR 12 | 13 | echo "Downloading..." 14 | 15 | wget -c http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz 16 | 17 | echo "Unzipping..." 18 | 19 | tar -xf caffe_ilsvrc12.tar.gz && rm -f caffe_ilsvrc12.tar.gz 20 | 21 | echo "Done." 22 | -------------------------------------------------------------------------------- /caffe_densebox/data/mnist/get_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This scripts downloads the mnist data and unzips it. 3 | 4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 5 | cd $DIR 6 | 7 | echo "Downloading..." 8 | 9 | for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte 10 | do 11 | if [ ! -e $fname ]; then 12 | wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz 13 | gunzip ${fname}.gz 14 | fi 15 | done 16 | -------------------------------------------------------------------------------- /caffe_densebox/docker/Makefile: -------------------------------------------------------------------------------- 1 | # A makefile to build the docker images for caffe. 2 | # Two caffe images will be built: 3 | # caffe:cpu --> A CPU-only build of caffe. 4 | # caffe:gpu --> A GPU-enabled build using the latest CUDA and CUDNN versions. 5 | 6 | DOCKER ?= docker 7 | 8 | all: docker_files standalone 9 | 10 | .PHONY: standalone devel 11 | 12 | standalone: cpu_standalone gpu_standalone 13 | 14 | 15 | cpu_standalone: standalone/cpu/Dockerfile 16 | $(DOCKER) build -t caffe:cpu standalone/cpu 17 | 18 | gpu_standalone: standalone/gpu/Dockerfile 19 | $(DOCKER) build -t caffe:gpu standalone/gpu 20 | 21 | docker_files: standalone_files 22 | 23 | standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile 24 | 25 | FROM_GPU = "nvidia/cuda:cudnn" 26 | FROM_CPU = "ubuntu:14.04" 27 | GPU_CMAKE_ARGS = -DUSE_CUDNN=1 28 | CPU_CMAKE_ARGS = -DCPU_ONLY=1 29 | 30 | # A make macro to select the CPU or GPU base image. 31 | define from_image 32 | $(if $(strip $(findstring gpu,$@)),$(FROM_GPU),$(FROM_CPU)) 33 | endef 34 | 35 | # A make macro to select the CPU or GPU build args. 
36 | define build_args 37 | $(if $(strip $(findstring gpu,$@)),$(GPU_CMAKE_ARGS),$(CPU_CMAKE_ARGS)) 38 | endef 39 | 40 | # A make macro to construct the CPU or GPU Dockerfile from the template 41 | define create_docker_file 42 | @echo creating $@ 43 | @echo "FROM "$(from_image) > $@ 44 | @cat $^ | sed 's/$${CMAKE_ARGS}/$(build_args)/' >> $@ 45 | endef 46 | 47 | 48 | standalone/%/Dockerfile: templates/Dockerfile.template 49 | $(create_docker_file) 50 | 51 | -------------------------------------------------------------------------------- /caffe_densebox/docker/standalone/cpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | MAINTAINER caffe-maint@googlegroups.com 3 | 4 | RUN apt-get update && apt-get install -y --no-install-recommends \ 5 | build-essential \ 6 | cmake \ 7 | git \ 8 | wget \ 9 | libatlas-base-dev \ 10 | libboost-all-dev \ 11 | libgflags-dev \ 12 | libgoogle-glog-dev \ 13 | libhdf5-serial-dev \ 14 | libleveldb-dev \ 15 | liblmdb-dev \ 16 | libopencv-dev \ 17 | libprotobuf-dev \ 18 | libsnappy-dev \ 19 | protobuf-compiler \ 20 | python-dev \ 21 | python-numpy \ 22 | python-pip \ 23 | python-scipy && \ 24 | rm -rf /var/lib/apt/lists/* 25 | 26 | ENV CAFFE_ROOT=/opt/caffe 27 | WORKDIR $CAFFE_ROOT 28 | 29 | # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. 30 | ENV CLONE_TAG=master 31 | 32 | RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ 33 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ 34 | mkdir build && cd build && \ 35 | cmake -DCPU_ONLY=1 .. && \ 36 | make -j"$(nproc)" 37 | 38 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python 39 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH 40 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH 41 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig 42 | 43 | WORKDIR /workspace 44 | -------------------------------------------------------------------------------- /caffe_densebox/docker/standalone/gpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:cudnn 2 | MAINTAINER caffe-maint@googlegroups.com 3 | 4 | RUN apt-get update && apt-get install -y --no-install-recommends \ 5 | build-essential \ 6 | cmake \ 7 | git \ 8 | wget \ 9 | libatlas-base-dev \ 10 | libboost-all-dev \ 11 | libgflags-dev \ 12 | libgoogle-glog-dev \ 13 | libhdf5-serial-dev \ 14 | libleveldb-dev \ 15 | liblmdb-dev \ 16 | libopencv-dev \ 17 | libprotobuf-dev \ 18 | libsnappy-dev \ 19 | protobuf-compiler \ 20 | python-dev \ 21 | python-numpy \ 22 | python-pip \ 23 | python-scipy && \ 24 | rm -rf /var/lib/apt/lists/* 25 | 26 | ENV CAFFE_ROOT=/opt/caffe 27 | WORKDIR $CAFFE_ROOT 28 | 29 | # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. 30 | ENV CLONE_TAG=master 31 | 32 | RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ 33 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ 34 | mkdir build && cd build && \ 35 | cmake -DUSE_CUDNN=1 .. 
&& \ 36 | make -j"$(nproc)" 37 | 38 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python 39 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH 40 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH 41 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig 42 | 43 | WORKDIR /workspace 44 | -------------------------------------------------------------------------------- /caffe_densebox/docker/templates/Dockerfile.template: -------------------------------------------------------------------------------- 1 | MAINTAINER caffe-maint@googlegroups.com 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends \ 4 | build-essential \ 5 | cmake \ 6 | git \ 7 | wget \ 8 | libatlas-base-dev \ 9 | libboost-all-dev \ 10 | libgflags-dev \ 11 | libgoogle-glog-dev \ 12 | libhdf5-serial-dev \ 13 | libleveldb-dev \ 14 | liblmdb-dev \ 15 | libopencv-dev \ 16 | libprotobuf-dev \ 17 | libsnappy-dev \ 18 | protobuf-compiler \ 19 | python-dev \ 20 | python-numpy \ 21 | python-pip \ 22 | python-scipy && \ 23 | rm -rf /var/lib/apt/lists/* 24 | 25 | ENV CAFFE_ROOT=/opt/caffe 26 | WORKDIR $CAFFE_ROOT 27 | 28 | # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. 29 | ENV CLONE_TAG=master 30 | 31 | RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ 32 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ 33 | mkdir build && cd build && \ 34 | cmake ${CMAKE_ARGS} .. && \ 35 | make -j"$(nproc)" 36 | 37 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python 38 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH 39 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH 40 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig 41 | 42 | WORKDIR /workspace 43 | -------------------------------------------------------------------------------- /caffe_densebox/docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /caffe_densebox/docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 
6 | -------------------------------------------------------------------------------- /caffe_densebox/docs/_config.yml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - 3 | scope: 4 | path: "" # an empty string here means all files in the project 5 | values: 6 | layout: "default" 7 | 8 | -------------------------------------------------------------------------------- /caffe_densebox/docs/images/GitHub-Mark-64px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/images/GitHub-Mark-64px.png -------------------------------------------------------------------------------- /caffe_densebox/docs/images/caffeine-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/images/caffeine-icon.png -------------------------------------------------------------------------------- /caffe_densebox/docs/stylesheets/reset.css: -------------------------------------------------------------------------------- 1 | /* MeyerWeb Reset */ 2 | 3 | html, body, div, span, applet, object, iframe, 4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 5 | a, abbr, acronym, address, big, cite, code, 6 | del, dfn, em, img, ins, kbd, q, s, samp, 7 | small, strike, strong, sub, sup, tt, var, 8 | b, u, i, center, 9 | dl, dt, dd, ol, ul, li, 10 | fieldset, form, label, legend, 11 | table, caption, tbody, tfoot, thead, tr, th, td, 12 | article, aside, canvas, details, embed, 13 | figure, figcaption, footer, header, hgroup, 14 | menu, nav, output, ruby, section, summary, 15 | time, mark, audio, video { 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/convolution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convolution 3 | --- 4 | # Caffeinated Convolution 5 | 6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication. 7 | This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices. 8 | 9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo). 10 | 11 | As it turns out, this same reduction was independently explored in the context of conv. nets by 12 | 13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006. 
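
To make the reduction concrete, below is a minimal NumPy sketch of the im2col-plus-GEMM idea. It is an illustration only, not Caffe's implementation (the real code lives in `src/caffe/util/im2col.*` and hands the multiplication to BLAS/cuBLAS); the helper names, the stride-1/no-padding simplification, and the shapes are assumptions made for this sketch.

```python
# Illustration only: a stride-1, no-padding convolution written as
# im2col followed by a single matrix-matrix multiplication.
# (Names and shapes here are assumptions for the sketch, not Caffe's API.)
import numpy as np

def im2col(x, kh, kw):
    """Unroll every kh x kw patch of x (shape: channels, height, width)
    into one column, giving a (channels*kh*kw, out_h*out_w) matrix."""
    c, h, w = x.shape
    out_h, out_w = h - kh + 1, w - kw + 1
    cols = np.empty((c * kh * kw, out_h * out_w), dtype=x.dtype)
    row = 0
    for ci in range(c):
        for i in range(kh):
            for j in range(kw):
                cols[row] = x[ci, i:i + out_h, j:j + out_w].reshape(-1)
                row += 1
    return cols

def conv_as_gemm(x, weights):
    """weights shape: (num_output, channels, kh, kw)."""
    n_out, c, kh, kw = weights.shape
    cols = im2col(x, kh, kw)              # (c*kh*kw, out_h*out_w)
    w_mat = weights.reshape(n_out, -1)    # (num_output, c*kh*kw)
    out = w_mat @ cols                    # the single matrix-matrix multiply
    out_h, out_w = x.shape[1] - kh + 1, x.shape[2] - kw + 1
    return out.reshape(n_out, out_h, out_w)

x = np.random.randn(3, 8, 8).astype(np.float32)    # one 3-channel 8x8 input
w = np.random.randn(4, 3, 3, 3).astype(np.float32)  # four 3x3 filters
print(conv_as_gemm(x, w).shape)  # (4, 6, 6)
```

This is the same shape bookkeeping described above; once the patches are laid out as columns, the heavy lifting is one GEMM call that a tuned BLAS library can saturate.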
14 | -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/fig/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/tutorial/fig/.gitignore -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/fig/backward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/tutorial/fig/backward.jpg -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/fig/forward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/tutorial/fig/forward.jpg -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/fig/forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/tutorial/fig/forward_backward.png -------------------------------------------------------------------------------- /caffe_densebox/docs/tutorial/fig/layer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/docs/tutorial/fig/layer.jpg -------------------------------------------------------------------------------- /caffe_densebox/examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | file(GLOB_RECURSE examples_srcs "${PROJECT_SOURCE_DIR}/examples/*.cpp") 2 | 3 | foreach(source_file ${examples_srcs}) 4 | # get file name 5 | get_filename_component(name ${source_file} NAME_WE) 6 | 7 | # get folder name 8 | get_filename_component(path ${source_file} PATH) 9 | get_filename_component(folder ${path} NAME_WE) 10 | 11 | add_executable(${name} ${source_file}) 12 | target_link_libraries(${name} ${Caffe_LINK}) 13 | caffe_default_properties(${name}) 14 | 15 | # set back RUNTIME_OUTPUT_DIRECTORY 16 | set_target_properties(${name} PROPERTIES 17 | RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/examples/${folder}") 18 | 19 | caffe_set_solution_folder(${name} examples) 20 | 21 | # install 22 | install(TARGETS ${name} DESTINATION bin) 23 | 24 | if(UNIX OR APPLE) 25 | # Funny command to make tutorials work 26 | # TODO: remove in future as soon as naming is standartaized everywhere 27 | set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX}) 28 | add_custom_command(TARGET ${name} POST_BUILD 29 | COMMAND ln -sf "${__outname}" "${__outname}.bin") 30 | endif() 31 | endforeach() 32 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_full_sigmoid_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: 
"examples/cifar10/cifar10_full_sigmoid_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 200 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_sigmoid_train_test_bn.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 10 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | #weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "step" 18 | gamma: 1 19 | stepsize: 5000 20 | # Display every 200 iterations 21 | display: 100 22 | # The maximum number of iterations 23 | max_iter: 60000 24 | # snapshot intermediate results 25 | snapshot: 10000 26 | snapshot_prefix: "examples/cifar10_full_sigmoid_bn" 27 | # solver mode: CPU or GPU 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_full_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 
13 | base_lr: 0.001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 60000 22 | # snapshot intermediate results 23 | snapshot: 10000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_full_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.0001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 65000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_full_solver_lr2.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by factor 0f 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.00001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 70000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_format: HDF5 25 | snapshot_prefix: "examples/cifar10/cifar10_full" 26 | # solver mode: CPU or GPU 27 | solver_mode: GPU 28 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 
6 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 4000 21 | # snapshot intermediate results 22 | snapshot: 4000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/cifar10_quick_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.0001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 5000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_format: HDF5 24 | snapshot_prefix: "examples/cifar10/cifar10_quick" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/create_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the cifar data into the $DBTYPE (lmdb) database format. 3 | 4 | EXAMPLE=examples/cifar10 5 | DATA=data/cifar10 6 | DBTYPE=lmdb 7 | 8 | echo "Creating $DBTYPE..." 9 | 10 | rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE 11 | 12 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE 13 | 14 | echo "Computing image mean..." 15 | 16 | ./build/tools/compute_image_mean -backend=$DBTYPE \ 17 | $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto 18 | 19 | echo "Done."
20 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/train_full.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \ 11 | --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate.h5 12 | 13 | # reduce learning rate by factor of 10 14 | $TOOLS/caffe train \ 15 | --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \ 16 | --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate.h5 17 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/train_full_sigmoid.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_sigmoid_solver.prototxt 7 | 8 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/train_full_sigmoid_bn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt 7 | 8 | -------------------------------------------------------------------------------- /caffe_densebox/examples/cifar10/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_quick_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 after 8 epochs 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 11 | --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 12 | -------------------------------------------------------------------------------- /caffe_densebox/examples/finetune_flickr_style/style_names.txt: -------------------------------------------------------------------------------- 1 | Detailed 2 | Pastel 3 | Melancholy 4 | Noir 5 | HDR 6 | Vintage 7 | Long Exposure 8 | Horror 9 | Sunny 10 | Bright 11 | Hazy 12 | Bokeh 13 | Serene 14 | Texture 15 | Ethereal 16 | Macro 17 | Depth of Field 18 | Geometric Composition 19 | Minimal 20 | Romantic 21 | -------------------------------------------------------------------------------- /caffe_densebox/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt" 2 | test_iter: 100 3 | test_interval: 1000 4 | base_lr: 0.001 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 20000 8 | display: 20 9 | max_iter: 100000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune" 14 | -------------------------------------------------------------------------------- /caffe_densebox/examples/hdf5_classification/nonlinear_auto_test.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | 
top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/test.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: "InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | -------------------------------------------------------------------------------- /caffe_densebox/examples/hdf5_classification/nonlinear_auto_train.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | top: "label" 6 | hdf5_data_param { 7 | source: "examples/hdf5_classification/data/train.txt" 8 | batch_size: 10 9 | } 10 | } 11 | layer { 12 | name: "ip1" 13 | type: "InnerProduct" 14 | bottom: "data" 15 | top: "ip1" 16 | inner_product_param { 17 | num_output: 40 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "ip1" 27 | top: "ip1" 28 | } 29 | layer { 30 | name: "ip2" 31 | type: "InnerProduct" 32 | bottom: "ip1" 33 | top: "ip2" 34 | inner_product_param { 35 | num_output: 2 36 | weight_filler { 37 | type: "xavier" 38 | } 39 | } 40 | } 41 | layer { 42 | name: "accuracy" 43 | type: "Accuracy" 44 | bottom: "ip2" 45 | bottom: "label" 46 | top: "accuracy" 47 | } 48 | layer { 49 | name: "loss" 50 | type: "SoftmaxWithLoss" 51 | bottom: "ip2" 52 | bottom: "label" 53 | top: "loss" 54 | } 55 | -------------------------------------------------------------------------------- /caffe_densebox/examples/hdf5_classification/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "LogisticRegressionNet" 2 | layer { 3 | name: "data" 4 | type: "HDF5Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | hdf5_data_param { 11 | source: "examples/hdf5_classification/data/train.txt" 12 | batch_size: 10 13 | } 14 | } 15 | layer { 16 | name: "data" 17 | type: "HDF5Data" 18 | top: "data" 19 | top: "label" 20 | include { 21 | phase: TEST 22 | } 23 | hdf5_data_param { 24 | source: "examples/hdf5_classification/data/test.txt" 25 | batch_size: 10 26 | } 27 | } 28 | layer { 29 | name: "fc1" 30 | type: "InnerProduct" 31 | bottom: "data" 32 | top: "fc1" 33 | param { 34 | lr_mult: 1 35 | decay_mult: 1 36 | } 37 | param { 38 | lr_mult: 2 39 | decay_mult: 0 40 | } 41 | inner_product_param { 42 | num_output: 2 43 | weight_filler { 44 | type: "xavier" 45 | } 46 | bias_filler { 47 | type: "constant" 48 | value: 0 49 | } 50 | } 51 | } 52 | layer { 53 | name: "loss" 54 | type: "SoftmaxWithLoss" 55 | bottom: "fc1" 56 | bottom: "label" 57 | top: "loss" 58 | } 59 | layer { 60 | name: "accuracy" 61 | type: "Accuracy" 62 | bottom: "fc1" 63 | bottom: "label" 64 | top: "accuracy" 65 | include { 66 | phase: TEST 67 | } 68 | } 69 | -------------------------------------------------------------------------------- 
/caffe_densebox/examples/imagenet/make_imagenet_mean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Compute the mean image from the imagenet training lmdb 3 | # N.B. this is available in data/ilsvrc12 4 | 5 | EXAMPLE=examples/imagenet 6 | DATA=data/ilsvrc12 7 | TOOLS=build/tools 8 | 9 | $TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ 10 | $DATA/imagenet_mean.binaryproto 11 | 12 | echo "Done." 13 | -------------------------------------------------------------------------------- /caffe_densebox/examples/imagenet/resume_training.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt \ 5 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 6 | -------------------------------------------------------------------------------- /caffe_densebox/examples/imagenet/train_caffenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/examples/images/cat.jpg -------------------------------------------------------------------------------- /caffe_densebox/examples/images/cat_gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/examples/images/cat_gray.jpg -------------------------------------------------------------------------------- /caffe_densebox/examples/images/fish-bike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/examples/images/fish-bike.jpg -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/create_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into lmdb/leveldb format, 3 | # depending on the value assigned to $BACKEND. 4 | 5 | EXAMPLE=examples/mnist 6 | DATA=data/mnist 7 | BUILD=build/examples/mnist 8 | 9 | BACKEND="lmdb" 10 | 11 | echo "Creating ${BACKEND}..." 12 | 13 | rm -rf $EXAMPLE/mnist_train_${BACKEND} 14 | rm -rf $EXAMPLE/mnist_test_${BACKEND} 15 | 16 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ 17 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND} 18 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ 19 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND} 20 | 21 | echo "Done." 
22 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_adadelta_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 1.0 11 | lr_policy: "fixed" 12 | momentum: 0.95 13 | weight_decay: 0.0005 14 | # Display every 100 iterations 15 | display: 100 16 | # The maximum number of iterations 17 | max_iter: 10000 18 | # snapshot intermediate results 19 | snapshot: 5000 20 | snapshot_prefix: "examples/mnist/lenet_adadelta" 21 | # solver mode: CPU or GPU 22 | solver_mode: GPU 23 | type: "AdaDelta" 24 | delta: 1e-6 25 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_auto_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | train_net: "mnist/lenet_auto_train.prototxt" 3 | test_net: "mnist/lenet_auto_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # The base learning rate, momentum and the weight decay of the network. 11 | base_lr: 0.01 12 | momentum: 0.9 13 | weight_decay: 0.0005 14 | # The learning rate policy 15 | lr_policy: "inv" 16 | gamma: 0.0001 17 | power: 0.75 18 | # Display every 100 iterations 19 | display: 100 20 | # The maximum number of iterations 21 | max_iter: 10000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "mnist/lenet" 25 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_multistep_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
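# (With lr_policy "multistep" below, the learning rate is multiplied by gamma at each listed stepvalue: 0.01 drops to 0.009 at iteration 5000, then 0.0081, 0.00729, 0.006561, and finally 0.01 * 0.9^5, roughly 0.0059, after iteration 9500.)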
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "multistep" 15 | gamma: 0.9 16 | stepvalue: 5000 17 | stepvalue: 7000 18 | stepvalue: 8000 19 | stepvalue: 9000 20 | stepvalue: 9500 21 | # Display every 100 iterations 22 | display: 100 23 | # The maximum number of iterations 24 | max_iter: 10000 25 | # snapshot intermediate results 26 | snapshot: 5000 27 | snapshot_prefix: "examples/mnist/lenet_multistep" 28 | # solver mode: CPU or GPU 29 | solver_mode: GPU 30 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_solver_adam.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | # this follows "ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION" 3 | net: "examples/mnist/lenet_train_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # All parameters are from the cited paper above 11 | base_lr: 0.001 12 | momentum: 0.9 13 | momentum2: 0.999 14 | # since Adam dynamically changes the learning rate, we set the base learning 15 | # rate to a fixed value 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | type: "Adam" 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/lenet_solver_rmsprop.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 
8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.0 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet_rmsprop" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | type: "RMSProp" 27 | rms_decay: 0.98 28 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/mnist_autoencoder_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder" 17 | momentum: 0.9 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/mnist_autoencoder_solver_adadelta.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 1.0 9 | lr_policy: "fixed" 10 | momentum: 0.95 11 | delta: 1e-8 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adadelta_train" 17 | # solver mode: CPU or GPU 18 | solver_mode: GPU 19 | type: "AdaDelta" 20 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "fixed" 10 | display: 100 11 | max_iter: 65000 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train" 15 | # solver mode: CPU or GPU 16 | solver_mode: GPU 17 | type: "AdaGrad" 18 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train" 17 | 
momentum: 0.95 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | type: "Nesterov" 21 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_lenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt 4 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_lenet_adam.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt 4 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_lenet_consolidated.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/lenet_consolidated_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_lenet_rmsprop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_rmsprop.prototxt 4 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_mnist_autoencoder.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_mnist_autoencoder_adadelta.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_mnist_autoencoder_adagrad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/mnist/train_mnist_autoencoder_nesterov.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt 5 | -------------------------------------------------------------------------------- /caffe_densebox/examples/net_surgery/conv.prototxt: -------------------------------------------------------------------------------- 1 | # Simple single-layer network to showcase editing model parameters. 
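# (For the 1x1x100x100 input defined below, a 5x5 kernel with stride 1 and no padding gives a 3x96x96 output: (100 - 5) / 1 + 1 = 96 along each spatial axis, with num_output 3 channels.)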
2 | name: "convolution" 3 | layer { 4 | name: "data" 5 | type: "Input" 6 | top: "data" 7 | input_param { shape: { dim: 1 dim: 1 dim: 100 dim: 100 } } 8 | } 9 | layer { 10 | name: "conv" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv" 14 | convolution_param { 15 | num_output: 3 16 | kernel_size: 5 17 | stride: 1 18 | weight_filler { 19 | type: "gaussian" 20 | std: 0.01 21 | } 22 | bias_filler { 23 | type: "constant" 24 | value: 0 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /caffe_densebox/examples/pycaffe/layers/pyloss.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | 4 | 5 | class EuclideanLossLayer(caffe.Layer): 6 | """ 7 | Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer 8 | to demonstrate the class interface for developing layers in Python. 9 | """ 10 | 11 | def setup(self, bottom, top): 12 | # check input pair 13 | if len(bottom) != 2: 14 | raise Exception("Need two inputs to compute distance.") 15 | 16 | def reshape(self, bottom, top): 17 | # check input dimensions match 18 | if bottom[0].count != bottom[1].count: 19 | raise Exception("Inputs must have the same dimension.") 20 | # difference is shape of inputs 21 | self.diff = np.zeros_like(bottom[0].data, dtype=np.float32) 22 | # loss output is scalar 23 | top[0].reshape(1) 24 | 25 | def forward(self, bottom, top): 26 | self.diff[...] = bottom[0].data - bottom[1].data 27 | top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2. 28 | 29 | def backward(self, top, propagate_down, bottom): 30 | for i in range(2): 31 | if not propagate_down[i]: 32 | continue 33 | if i == 0: 34 | sign = 1 35 | else: 36 | sign = -1 37 | bottom[i].diff[...] = sign * self.diff / bottom[i].num 38 | -------------------------------------------------------------------------------- /caffe_densebox/examples/pycaffe/linreg.prototxt: -------------------------------------------------------------------------------- 1 | name: 'LinearRegressionExample' 2 | # define a simple network for linear regression on dummy data 3 | # that computes the loss by a PythonLayer. 4 | layer { 5 | type: 'DummyData' 6 | name: 'x' 7 | top: 'x' 8 | dummy_data_param { 9 | shape: { dim: 10 dim: 3 dim: 2 } 10 | data_filler: { type: 'gaussian' } 11 | } 12 | } 13 | layer { 14 | type: 'DummyData' 15 | name: 'y' 16 | top: 'y' 17 | dummy_data_param { 18 | shape: { dim: 10 dim: 3 dim: 2 } 19 | data_filler: { type: 'gaussian' } 20 | } 21 | } 22 | # include InnerProduct layers for parameters 23 | # so the net will need backward 24 | layer { 25 | type: 'InnerProduct' 26 | name: 'ipx' 27 | top: 'ipx' 28 | bottom: 'x' 29 | inner_product_param { 30 | num_output: 10 31 | weight_filler { type: 'xavier' } 32 | } 33 | } 34 | layer { 35 | type: 'InnerProduct' 36 | name: 'ipy' 37 | top: 'ipy' 38 | bottom: 'y' 39 | inner_product_param { 40 | num_output: 10 41 | weight_filler { type: 'xavier' } 42 | } 43 | } 44 | layer { 45 | type: 'Python' 46 | name: 'loss' 47 | top: 'loss' 48 | bottom: 'ipx' 49 | bottom: 'ipy' 50 | python_param { 51 | # the module name -- usually the filename -- that needs to be in $PYTHONPATH 52 | module: 'pyloss' 53 | # the layer name -- the class name in the module 54 | layer: 'EuclideanLossLayer' 55 | } 56 | # set loss weight so Caffe knows this is a loss layer. 
57 | # since PythonLayer inherits directly from Layer, this isn't automatically 58 | # known to Caffe 59 | loss_weight: 1 60 | } 61 | -------------------------------------------------------------------------------- /caffe_densebox/examples/siamese/create_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into leveldb format. 3 | 4 | EXAMPLES=./build/examples/siamese 5 | DATA=./data/mnist 6 | 7 | echo "Creating leveldb..." 8 | 9 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb 10 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb 11 | 12 | $EXAMPLES/convert_mnist_siamese_data.bin \ 13 | $DATA/train-images-idx3-ubyte \ 14 | $DATA/train-labels-idx1-ubyte \ 15 | ./examples/siamese/mnist_siamese_train_leveldb 16 | $EXAMPLES/convert_mnist_siamese_data.bin \ 17 | $DATA/t10k-images-idx3-ubyte \ 18 | $DATA/t10k-labels-idx1-ubyte \ 19 | ./examples/siamese/mnist_siamese_test_leveldb 20 | 21 | echo "Done." 22 | -------------------------------------------------------------------------------- /caffe_densebox/examples/siamese/mnist_siamese_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/siamese/mnist_siamese_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0000 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 50000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/siamese/mnist_siamese" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe_densebox/examples/siamese/train_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt 6 | -------------------------------------------------------------------------------- /caffe_densebox/examples/web_demo/exifutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script handles the skimage exif problem. 3 | """ 4 | 5 | from PIL import Image 6 | import numpy as np 7 | 8 | ORIENTATIONS = { # used in apply_orientation 9 | 2: (Image.FLIP_LEFT_RIGHT,), 10 | 3: (Image.ROTATE_180,), 11 | 4: (Image.FLIP_TOP_BOTTOM,), 12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90), 13 | 6: (Image.ROTATE_270,), 14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270), 15 | 8: (Image.ROTATE_90,) 16 | } 17 | 18 | 19 | def open_oriented_im(im_path): 20 | im = Image.open(im_path) 21 | if hasattr(im, '_getexif'): 22 | exif = im._getexif() 23 | if exif is not None and 274 in exif: 24 | orientation = exif[274] 25 | im = apply_orientation(im, orientation) 26 | img = np.asarray(im).astype(np.float32) / 255. 
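    # Normalize the array to H x W x 3 in [0, 1]: grayscale images are replicated across three channels below, and an alpha channel, if present, is dropped.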
27 | if img.ndim == 2: 28 | img = img[:, :, np.newaxis] 29 | img = np.tile(img, (1, 1, 3)) 30 | elif img.shape[2] == 4: 31 | img = img[:, :, :3] 32 | return img 33 | 34 | 35 | def apply_orientation(im, orientation): 36 | if orientation in ORIENTATIONS: 37 | for method in ORIENTATIONS[orientation]: 38 | im = im.transpose(method) 39 | return im 40 | -------------------------------------------------------------------------------- /caffe_densebox/examples/web_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | werkzeug 2 | flask 3 | tornado 4 | numpy 5 | pandas 6 | pillow 7 | pyyaml 8 | -------------------------------------------------------------------------------- /caffe_densebox/include/boost/threadpool.hpp: -------------------------------------------------------------------------------- 1 | /*! \file 2 | * \brief Main include. 3 | * 4 | * This is the only file you have to include in order to use the 5 | * complete threadpool library. 6 | * 7 | * Copyright (c) 2005-2007 Philipp Henkel 8 | * 9 | * Use, modification, and distribution are subject to the 10 | * Boost Software License, Version 1.0. (See accompanying file 11 | * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 12 | * 13 | * http://threadpool.sourceforge.net 14 | * 15 | */ 16 | 17 | #ifndef THREADPOOL_HPP_INCLUDED 18 | #define THREADPOOL_HPP_INCLUDED 19 | 20 | #include "./threadpool/future.hpp" 21 | #include "./threadpool/pool.hpp" 22 | 23 | #include "./threadpool/pool_adaptors.hpp" 24 | #include "./threadpool/task_adaptors.hpp" 25 | 26 | 27 | #endif // THREADPOOL_HPP_INCLUDED 28 | 29 | -------------------------------------------------------------------------------- /caffe_densebox/include/boost/threadpool/detail/scope_guard.hpp: -------------------------------------------------------------------------------- 1 | /*! \file 2 | * \brief TODO. 3 | * 4 | * TODO. 5 | * 6 | * Copyright (c) 2005-2007 Philipp Henkel 7 | * 8 | * Use, modification, and distribution are subject to the 9 | * Boost Software License, Version 1.0. (See accompanying file 10 | * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) 11 | * 12 | * http://threadpool.sourceforge.net 13 | * 14 | */ 15 | 16 | 17 | #ifndef THREADPOOL_DETAIL_SCOPE_GUARD_HPP_INCLUDED 18 | #define THREADPOOL_DETAIL_SCOPE_GUARD_HPP_INCLUDED 19 | 20 | 21 | 22 | #include 23 | 24 | 25 | namespace boost { namespace threadpool { namespace detail 26 | { 27 | 28 | // TODO documentation 29 | class scope_guard 30 | : private boost::noncopyable 31 | { 32 | function0 const m_function; 33 | bool m_is_active; 34 | 35 | public: 36 | scope_guard(function0 const & call_on_exit) 37 | : m_function(call_on_exit) 38 | , m_is_active(true) 39 | { 40 | } 41 | 42 | ~scope_guard() 43 | { 44 | if(m_is_active && m_function) 45 | { 46 | m_function(); 47 | } 48 | } 49 | 50 | void disable() 51 | { 52 | m_is_active = false; 53 | } 54 | }; 55 | 56 | 57 | 58 | 59 | 60 | 61 | } } } // namespace boost::threadpool::detail 62 | 63 | #endif // THREADPOOL_DETAIL_SCOPE_GUARD_HPP_INCLUDED 64 | 65 | 66 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 
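// A minimal usage sketch (the file names are placeholders, not files from this repository):
//   #include "caffe/caffe.hpp"
//   caffe::Net<float> net("deploy.prototxt", caffe::TEST);
//   net.CopyTrainedLayersFrom("weights.caffemodel");
//   net.Forward();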
3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/parallel.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | #include "caffe/solver.hpp" 16 | #include "caffe/solver_factory.hpp" 17 | #include "caffe/util/benchmark.hpp" 18 | #include "caffe/util/io.hpp" 19 | #include "caffe/util/upgrade_proto.hpp" 20 | #include "caffe/blob_transform.hpp" 21 | #include "caffe/caffe_wrapper.hpp" 22 | // #include "opencv2/videoio.hpp" 23 | #endif // CAFFE_CAFFE_HPP_ 24 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/caffe_wrapper.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * caffe_wrapper.hpp 3 | * 4 | * Created on: 2016年2月15日 5 | * Author: Alan_Huang 6 | */ 7 | 8 | #ifndef CAFFE_WRAPPER_HPP_ 9 | #define CAFFE_WRAPPER_HPP_ 10 | 11 | #include "caffe/caffe_wrapper_common.hpp" 12 | #include 13 | #include 14 | #include 15 | namespace caffe{ 16 | 17 | 18 | class CaffeDenseBoxDetector{ 19 | public: 20 | explicit CaffeDenseBoxDetector(const std::string proto_name, const std::string model_name, 21 | const bool use_cuda = true); 22 | explicit CaffeDenseBoxDetector(const std::string proto_name, const bool use_cuda = true); 23 | 24 | ~CaffeDenseBoxDetector(); 25 | 26 | void CopyFromModel(const std::string model_name); 27 | 28 | int ClassNum(); 29 | void SetCudaFlag(bool flag); 30 | 31 | bool LoadImgToBuffer(cv::Mat & src_img); 32 | bool SetRoiWithScale(const std::vector& roi_scale); 33 | 34 | void PredictOneImg(); 35 | 36 | std::vector< BBox >& GetBBoxResults(int class_id); 37 | std::vector > >& GetBBoxResults(); 38 | 39 | private: 40 | CaffeDenseBoxDetector(const CaffeDenseBoxDetector&); 41 | CaffeDenseBoxDetector& operator=(const CaffeDenseBoxDetector&); 42 | 43 | void* net_; 44 | 45 | }; 46 | 47 | 48 | } 49 | 50 | 51 | 52 | #endif /* CAFFE_WRAPPER_HPP_ */ 53 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/caffe_wrapper_common.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * caffe_wrapper.hpp 3 | 4 | * Author: Alan_Huang 5 | */ 6 | 7 | #ifndef CAFFE_WRAPPER_COMMON_HPP_ 8 | #define CAFFE_WRAPPER_COMMON_HPP_ 9 | 10 | #include 11 | #include 12 | 13 | namespace caffe{ 14 | 15 | struct ROIWithScale{ 16 | float l,t,r,b,scale; 17 | ROIWithScale(){ 18 | l=t=r=b=scale=0; 19 | } 20 | ROIWithScale(float l_, float t_, float r_, float b_, float scale_){ 21 | l=l_;t=t_;r=r_;b=b_; scale = scale_; 22 | } 23 | friend std::ostream& operator << (std::ostream & stream,const ROIWithScale & rect){ 24 | stream<< "("< 31 | struct BBox{ 32 | BBox(){ 33 | id = center_h = center_w = score = x1 = x2 = y1 = y2 = 0; 34 | } 35 | Dtype score,x1,y1,x2,y2, center_h, center_w,id; 36 | std::vector data; 37 | static bool greater(const BBox& a, const BBox& b){ 38 | return a.score > b.score; 39 | } 40 | }; 41 | 42 | 43 | } 44 | 45 | 46 | 47 | 48 | #endif /* CAFFE_WRAPPER_COMMON_HPP_ */ 49 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_lcn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LCN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LCN_LAYER_HPP_ 3 | 4 | #include 5 
| 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | #include "caffe/layers/power_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | template 17 | class CuDNNLCNLayer : public LRNLayer { 18 | public: 19 | explicit CuDNNLCNLayer(const LayerParameter& param) 20 | : LRNLayer(param), handles_setup_(false), tempDataSize(0), 21 | tempData1(NULL), tempData2(NULL) {} 22 | virtual void LayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | virtual void Reshape(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual ~CuDNNLCNLayer(); 27 | 28 | protected: 29 | virtual void Forward_gpu(const vector*>& bottom, 30 | const vector*>& top); 31 | virtual void Backward_gpu(const vector*>& top, 32 | const vector& propagate_down, const vector*>& bottom); 33 | 34 | bool handles_setup_; 35 | cudnnHandle_t handle_; 36 | cudnnLRNDescriptor_t norm_desc_; 37 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 38 | 39 | int size_, pre_pad_; 40 | Dtype alpha_, beta_, k_; 41 | 42 | size_t tempDataSize; 43 | void *tempData1, *tempData2; 44 | }; 45 | #endif 46 | 47 | } // namespace caffe 48 | 49 | #endif // CAFFE_CUDNN_LCN_LAYER_HPP_ 50 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_lrn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LRN_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | template 16 | class CuDNNLRNLayer : public LRNLayer { 17 | public: 18 | explicit CuDNNLRNLayer(const LayerParameter& param) 19 | : LRNLayer(param), handles_setup_(false) {} 20 | virtual void LayerSetUp(const vector*>& bottom, 21 | const vector*>& top); 22 | virtual void Reshape(const vector*>& bottom, 23 | const vector*>& top); 24 | virtual ~CuDNNLRNLayer(); 25 | 26 | protected: 27 | virtual void Forward_gpu(const vector*>& bottom, 28 | const vector*>& top); 29 | virtual void Backward_gpu(const vector*>& top, 30 | const vector& propagate_down, const vector*>& bottom); 31 | 32 | bool handles_setup_; 33 | cudnnHandle_t handle_; 34 | cudnnLRNDescriptor_t norm_desc_; 35 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 36 | 37 | int size_; 38 | Dtype alpha_, beta_, k_; 39 | }; 40 | #endif 41 | 42 | } // namespace caffe 43 | 44 | #endif // CAFFE_CUDNN_LRN_LAYER_HPP_ 45 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_relu_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ 2 | #define CAFFE_CUDNN_RELU_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/relu_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of ReLULayer. 
18 | */ 19 | template 20 | class CuDNNReLULayer : public ReLULayer { 21 | public: 22 | explicit CuDNNReLULayer(const LayerParameter& param) 23 | : ReLULayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNReLULayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_RELU_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_sigmoid_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/sigmoid_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of SigmoidLayer. 18 | */ 19 | template 20 | class CuDNNSigmoidLayer : public SigmoidLayer { 21 | public: 22 | explicit CuDNNSigmoidLayer(const LayerParameter& param) 23 | : SigmoidLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNSigmoidLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_softmax_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/softmax_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | /** 16 | * @brief cuDNN implementation of SoftmaxLayer. 17 | * Fallback to SoftmaxLayer for CPU mode. 
18 | */ 19 | template 20 | class CuDNNSoftmaxLayer : public SoftmaxLayer { 21 | public: 22 | explicit CuDNNSoftmaxLayer(const LayerParameter& param) 23 | : SoftmaxLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNSoftmaxLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/cudnn_tanh_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ 2 | #define CAFFE_CUDNN_TANH_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/tanh_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of TanHLayer. 18 | */ 19 | template 20 | class CuDNNTanHLayer : public TanHLayer { 21 | public: 22 | explicit CuDNNTanHLayer(const LayerParameter& param) 23 | : TanHLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNTanHLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_TANH_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_LAYER_HPP_ 2 | #define CAFFE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/data_reader.hpp" 8 | #include "caffe/data_transformer.hpp" 9 | #include "caffe/internal_thread.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layers/base_data_layer.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/util/db.hpp" 14 | 15 | namespace caffe { 16 | 17 | template 18 | class DataLayer : public BasePrefetchingDataLayer { 19 | public: 20 | explicit DataLayer(const LayerParameter& param); 21 | virtual ~DataLayer(); 22 | virtual void DataLayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | // DataLayer uses DataReader instead for sharing for parallelism 25 | virtual inline bool ShareInParallel() const { return false; } 26 | virtual inline const char* type() const { return "Data"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 0; } 28 | virtual inline int MinTopBlobs() const { return 1; } 29 | virtual 
inline int MaxTopBlobs() const { return 2; } 30 | 31 | protected: 32 | virtual void load_batch(Batch* batch); 33 | 34 | DataReader reader_; 35 | }; 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_DATA_LAYER_HPP_ 40 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/image_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_IMAGE_DATA_LAYER_HPP_ 2 | #define CAFFE_IMAGE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/data_transformer.hpp" 10 | #include "caffe/internal_thread.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/layers/base_data_layer.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | 15 | namespace caffe { 16 | 17 | /** 18 | * @brief Provides data to the Net from image files. 19 | * 20 | * TODO(dox): thorough documentation for Forward and proto params. 21 | */ 22 | template 23 | class ImageDataLayer : public BasePrefetchingDataLayer { 24 | public: 25 | explicit ImageDataLayer(const LayerParameter& param) 26 | : BasePrefetchingDataLayer(param) {} 27 | virtual ~ImageDataLayer(); 28 | virtual void DataLayerSetUp(const vector*>& bottom, 29 | const vector*>& top); 30 | 31 | virtual inline const char* type() const { return "ImageData"; } 32 | virtual inline int ExactNumBottomBlobs() const { return 0; } 33 | virtual inline int ExactNumTopBlobs() const { return 2; } 34 | 35 | protected: 36 | shared_ptr prefetch_rng_; 37 | virtual void ShuffleImages(); 38 | virtual void load_batch(Batch* batch); 39 | 40 | vector > lines_; 41 | int lines_id_; 42 | }; 43 | 44 | 45 | } // namespace caffe 46 | 47 | #endif // CAFFE_IMAGE_DATA_LAYER_HPP_ 48 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/neuron_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_NEURON_LAYER_HPP_ 2 | #define CAFFE_NEURON_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief An interface for layers that take one blob as input (@f$ x @f$) 14 | * and produce one equally-sized blob as output (@f$ y @f$), where 15 | * each element of the output depends only on the corresponding input 16 | * element. 17 | */ 18 | template 19 | class NeuronLayer : public Layer { 20 | public: 21 | explicit NeuronLayer(const LayerParameter& param) 22 | : Layer(param) {} 23 | virtual void Reshape(const vector*>& bottom, 24 | const vector*>& top); 25 | 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | }; 29 | 30 | } // namespace caffe 31 | 32 | #endif // CAFFE_NEURON_LAYER_HPP_ 33 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/layers/tile_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_TILE_LAYER_HPP_ 2 | #define CAFFE_TILE_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Copy a Blob along specified dimensions. 
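 * For example, tiling a bottom blob of shape N x C x H x W along axis 1 with
 * tiles = 3 produces a top blob of shape N x 3C x H x W.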
14 | */ 15 | template 16 | class TileLayer : public Layer { 17 | public: 18 | explicit TileLayer(const LayerParameter& param) 19 | : Layer(param) {} 20 | virtual void Reshape(const vector*>& bottom, 21 | const vector*>& top); 22 | 23 | virtual inline const char* type() const { return "Tile"; } 24 | virtual inline int ExactNumBottomBlobs() const { return 1; } 25 | virtual inline int ExactNumTopBlobs() const { return 1; } 26 | 27 | protected: 28 | virtual void Forward_cpu(const vector*>& bottom, 29 | const vector*>& top); 30 | virtual void Forward_gpu(const vector*>& bottom, 31 | const vector*>& top); 32 | 33 | virtual void Backward_cpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | virtual void Backward_gpu(const vector*>& top, 36 | const vector& propagate_down, const vector*>& bottom); 37 | 38 | unsigned int axis_, tiles_, outer_dim_, inner_dim_; 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_TILE_LAYER_HPP_ 44 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 | 8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | virtual void Start(); 15 | virtual void Stop(); 16 | virtual float MilliSeconds(); 17 | virtual float MicroSeconds(); 18 | virtual float Seconds(); 19 | 20 | inline bool initted() { return initted_; } 21 | inline bool running() { return running_; } 22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 23 | 24 | protected: 25 | void Init(); 26 | 27 | bool initted_; 28 | bool running_; 29 | bool has_run_at_least_once_; 30 | #ifndef CPU_ONLY 31 | cudaEvent_t start_gpu_; 32 | cudaEvent_t stop_gpu_; 33 | #endif 34 | boost::posix_time::ptime start_cpu_; 35 | boost::posix_time::ptime stop_cpu_; 36 | float elapsed_milliseconds_; 37 | float elapsed_microseconds_; 38 | }; 39 | 40 | class CPUTimer : public Timer { 41 | public: 42 | explicit CPUTimer(); 43 | virtual ~CPUTimer() {} 44 | virtual void Start(); 45 | virtual void Stop(); 46 | virtual float MilliSeconds(); 47 | virtual float MicroSeconds(); 48 | }; 49 | 50 | } // namespace caffe 51 | 52 | #endif // CAFFE_UTIL_BENCHMARK_H_ 53 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/blocking_queue.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 2 | #define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | namespace caffe { 8 | 9 | template 10 | class BlockingQueue { 11 | public: 12 | explicit BlockingQueue(); 13 | 14 | void push(const T& t); 15 | 16 | bool try_pop(T* t); 17 | 18 | // This logs a message if the threads needs to be blocked 19 | // useful for detecting e.g. when data feeding is too slow 20 | T pop(const string& log_on_wait = ""); 21 | 22 | bool try_peek(T* t); 23 | 24 | // Return element without removing it 25 | T peek(); 26 | 27 | size_t size() const; 28 | 29 | protected: 30 | /** 31 | Move synchronization fields out instead of including boost/thread.hpp 32 | to avoid a boost/NVCC issues (#1009, #1010) on OSX. Also fails on 33 | Linux CUDA 7.0.18. 
34 | */ 35 | class sync; 36 | 37 | std::queue queue_; 38 | shared_ptr sync_; 39 | 40 | DISABLE_COPY_AND_ASSIGN(BlockingQueue); 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/db.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_DB_HPP 2 | #define CAFFE_UTIL_DB_HPP 3 | 4 | #include 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/proto/caffe.pb.h" 8 | 9 | namespace caffe { namespace db { 10 | 11 | enum Mode { READ, WRITE, NEW }; 12 | 13 | class Cursor { 14 | public: 15 | Cursor() { } 16 | virtual ~Cursor() { } 17 | virtual void SeekToFirst() = 0; 18 | virtual void Next() = 0; 19 | virtual string key() = 0; 20 | virtual string value() = 0; 21 | virtual bool valid() = 0; 22 | 23 | DISABLE_COPY_AND_ASSIGN(Cursor); 24 | }; 25 | 26 | class Transaction { 27 | public: 28 | Transaction() { } 29 | virtual ~Transaction() { } 30 | virtual void Put(const string& key, const string& value) = 0; 31 | virtual void Commit() = 0; 32 | 33 | DISABLE_COPY_AND_ASSIGN(Transaction); 34 | }; 35 | 36 | class DB { 37 | public: 38 | DB() { } 39 | virtual ~DB() { } 40 | virtual void Open(const string& source, Mode mode) = 0; 41 | virtual void Close() = 0; 42 | virtual Cursor* NewCursor() = 0; 43 | virtual Transaction* NewTransaction() = 0; 44 | 45 | DISABLE_COPY_AND_ASSIGN(DB); 46 | }; 47 | 48 | DB* GetDB(DataParameter::DB backend); 49 | DB* GetDB(const string& backend); 50 | 51 | } // namespace db 52 | } // namespace caffe 53 | 54 | #endif // CAFFE_UTIL_DB_HPP 55 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/format.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_FORMAT_H_ 2 | #define CAFFE_UTIL_FORMAT_H_ 3 | 4 | #include // NOLINT(readability/streams) 5 | #include // NOLINT(readability/streams) 6 | #include 7 | 8 | namespace caffe { 9 | 10 | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) { 11 | std::ostringstream s; 12 | s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n; 13 | return s.str(); 14 | } 15 | 16 | } 17 | 18 | #endif // CAFFE_UTIL_FORMAT_H_ 19 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/gpu_util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_GPU_UTIL_H_ 2 | #define CAFFE_UTIL_GPU_UTIL_H_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); 8 | 9 | template <> 10 | inline __device__ 11 | float caffe_gpu_atomic_add(const float val, float* address) { 12 | return atomicAdd(address, val); 13 | } 14 | 15 | // double atomicAdd implementation taken from: 16 | // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG 17 | template <> 18 | inline __device__ 19 | double caffe_gpu_atomic_add(const double val, double* address) { 20 | unsigned long long int* address_as_ull = // NOLINT(runtime/int) 21 | // NOLINT_NEXT_LINE(runtime/int) 22 | reinterpret_cast(address); 23 | unsigned long long int old = *address_as_ull; // NOLINT(runtime/int) 24 | unsigned long long int assumed; // NOLINT(runtime/int) 25 | do { 26 | assumed = old; 27 | old = atomicCAS(address_as_ull, assumed, 28 | __double_as_longlong(val + __longlong_as_double(assumed))); 29 | } while 
(assumed != old); 30 | return __longlong_as_double(old); 31 | } 32 | 33 | } // namespace caffe 34 | 35 | #endif // CAFFE_UTIL_GPU_UTIL_H_ 36 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/hdf5.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_HDF5_H_ 2 | #define CAFFE_UTIL_HDF5_H_ 3 | 4 | #include 5 | 6 | #include "hdf5.h" 7 | #include "hdf5_hl.h" 8 | 9 | #include "caffe/blob.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void hdf5_load_nd_dataset_helper( 15 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 16 | Blob* blob); 17 | 18 | template 19 | void hdf5_load_nd_dataset( 20 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 21 | Blob* blob); 22 | 23 | template 24 | void hdf5_save_nd_dataset( 25 | const hid_t file_id, const string& dataset_name, const Blob& blob, 26 | bool write_diff = false); 27 | 28 | int hdf5_load_int(hid_t loc_id, const string& dataset_name); 29 | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); 30 | string hdf5_load_string(hid_t loc_id, const string& dataset_name); 31 | void hdf5_save_string(hid_t loc_id, const string& dataset_name, 32 | const string& s); 33 | 34 | int hdf5_get_num_links(hid_t loc_id); 35 | string hdf5_get_name_by_idx(hid_t loc_id, int idx); 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_UTIL_HDF5_H_ 40 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/insert_inceptions.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_INCEPTIONS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_INCEPTIONS_HPP_ 3 | 4 | #include 5 | #include "caffe/proto/caffe.pb.h" 6 | using namespace std; 7 | /** 8 | * Design by alan 9 | */ 10 | namespace caffe { 11 | 12 | 13 | void InsertInceptions(const NetParameter& param, NetParameter* param_split); 14 | 15 | void InsertInception(const InceptionParameter& inception_param, 16 | const LayerParameter layer_param,NetParameter* param_split); 17 | 18 | string InsertInceptionColumn(const LayerParameter& layer_param, 19 | const InceptionParameter& inception_param, 20 | const string& bottom_name, const string& layer_name, 21 | const InceptionColumnParameter& inception_column, 22 | NetParameter* param_split); 23 | 24 | 25 | 26 | string InceptionSubLayerName(const string& layer_name, const string& column_name, 27 | const int blob_idx, string postfix = string("")); 28 | 29 | string InceptionSubBlobName(const string& layer_name, const string& column_name, 30 | const int blob_idx,string postfix = string("")); 31 | 32 | string ConfigureInceptionConvLayer(const string& layer_name, const string& column_name, 33 | const int blob_idx, const string& bottom_name,const vector& blob_params, 34 | LayerParameter* conv_layer_param, const ConvolutionParameter& conv_param); 35 | 36 | 37 | 38 | 39 | } // namespace caffe 40 | 41 | #endif // _CAFFE_UTIL_INSERT_INCEPTIONS_HPP_ 42 | -------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any 
shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer. 12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | } // namespace caffe 25 | 26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 |
-------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/rng.hpp: --------------------------------------------------------------------------------
1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include <algorithm> 5 | #include <iterator> 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast<caffe::rng_t*>(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template <class RandomAccessIterator, class RandomGenerator> 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits<RandomAccessIterator>::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int<difference_type> dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template <class RandomAccessIterator> 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_RNG_HPP_ 44 |
-------------------------------------------------------------------------------- /caffe_densebox/include/caffe/util/signal_handler.h: --------------------------------------------------------------------------------
1 | #ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 2 | #define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 3 | 4 | #include "caffe/proto/caffe.pb.h" 5 | #include "caffe/solver.hpp" 6 | 7 | namespace caffe { 8 | 9 | class SignalHandler { 10 | public: 11 | // Constructor. Specify what action to take when a signal is received.
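// Usage sketch (illustrative only; this mirrors how the upstream caffe
// command-line tool wires signals into training, and `solver` below is assumed
// to be a caffe::Solver instance, not something declared in this header):
//
//   caffe::SignalHandler handler(caffe::SolverAction::STOP,      // on SIGINT
//                                caffe::SolverAction::SNAPSHOT); // on SIGHUP
//   solver->SetActionFunction(handler.GetActionFunction());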
12 | SignalHandler(SolverAction::Enum SIGINT_action, 13 | SolverAction::Enum SIGHUP_action); 14 | ~SignalHandler(); 15 | ActionCallback GetActionFunction(); 16 | private: 17 | SolverAction::Enum CheckForSignals() const; 18 | SolverAction::Enum SIGINT_action_; 19 | SolverAction::Enum SIGHUP_action_; 20 | }; 21 | 22 | } // namespace caffe 23 | 24 | #endif // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 25 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/+test/test_io.m: -------------------------------------------------------------------------------- 1 | classdef test_io < matlab.unittest.TestCase 2 | methods (Test) 3 | function test_read_write_mean(self) 4 | % randomly generate mean data 5 | width = 200; 6 | height = 300; 7 | channels = 3; 8 | mean_data_write = 255 * rand(width, height, channels, 'single'); 9 | % write mean data to binary proto 10 | mean_proto_file = tempname(); 11 | caffe.io.write_mean(mean_data_write, mean_proto_file); 12 | % read mean data from saved binary proto and test whether they are equal 13 | mean_data_read = caffe.io.read_mean(mean_proto_file); 14 | self.verifyEqual(mean_data_write, mean_data_read) 15 | delete(mean_proto_file); 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/Layer.m: -------------------------------------------------------------------------------- 1 | classdef Layer < handle 2 | % Wrapper class of caffe::Layer in matlab 3 | 4 | properties (Access = private) 5 | hLayer_self 6 | attributes 7 | % attributes fields: 8 | % hBlob_blobs 9 | end 10 | properties (SetAccess = private) 11 | params 12 | end 13 | 14 | methods 15 | function self = Layer(hLayer_layer) 16 | CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle'); 17 | 18 | % setup self handle and attributes 19 | self.hLayer_self = hLayer_layer; 20 | self.attributes = caffe_('layer_get_attr', self.hLayer_self); 21 | 22 | % setup weights 23 | self.params = caffe.Blob.empty(); 24 | for n = 1:length(self.attributes.hBlob_blobs) 25 | self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n)); 26 | end 27 | end 28 | function layer_type = type(self) 29 | layer_type = caffe_('layer_get_type', self.hLayer_self); 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/get_net.m: -------------------------------------------------------------------------------- 1 | function net = get_net(varargin) 2 | % net = get_net(model_file, phase_name) or 3 | % net = get_net(model_file, weights_file, phase_name) 4 | % Construct a net from model_file, and load weights from weights_file 5 | % phase_name can only be 'train' or 'test' 6 | 7 | CHECK(nargin == 2 || nargin == 3, ['usage: ' ... 8 | 'net = get_net(model_file, phase_name) or ' ... 9 | 'net = get_net(model_file, weights_file, phase_name)']); 10 | if nargin == 3 11 | model_file = varargin{1}; 12 | weights_file = varargin{2}; 13 | phase_name = varargin{3}; 14 | elseif nargin == 2 15 | model_file = varargin{1}; 16 | phase_name = varargin{2}; 17 | end 18 | 19 | CHECK(ischar(model_file), 'model_file must be a string'); 20 | CHECK(ischar(phase_name), 'phase_name must be a string'); 21 | CHECK_FILE_EXIST(model_file); 22 | CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ... 23 | sprintf('phase_name can only be %strain%s or %stest%s', ... 
24 | char(39), char(39), char(39), char(39))); 25 | 26 | % construct caffe net from model_file 27 | hNet = caffe_('get_net', model_file, phase_name); 28 | net = caffe.Net(hNet); 29 | 30 | % load weights from weights_file 31 | if nargin == 3 32 | CHECK(ischar(weights_file), 'weights_file must be a string'); 33 | CHECK_FILE_EXIST(weights_file); 34 | net.copy_from(weights_file); 35 | end 36 | 37 | end 38 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/get_solver.m: -------------------------------------------------------------------------------- 1 | function solver = get_solver(solver_file) 2 | % solver = get_solver(solver_file) 3 | % Construct a Solver object from solver_file 4 | 5 | CHECK(ischar(solver_file), 'solver_file must be a string'); 6 | CHECK_FILE_EXIST(solver_file); 7 | pSolver = caffe_('get_solver', solver_file); 8 | solver = caffe.Solver(pSolver); 9 | 10 | end 11 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/private/CHECK.m: -------------------------------------------------------------------------------- 1 | function CHECK(expr, error_msg) 2 | 3 | if ~expr 4 | error(error_msg); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/private/CHECK_FILE_EXIST.m: -------------------------------------------------------------------------------- 1 | function CHECK_FILE_EXIST(filename) 2 | 3 | if exist(filename, 'file') == 0 4 | error('%s does not exist', filename); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/private/is_valid_handle.m: -------------------------------------------------------------------------------- 1 | function valid = is_valid_handle(hObj) 2 | % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key') 3 | % Check if a handle is valid (has the right data type and init_key matches) 4 | % Use is_valid_handle('get_new_init_key') to get new init_key from C++; 5 | 6 | % a handle is a struct array with the following fields 7 | % (uint64) ptr : the pointer to the C++ object 8 | % (double) init_key : caffe initialization key 9 | 10 | persistent init_key; 11 | if isempty(init_key) 12 | init_key = caffe_('get_init_key'); 13 | end 14 | 15 | % is_valid_handle('get_new_init_key') to get new init_key from C++; 16 | if ischar(hObj) && strcmp(hObj, 'get_new_init_key') 17 | init_key = caffe_('get_init_key'); 18 | return 19 | else 20 | % check whether data types are correct and init_key matches 21 | valid = isstruct(hObj) ... 22 | && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ... 23 | && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ... 
24 | && hObj.init_key == init_key; 25 | end 26 | 27 | end 28 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/reset_all.m: -------------------------------------------------------------------------------- 1 | function reset_all() 2 | % reset_all() 3 | % clear all solvers and stand-alone nets and reset Caffe to initial status 4 | 5 | caffe_('reset'); 6 | is_valid_handle('get_new_init_key'); 7 | 8 | end 9 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/run_tests.m: -------------------------------------------------------------------------------- 1 | function results = run_tests() 2 | % results = run_tests() 3 | % run all tests in this caffe matlab wrapper package 4 | 5 | % use CPU for testing 6 | caffe.set_mode_cpu(); 7 | 8 | % reset caffe before testing 9 | caffe.reset_all(); 10 | 11 | % put all test cases here 12 | results = [... 13 | run(caffe.test.test_net) ... 14 | run(caffe.test.test_solver) ... 15 | run(caffe.test.test_io) ]; 16 | 17 | % reset caffe after testing 18 | caffe.reset_all(); 19 | 20 | end 21 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/set_device.m: -------------------------------------------------------------------------------- 1 | function set_device(device_id) 2 | % set_device(device_id) 3 | % set Caffe's GPU device ID 4 | 5 | CHECK(isscalar(device_id) && device_id >= 0, ... 6 | 'device_id must be non-negative integer'); 7 | device_id = double(device_id); 8 | 9 | caffe_('set_device', device_id); 10 | 11 | end 12 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/set_mode_cpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_cpu() 2 | % set_mode_cpu() 3 | % set Caffe to CPU mode 4 | 5 | caffe_('set_mode_cpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/set_mode_gpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_gpu() 2 | % set_mode_gpu() 3 | % set Caffe to GPU mode 4 | 5 | caffe_('set_mode_gpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/+caffe/version.m: -------------------------------------------------------------------------------- 1 | function version_str = version() 2 | % version() 3 | % show Caffe's version. 4 | 5 | version_str = caffe_('version'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /caffe_densebox/matlab/hdf5creation/.gitignore: -------------------------------------------------------------------------------- 1 | *.h5 2 | list.txt 3 | -------------------------------------------------------------------------------- /caffe_densebox/python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver 2 | from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list 3 | from .proto.caffe_pb2 import TRAIN, TEST 4 | from .classifier import Classifier 5 | from .detector import Detector 6 | from . 
import io 7 | from .net_spec import layers, params, NetSpec, to_proto 8 | -------------------------------------------------------------------------------- /caffe_densebox/python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /caffe_densebox/python/caffe/test/test_io.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | 4 | import caffe 5 | 6 | class TestBlobProtoToArray(unittest.TestCase): 7 | 8 | def test_old_format(self): 9 | data = np.zeros((10,10)) 10 | blob = caffe.proto.caffe_pb2.BlobProto() 11 | blob.data.extend(list(data.flatten())) 12 | shape = (1,1,10,10) 13 | blob.num, blob.channels, blob.height, blob.width = shape 14 | 15 | arr = caffe.io.blobproto_to_array(blob) 16 | self.assertEqual(arr.shape, shape) 17 | 18 | def test_new_format(self): 19 | data = np.zeros((10,10)) 20 | blob = caffe.proto.caffe_pb2.BlobProto() 21 | blob.data.extend(list(data.flatten())) 22 | blob.shape.dim.extend(list(data.shape)) 23 | 24 | arr = caffe.io.blobproto_to_array(blob) 25 | self.assertEqual(arr.shape, data.shape) 26 | 27 | def test_no_shape(self): 28 | data = np.zeros((10,10)) 29 | blob = caffe.proto.caffe_pb2.BlobProto() 30 | blob.data.extend(list(data.flatten())) 31 | 32 | with self.assertRaises(ValueError): 33 | caffe.io.blobproto_to_array(blob) 34 | 35 | def test_scalar(self): 36 | data = np.ones((1)) * 123 37 | blob = caffe.proto.caffe_pb2.BlobProto() 38 | blob.data.extend(list(data.flatten())) 39 | 40 | arr = caffe.io.blobproto_to_array(blob) 41 | self.assertEqual(arr, 123) 42 | -------------------------------------------------------------------------------- /caffe_densebox/python/caffe/test/test_layer_type_list.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import caffe 4 | 5 | class TestLayerTypeList(unittest.TestCase): 6 | 7 | def test_standard_types(self): 8 | #removing 'Data' from list 9 | for type_name in ['Data', 'Convolution', 'InnerProduct']: 10 | self.assertIn(type_name, caffe.layer_type_list(), 11 | '%s not in layer_type_list()' % type_name) 12 | -------------------------------------------------------------------------------- /caffe_densebox/python/draw_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Draw a graph of the net architecture. 
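Example invocation (a sketch; the prototxt path below is only an illustration,
and --rankdir accepts any Graphviz rankdir value):

    python python/draw_net.py examples/mnist/lenet.prototxt lenet.png --rankdir TB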
4 | """ 5 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter 6 | from google.protobuf import text_format 7 | 8 | import caffe 9 | import caffe.draw 10 | from caffe.proto import caffe_pb2 11 | 12 | 13 | def parse_args(): 14 | """Parse input arguments 15 | """ 16 | 17 | parser = ArgumentParser(description=__doc__, 18 | formatter_class=ArgumentDefaultsHelpFormatter) 19 | 20 | parser.add_argument('input_net_proto_file', 21 | help='Input network prototxt file') 22 | parser.add_argument('output_image_file', 23 | help='Output image file') 24 | parser.add_argument('--rankdir', 25 | help=('One of TB (top-bottom, i.e., vertical), ' 26 | 'RL (right-left, i.e., horizontal), or another ' 27 | 'valid dot option; see ' 28 | 'http://www.graphviz.org/doc/info/' 29 | 'attrs.html#k:rankdir'), 30 | default='LR') 31 | 32 | args = parser.parse_args() 33 | return args 34 | 35 | 36 | def main(): 37 | args = parse_args() 38 | net = caffe_pb2.NetParameter() 39 | text_format.Merge(open(args.input_net_proto_file).read(), net) 40 | print('Drawing net to %s' % args.output_image_file) 41 | caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir) 42 | 43 | 44 | if __name__ == '__main__': 45 | main() 46 | -------------------------------------------------------------------------------- /caffe_densebox/python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | matplotlib>=1.3.1 6 | ipython>=3.0.0 7 | h5py>=2.2.0 8 | leveldb>=0.191 9 | networkx>=1.8.1 10 | nose>=1.3.0 11 | pandas>=0.12.0 12 | python-dateutil>=1.4,<2 13 | protobuf>=2.5.0 14 | python-gflags>=2.0 15 | pyyaml>=3.10 16 | Pillow>=2.3.0 17 | six>=1.1.0 -------------------------------------------------------------------------------- /caffe_densebox/scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 
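Usage sketch (this mirrors how scripts/gather_examples.sh invokes this script;
the notebook path is only an example):

    python scripts/copy_notebook.py examples/detection.ipynb docs/gathered/examples/detection.ipynb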
10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /caffe_densebox/scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh " 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip 22 | unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR 23 | rm $MODEL_DIR/gist.zip 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /caffe_densebox/scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /caffe_densebox/scripts/travis/travis_build_and_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script called by Travis to build and test Caffe. 3 | # Travis CI tests are CPU-only for lack of compatible hardware. 
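# Overview of the flow below: when $WITH_CMAKE is true, an out-of-tree CMake
# build is configured (CPU_ONLY, the Python version and the LMDB/LevelDB/OpenCV
# IO backends are switched through environment variables); otherwise the plain
# Makefile targets are driven. runtest and lint only run in the CUDA-less
# configuration, since the CI machines have no GPU.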
4 | 5 | set -e 6 | MAKE="make --jobs=$NUM_THREADS --keep-going" 7 | 8 | if $WITH_CMAKE; then 9 | mkdir build 10 | cd build 11 | CPU_ONLY=" -DCPU_ONLY=ON" 12 | if ! $WITH_CUDA; then 13 | CPU_ONLY=" -DCPU_ONLY=OFF" 14 | fi 15 | PYTHON_ARGS="" 16 | if [ "$PYTHON_VERSION" = "3" ]; then 17 | PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/" 18 | fi 19 | if $WITH_IO; then 20 | IO_ARGS="-DUSE_OPENCV=ON -DUSE_LMDB=ON -DUSE_LEVELDB=ON" 21 | else 22 | IO_ARGS="-DUSE_OPENCV=OFF -DUSE_LMDB=OFF -DUSE_LEVELDB=OFF" 23 | fi 24 | cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" $IO_ARGS .. 25 | $MAKE 26 | $MAKE pytest 27 | if ! $WITH_CUDA; then 28 | $MAKE runtest 29 | $MAKE lint 30 | fi 31 | $MAKE clean 32 | cd - 33 | else 34 | if ! $WITH_CUDA; then 35 | export CPU_ONLY=1 36 | fi 37 | if $WITH_IO; then 38 | export USE_LMDB=1 39 | export USE_LEVELDB=1 40 | export USE_OPENCV=1 41 | fi 42 | $MAKE all test pycaffe warn lint || true 43 | if ! $WITH_CUDA; then 44 | $MAKE runtest 45 | fi 46 | $MAKE all 47 | $MAKE test 48 | $MAKE pycaffe 49 | $MAKE pytest 50 | $MAKE warn 51 | if ! $WITH_CUDA; then 52 | $MAKE lint 53 | fi 54 | fi 55 | -------------------------------------------------------------------------------- /caffe_densebox/scripts/travis/travis_setup_makefile_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mv Makefile.config.example Makefile.config 6 | 7 | if $WITH_CUDA; then 8 | # Only generate compute_50. 9 | GENCODE="-gencode arch=compute_50,code=sm_50" 10 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" 11 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config 12 | fi 13 | 14 | # Remove IO library settings from Makefile.config 15 | # to avoid conflicts with CI configuration 16 | sed -i -e '/USE_LMDB/d' Makefile.config 17 | sed -i -e '/USE_LEVELDB/d' Makefile.config 18 | sed -i -e '/USE_OPENCV/d' Makefile.config 19 | 20 | cat << 'EOF' >> Makefile.config 21 | # Travis' nvcc doesn't like newer boost versions 22 | NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used 23 | ANACONDA_HOME := $(CONDA_DIR) 24 | PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ 25 | $(ANACONDA_HOME)/include/python2.7 \ 26 | $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include 27 | PYTHON_LIB := $(ANACONDA_HOME)/lib 28 | INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include 29 | LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib 30 | WITH_PYTHON_LAYER := 1 31 | EOF 32 | -------------------------------------------------------------------------------- /caffe_densebox/scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! -f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh " 7 | echo " /readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " /readme.md must contain name field in the front-matter." 
18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your /readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!" 38 | fi 39 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # generate protobuf sources 2 | file(GLOB proto_files proto/*.proto) 3 | caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) 4 | 5 | # include python files either to force generation 6 | add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) 7 | set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 8 | caffe_default_properties(proto) 9 | 10 | # --[ Caffe library 11 | 12 | # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists 13 | caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) 14 | 15 | if(HAVE_CUDA) 16 | caffe_cuda_compile(cuda_objs ${cuda}) 17 | list(APPEND srcs ${cuda_objs} ${cuda}) 18 | endif() 19 | 20 | add_library(caffe ${srcs}) 21 | target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) 22 | caffe_default_properties(caffe) 23 | 24 | # ---[ Tests 25 | add_subdirectory(test) 26 | 27 | # ---[ Install 28 | install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) 29 | install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) 30 | install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) 31 | 32 | file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) 33 | list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) 34 | install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) 35 | 36 | 37 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "caffe/layer.hpp" 3 | 4 | namespace caffe { 5 | 6 | template 7 | void Layer::InitMutex() { 8 | forward_mutex_.reset(new boost::mutex()); 9 | } 10 | 11 | template 12 | void Layer::Lock() { 13 | if (IsShared()) { 14 | forward_mutex_->lock(); 15 | } 16 | } 17 | 18 | template 19 | void Layer::Unlock() { 20 | if (IsShared()) { 21 | forward_mutex_->unlock(); 22 | } 23 | } 24 | 25 | INSTANTIATE_CLASS(Layer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/absval_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/absval_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void AbsValLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | NeuronLayer::LayerSetUp(bottom, top); 12 | CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not " 13 | "allow in-place computation."; 14 | } 15 | 16 | template 17 | void AbsValLayer::Forward_cpu( 18 | const 
vector*>& bottom, const vector*>& top) { 19 | const int count = top[0]->count(); 20 | Dtype* top_data = top[0]->mutable_cpu_data(); 21 | caffe_abs(count, bottom[0]->cpu_data(), top_data); 22 | } 23 | 24 | template 25 | void AbsValLayer::Backward_cpu(const vector*>& top, 26 | const vector& propagate_down, const vector*>& bottom) { 27 | const int count = top[0]->count(); 28 | const Dtype* top_diff = top[0]->cpu_diff(); 29 | if (propagate_down[0]) { 30 | const Dtype* bottom_data = bottom[0]->cpu_data(); 31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 32 | caffe_cpu_sign(count, bottom_data, bottom_diff); 33 | caffe_mul(count, bottom_diff, top_diff, bottom_diff); 34 | } 35 | } 36 | 37 | #ifdef CPU_ONLY 38 | STUB_GPU(AbsValLayer); 39 | #endif 40 | 41 | INSTANTIATE_CLASS(AbsValLayer); 42 | REGISTER_LAYER_CLASS(AbsVal); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/absval_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/absval_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void AbsValLayer::Forward_gpu( 10 | const vector*>& bottom, const vector*>& top) { 11 | const int count = top[0]->count(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 14 | } 15 | 16 | template 17 | void AbsValLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | const int count = top[0]->count(); 20 | const Dtype* top_diff = top[0]->gpu_diff(); 21 | if (propagate_down[0]) { 22 | const Dtype* bottom_data = bottom[0]->gpu_data(); 23 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 24 | caffe_gpu_sign(count, bottom_data, bottom_diff); 25 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 26 | } 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); 30 | 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/bgr_2_gray_layer.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/layers/fcn_data_layers.hpp" 2 | #include "caffe/util/math_functions.hpp" 3 | namespace caffe { 4 | 5 | 6 | template 7 | void BGR2GrayLayer::Forward_gpu(const vector*>& bottom, 8 | const vector*>& top) { 9 | 10 | Dtype* bgr_weight = bgr_weight_.mutable_gpu_data(); 11 | int num = bottom[0]->num(); 12 | int height = bottom[0]->height(); 13 | int width = bottom[0]->width(); 14 | int size_per_channel = height * width; 15 | int spacial_size = bottom[0]->count()/num; 16 | for(int i = 0; i < num; ++i){ 17 | Dtype* in_data = bottom[0]->mutable_gpu_data() + spacial_size * i; 18 | Dtype* out_data = top[0]->mutable_gpu_data() + size_per_channel * i; 19 | caffe_gpu_gemm(CblasTrans,CblasNoTrans,size_per_channel,1,3, 20 | 1.,in_data,bgr_weight,0.,out_data); 21 | } 22 | } 23 | INSTANTIATE_LAYER_GPU_FUNCS(BGR2GrayLayer); 24 | 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/bnll_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layers/bnll_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | const float kBNLL_THRESHOLD = 50.; 9 | 10 | template 11 | void 
BNLLLayer::Forward_cpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | const Dtype* bottom_data = bottom[0]->cpu_data(); 14 | Dtype* top_data = top[0]->mutable_cpu_data(); 15 | const int count = bottom[0]->count(); 16 | for (int i = 0; i < count; ++i) { 17 | top_data[i] = bottom_data[i] > 0 ? 18 | bottom_data[i] + log(1. + exp(-bottom_data[i])) : 19 | log(1. + exp(bottom_data[i])); 20 | } 21 | } 22 | 23 | template 24 | void BNLLLayer::Backward_cpu(const vector*>& top, 25 | const vector& propagate_down, 26 | const vector*>& bottom) { 27 | if (propagate_down[0]) { 28 | const Dtype* bottom_data = bottom[0]->cpu_data(); 29 | const Dtype* top_diff = top[0]->cpu_diff(); 30 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 31 | const int count = bottom[0]->count(); 32 | Dtype expval; 33 | for (int i = 0; i < count; ++i) { 34 | expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); 35 | bottom_diff[i] = top_diff[i] * expval / (expval + 1.); 36 | } 37 | } 38 | } 39 | 40 | #ifdef CPU_ONLY 41 | STUB_GPU(BNLLLayer); 42 | #endif 43 | 44 | INSTANTIATE_CLASS(BNLLLayer); 45 | REGISTER_LAYER_CLASS(BNLL); 46 | 47 | } // namespace caffe 48 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_lrn_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_lrn_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNLRNLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const Dtype* bottom_data = bottom[0]->gpu_data(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | 14 | CUDNN_CHECK(cudnnLRNCrossChannelForward( 15 | handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1, 16 | cudnn::dataType::one, 17 | bottom_desc_, bottom_data, 18 | cudnn::dataType::zero, 19 | top_desc_, top_data) ); 20 | } 21 | 22 | template 23 | void CuDNNLRNLayer::Backward_gpu(const vector*>& top, 24 | const vector& propagate_down, const vector*>& bottom) { 25 | const Dtype* top_diff = top[0]->gpu_diff(); 26 | const Dtype* top_data = top[0]->gpu_data(); 27 | const Dtype* bottom_data = bottom[0]->gpu_data(); 28 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 29 | 30 | CUDNN_CHECK(cudnnLRNCrossChannelBackward( 31 | handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1, 32 | cudnn::dataType::one, 33 | top_desc_, top_data, 34 | top_desc_, top_diff, 35 | bottom_desc_, bottom_data, 36 | cudnn::dataType::zero, 37 | bottom_desc_, bottom_diff) ); 38 | } 39 | 40 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLRNLayer); 41 | 42 | }; // namespace caffe 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_pooling_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_pooling_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const Dtype* bottom_data = bottom[0]->gpu_data(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, 14 | cudnn::dataType::one, 15 | bottom_desc_, bottom_data, 16 | cudnn::dataType::zero, 17 | top_desc_, top_data)); 18 | } 19 | 20 | template 21 | void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, 22 | const vector& propagate_down, const vector*>& bottom) { 
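  // Nothing to do unless a gradient is requested for the bottom blob; the
  // input gradient is otherwise delegated to cudnnPoolingBackward below, which
  // consumes the top data and diff together with the bottom data.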
23 | if (!propagate_down[0]) { 24 | return; 25 | } 26 | const Dtype* top_diff = top[0]->gpu_diff(); 27 | const Dtype* top_data = top[0]->gpu_data(); 28 | const Dtype* bottom_data = bottom[0]->gpu_data(); 29 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 30 | CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, 31 | cudnn::dataType::one, 32 | top_desc_, top_data, top_desc_, top_diff, 33 | bottom_desc_, bottom_data, 34 | cudnn::dataType::zero, 35 | bottom_desc_, bottom_diff)); 36 | } 37 | 38 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_relu_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_relu_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | ReLULayer::LayerSetUp(bottom, top); 12 | // initialize cuDNN 13 | CUDNN_CHECK(cudnnCreate(&handle_)); 14 | cudnn::createTensor4dDesc(&bottom_desc_); 15 | cudnn::createTensor4dDesc(&top_desc_); 16 | handles_setup_ = true; 17 | } 18 | 19 | template 20 | void CuDNNReLULayer::Reshape(const vector*>& bottom, 21 | const vector*>& top) { 22 | ReLULayer::Reshape(bottom, top); 23 | const int N = bottom[0]->num(); 24 | const int K = bottom[0]->channels(); 25 | const int H = bottom[0]->height(); 26 | const int W = bottom[0]->width(); 27 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 28 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 29 | } 30 | 31 | template 32 | CuDNNReLULayer::~CuDNNReLULayer() { 33 | // Check that handles have been setup before destroying. 34 | if (!handles_setup_) { return; } 35 | 36 | cudnnDestroyTensorDescriptor(this->bottom_desc_); 37 | cudnnDestroyTensorDescriptor(this->top_desc_); 38 | cudnnDestroy(this->handle_); 39 | } 40 | 41 | INSTANTIATE_CLASS(CuDNNReLULayer); 42 | 43 | } // namespace caffe 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_sigmoid_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | SigmoidLayer::LayerSetUp(bottom, top); 12 | // initialize cuDNN 13 | CUDNN_CHECK(cudnnCreate(&handle_)); 14 | cudnn::createTensor4dDesc(&bottom_desc_); 15 | cudnn::createTensor4dDesc(&top_desc_); 16 | handles_setup_ = true; 17 | } 18 | 19 | template 20 | void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, 21 | const vector*>& top) { 22 | SigmoidLayer::Reshape(bottom, top); 23 | const int N = bottom[0]->num(); 24 | const int K = bottom[0]->channels(); 25 | const int H = bottom[0]->height(); 26 | const int W = bottom[0]->width(); 27 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 28 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 29 | } 30 | 31 | template 32 | CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { 33 | // Check that handles have been setup before destroying. 
34 | if (!handles_setup_) { return; } 35 | 36 | cudnnDestroyTensorDescriptor(this->bottom_desc_); 37 | cudnnDestroyTensorDescriptor(this->top_desc_); 38 | cudnnDestroy(this->handle_); 39 | } 40 | 41 | INSTANTIATE_CLASS(CuDNNSigmoidLayer); 42 | 43 | } // namespace caffe 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_softmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "thrust/device_vector.h" 5 | 6 | #include "caffe/layers/cudnn_softmax_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, 12 | const vector*>& top) { 13 | SoftmaxLayer::LayerSetUp(bottom, top); 14 | // Initialize CUDNN. 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | handles_setup_ = true; 19 | } 20 | 21 | template 22 | void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, 23 | const vector*>& top) { 24 | SoftmaxLayer::Reshape(bottom, top); 25 | int N = this->outer_num_; 26 | int K = bottom[0]->shape(this->softmax_axis_); 27 | int H = this->inner_num_; 28 | int W = 1; 29 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 30 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 31 | } 32 | 33 | template 34 | CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { 35 | // Check that handles have been setup before destroying. 36 | if (!handles_setup_) { return; } 37 | 38 | cudnnDestroyTensorDescriptor(bottom_desc_); 39 | cudnnDestroyTensorDescriptor(top_desc_); 40 | cudnnDestroy(handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNSoftmaxLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/cudnn_tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_tanh_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | TanHLayer::LayerSetUp(bottom, top); 12 | // initialize cuDNN 13 | CUDNN_CHECK(cudnnCreate(&handle_)); 14 | cudnn::createTensor4dDesc(&bottom_desc_); 15 | cudnn::createTensor4dDesc(&top_desc_); 16 | handles_setup_ = true; 17 | } 18 | 19 | template 20 | void CuDNNTanHLayer::Reshape(const vector*>& bottom, 21 | const vector*>& top) { 22 | TanHLayer::Reshape(bottom, top); 23 | const int N = bottom[0]->num(); 24 | const int K = bottom[0]->channels(); 25 | const int H = bottom[0]->height(); 26 | const int W = bottom[0]->width(); 27 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 28 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 29 | } 30 | 31 | template 32 | CuDNNTanHLayer::~CuDNNTanHLayer() { 33 | // Check that handles have been setup before destroying. 
34 | if (!handles_setup_) { return; } 35 | 36 | cudnnDestroyTensorDescriptor(this->bottom_desc_); 37 | cudnnDestroyTensorDescriptor(this->top_desc_); 38 | cudnnDestroy(this->handle_); 39 | } 40 | 41 | INSTANTIATE_CLASS(CuDNNTanHLayer); 42 | 43 | } // namespace caffe 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/exp_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/exp_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void ExpLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const int count = bottom[0]->count(); 12 | const Dtype* bottom_data = bottom[0]->gpu_data(); 13 | Dtype* top_data = top[0]->mutable_gpu_data(); 14 | if (inner_scale_ == Dtype(1)) { 15 | caffe_gpu_exp(count, bottom_data, top_data); 16 | } else { 17 | caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); 18 | caffe_gpu_exp(count, top_data, top_data); 19 | } 20 | if (outer_scale_ != Dtype(1)) { 21 | caffe_gpu_scal(count, outer_scale_, top_data); 22 | } 23 | } 24 | 25 | template 26 | void ExpLayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, const vector*>& bottom) { 28 | if (!propagate_down[0]) { return; } 29 | const int count = bottom[0]->count(); 30 | const Dtype* top_data = top[0]->gpu_data(); 31 | const Dtype* top_diff = top[0]->gpu_diff(); 32 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 33 | caffe_gpu_mul(count, top_data, top_diff, bottom_diff); 34 | if (inner_scale_ != Dtype(1)) { 35 | caffe_gpu_scal(count, inner_scale_, bottom_diff); 36 | } 37 | } 38 | 39 | INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer); 40 | 41 | 42 | } // namespace caffe 43 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/flatten_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/flatten_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void FlattenLayer::Reshape(const vector*>& bottom, 9 | const vector*>& top) { 10 | const int start_axis = bottom[0]->CanonicalAxisIndex( 11 | this->layer_param_.flatten_param().axis()); 12 | const int end_axis = bottom[0]->CanonicalAxisIndex( 13 | this->layer_param_.flatten_param().end_axis()); 14 | vector top_shape; 15 | for (int i = 0; i < start_axis; ++i) { 16 | top_shape.push_back(bottom[0]->shape(i)); 17 | } 18 | const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); 19 | top_shape.push_back(flattened_dim); 20 | for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { 21 | top_shape.push_back(bottom[0]->shape(i)); 22 | } 23 | top[0]->Reshape(top_shape); 24 | CHECK_EQ(top[0]->count(), bottom[0]->count()); 25 | } 26 | 27 | template 28 | void FlattenLayer::Forward_cpu(const vector*>& bottom, 29 | const vector*>& top) { 30 | top[0]->ShareData(*bottom[0]); 31 | } 32 | 33 | template 34 | void FlattenLayer::Backward_cpu(const vector*>& top, 35 | const vector& propagate_down, const vector*>& bottom) { 36 | bottom[0]->ShareDiff(*top[0]); 37 | } 38 | 39 | INSTANTIATE_CLASS(FlattenLayer); 40 | REGISTER_LAYER_CLASS(Flatten); 41 | 42 | } // namespace caffe 43 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/hdf5_output_layer.cu: 
-------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "hdf5.h" 4 | #include "hdf5_hl.h" 5 | 6 | #include "caffe/layers/hdf5_output_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | CHECK_GE(bottom.size(), 2); 14 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()); 15 | data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), 16 | bottom[0]->height(), bottom[0]->width()); 17 | label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), 18 | bottom[1]->height(), bottom[1]->width()); 19 | const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); 20 | const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); 21 | 22 | for (int i = 0; i < bottom[0]->num(); ++i) { 23 | caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], 24 | &data_blob_.mutable_cpu_data()[i * data_datum_dim]); 25 | caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], 26 | &label_blob_.mutable_cpu_data()[i * label_datum_dim]); 27 | } 28 | SaveBlobs(); 29 | } 30 | 31 | template 32 | void HDF5OutputLayer::Backward_gpu(const vector*>& top, 33 | const vector& propagate_down, const vector*>& bottom) { 34 | return; 35 | } 36 | 37 | INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer); 38 | 39 | } // namespace caffe 40 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/loss_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void LossLayer::LayerSetUp( 9 | const vector*>& bottom, const vector*>& top) { 10 | // LossLayers have a non-zero (1) loss by default. 11 | if (this->layer_param_.loss_weight_size() == 0) { 12 | this->layer_param_.add_loss_weight(Dtype(1)); 13 | } 14 | } 15 | 16 | template 17 | void LossLayer::Reshape( 18 | const vector*>& bottom, const vector*>& top) { 19 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()) 20 | << "The data and label should have the same number."; 21 | vector loss_shape(0); // Loss layers output a scalar; 0 axes. 
22 | top[0]->Reshape(loss_shape); 23 | } 24 | 25 | INSTANTIATE_CLASS(LossLayer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/neuron_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/neuron_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void NeuronLayer::Reshape(const vector*>& bottom, 9 | const vector*>& top) { 10 | top[0]->ReshapeLike(*bottom[0]); 11 | } 12 | 13 | INSTANTIATE_CLASS(NeuronLayer); 14 | 15 | } // namespace caffe 16 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/relu_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layers/relu_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void ReLULayer::Forward_cpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const Dtype* bottom_data = bottom[0]->cpu_data(); 12 | Dtype* top_data = top[0]->mutable_cpu_data(); 13 | const int count = bottom[0]->count(); 14 | Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); 15 | for (int i = 0; i < count; ++i) { 16 | top_data[i] = std::max(bottom_data[i], Dtype(0)) 17 | + negative_slope * std::min(bottom_data[i], Dtype(0)); 18 | } 19 | } 20 | 21 | template 22 | void ReLULayer::Backward_cpu(const vector*>& top, 23 | const vector& propagate_down, 24 | const vector*>& bottom) { 25 | if (propagate_down[0]) { 26 | const Dtype* bottom_data = bottom[0]->cpu_data(); 27 | const Dtype* top_diff = top[0]->cpu_diff(); 28 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 29 | const int count = bottom[0]->count(); 30 | Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); 31 | for (int i = 0; i < count; ++i) { 32 | bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) 33 | + negative_slope * (bottom_data[i] <= 0)); 34 | } 35 | } 36 | } 37 | 38 | 39 | #ifdef CPU_ONLY 40 | STUB_GPU(ReLULayer); 41 | #endif 42 | 43 | INSTANTIATE_CLASS(ReLULayer); 44 | 45 | } // namespace caffe 46 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SigmoidCrossEntropyLossLayer::Backward_gpu( 10 | const vector*>& top, const vector& propagate_down, 11 | const vector*>& bottom) { 12 | if (propagate_down[1]) { 13 | LOG(FATAL) << this->type() 14 | << " Layer cannot backpropagate to label inputs."; 15 | } 16 | if (propagate_down[0]) { 17 | // First, compute the diff 18 | const int count = bottom[0]->count(); 19 | const int num = bottom[0]->num(); 20 | const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); 21 | const Dtype* target = bottom[1]->gpu_data(); 22 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 23 | caffe_copy(count, sigmoid_output_data, bottom_diff); 24 | caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); 25 | // Scale down gradient 26 | const Dtype loss_weight = top[0]->cpu_diff()[0]; 27 | caffe_gpu_scal(count, loss_weight / num, bottom_diff); 28 | } 29 | } 30 | 31 | INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); 32 | 33 | 34 | 
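// Note that only the backward pass is instantiated for the GPU in this file
// (INSTANTIATE_LAYER_GPU_BACKWARD above); no Forward_gpu is defined here, so
// the forward computation falls back to the layer's CPU implementation.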
} // namespace caffe 35 |
-------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/sigmoid_layer.cpp: --------------------------------------------------------------------------------
1 | #include <cmath> 2 | #include <vector> 3 | 4 | #include "caffe/layers/sigmoid_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | inline Dtype sigmoid(Dtype x) { 10 | return 1. / (1. + exp(-x)); 11 | } 12 | 13 | template <typename Dtype> 14 | void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 15 | const vector<Blob<Dtype>*>& top) { 16 | const Dtype* bottom_data = bottom[0]->cpu_data(); 17 | Dtype* top_data = top[0]->mutable_cpu_data(); 18 | const int count = bottom[0]->count(); 19 | for (int i = 0; i < count; ++i) { 20 | top_data[i] = sigmoid(bottom_data[i]); 21 | } 22 | } 23 | 24 | template <typename Dtype> 25 | void SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 26 | const vector<bool>& propagate_down, 27 | const vector<Blob<Dtype>*>& bottom) { 28 | if (propagate_down[0]) { 29 | const Dtype* top_data = top[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 32 | const int count = bottom[0]->count(); 33 | for (int i = 0; i < count; ++i) { 34 | const Dtype sigmoid_x = top_data[i]; 35 | bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); 36 | } 37 | } 38 | } 39 | 40 | #ifdef CPU_ONLY 41 | STUB_GPU(SigmoidLayer); 42 | #endif 43 | 44 | INSTANTIATE_CLASS(SigmoidLayer); 45 | 46 | 47 | } // namespace caffe 48 |
-------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/silence_layer.cpp: --------------------------------------------------------------------------------
1 | #include <vector> 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 10 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 11 | for (int i = 0; i < bottom.size(); ++i) { 12 | if (propagate_down[i]) { 13 | caffe_set(bottom[i]->count(), Dtype(0), 14 | bottom[i]->mutable_cpu_diff()); 15 | } 16 | } 17 | } 18 | 19 | #ifdef CPU_ONLY 20 | STUB_GPU(SilenceLayer); 21 | #endif 22 | 23 | INSTANTIATE_CLASS(SilenceLayer); 24 | REGISTER_LAYER_CLASS(Silence); 25 | 26 | } // namespace caffe 27 |
-------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/silence_layer.cu: --------------------------------------------------------------------------------
1 | #include <vector> 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 10 | const vector<Blob<Dtype>*>& top) { 11 | // Do nothing.
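  // The Silence layer exists only to consume bottom blobs that would otherwise
  // be dangling net outputs, so the forward pass intentionally produces no
  // output; the backward pass below simply zeroes the diff of any bottom blob
  // that requests a gradient.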
12 | } 13 | 14 | template 15 | void SilenceLayer::Backward_gpu(const vector*>& top, 16 | const vector& propagate_down, const vector*>& bottom) { 17 | for (int i = 0; i < bottom.size(); ++i) { 18 | if (propagate_down[i]) { 19 | caffe_gpu_set(bottom[i]->count(), Dtype(0), 20 | bottom[i]->mutable_gpu_diff()); 21 | } 22 | } 23 | } 24 | 25 | INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/split_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/split_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SplitLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | for (int i = 0; i < top.size(); ++i) { 12 | top[i]->ShareData(*bottom[0]); 13 | } 14 | } 15 | 16 | template 17 | void SplitLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | if (!propagate_down[0]) { return; } 20 | if (top.size() == 1) { 21 | caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); 22 | return; 23 | } 24 | caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), 25 | bottom[0]->mutable_gpu_diff()); 26 | // Add remaining top blob diffs. 27 | for (int i = 2; i < top.size(); ++i) { 28 | const Dtype* top_diff = top[i]->gpu_diff(); 29 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 30 | caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); 31 | } 32 | } 33 | 34 | 35 | INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); 36 | 37 | } // namespace caffe 38 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | // TanH neuron activation function layer. 
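// Forward_cpu computes y = tanh(x); Backward_cpu below reuses the top blob
// and the identity d tanh(x)/dx = 1 - tanh(x)^2 to form the gradient.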
2 | // Adapted from ReLU layer code written by Yangqing Jia 3 | 4 | #include 5 | 6 | #include "caffe/layers/tanh_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void TanHLayer::Forward_cpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | const Dtype* bottom_data = bottom[0]->cpu_data(); 14 | Dtype* top_data = top[0]->mutable_cpu_data(); 15 | const int count = bottom[0]->count(); 16 | for (int i = 0; i < count; ++i) { 17 | top_data[i] = tanh(bottom_data[i]); 18 | } 19 | } 20 | 21 | template 22 | void TanHLayer::Backward_cpu(const vector*>& top, 23 | const vector& propagate_down, 24 | const vector*>& bottom) { 25 | if (propagate_down[0]) { 26 | const Dtype* top_data = top[0]->cpu_data(); 27 | const Dtype* top_diff = top[0]->cpu_diff(); 28 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 29 | const int count = bottom[0]->count(); 30 | Dtype tanhx; 31 | for (int i = 0; i < count; ++i) { 32 | tanhx = top_data[i]; 33 | bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); 34 | } 35 | } 36 | } 37 | 38 | #ifdef CPU_ONLY 39 | STUB_GPU(TanHLayer); 40 | #endif 41 | 42 | INSTANTIATE_CLASS(TanHLayer); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/threshold_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void ThresholdLayer::LayerSetUp(const vector*>& bottom, 9 | const vector*>& top) { 10 | NeuronLayer::LayerSetUp(bottom, top); 11 | threshold_ = this->layer_param_.threshold_param().threshold(); 12 | } 13 | 14 | template 15 | void ThresholdLayer::Forward_cpu(const vector*>& bottom, 16 | const vector*>& top) { 17 | const Dtype* bottom_data = bottom[0]->cpu_data(); 18 | Dtype* top_data = top[0]->mutable_cpu_data(); 19 | const int count = bottom[0]->count(); 20 | for (int i = 0; i < count; ++i) { 21 | top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); 22 | } 23 | } 24 | 25 | #ifdef CPU_ONLY 26 | STUB_GPU_FORWARD(ThresholdLayer, Forward); 27 | #endif 28 | 29 | INSTANTIATE_CLASS(ThresholdLayer); 30 | REGISTER_LAYER_CLASS(Threshold); 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/layers/threshold_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | __global__ void ThresholdForward(const int n, const Dtype threshold, 9 | const Dtype* in, Dtype* out) { 10 | CUDA_KERNEL_LOOP(index, n) { 11 | out[index] = in[index] > threshold ? 
1 : 0; 12 | } 13 | } 14 | 15 | template <typename Dtype> 16 | void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 17 | const vector<Blob<Dtype>*>& top) { 18 | const Dtype* bottom_data = bottom[0]->gpu_data(); 19 | Dtype* top_data = top[0]->mutable_gpu_data(); 20 | const int count = bottom[0]->count(); 21 | // NOLINT_NEXT_LINE(whitespace/operators) 22 | ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 23 | count, threshold_, bottom_data, top_data); 24 | CUDA_POST_KERNEL_CHECK; 25 | } 26 | 27 | 28 | INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); 29 | 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_caffe_main.cpp: -------------------------------------------------------------------------------- 1 | // The main caffe test code. Your test cpp code should include this hpp 2 | // to allow a main function to be compiled into the binary. 3 | 4 | #include "caffe/caffe.hpp" 5 | #include "caffe/test/test_caffe_main.hpp" 6 | 7 | namespace caffe { 8 | #ifndef CPU_ONLY 9 | cudaDeviceProp CAFFE_TEST_CUDA_PROP; 10 | #endif 11 | } 12 | 13 | #ifndef CPU_ONLY 14 | using caffe::CAFFE_TEST_CUDA_PROP; 15 | #endif 16 | 17 | int main(int argc, char** argv) { 18 | ::testing::InitGoogleTest(&argc, argv); 19 | caffe::GlobalInit(&argc, &argv); 20 | #ifndef CPU_ONLY 21 | // Before starting testing, let's first print out some CUDA device info. 22 | int device; 23 | cudaGetDeviceCount(&device); 24 | cout << "Cuda number of devices: " << device << endl; 25 | if (argc > 1) { 26 | // Use the given device 27 | device = atoi(argv[1]); 28 | cudaSetDevice(device); 29 | cout << "Setting to use device " << device << endl; 30 | } else if (CUDA_TEST_DEVICE >= 0) { 31 | // Use the device assigned in the build configuration, but with a lower priority 32 | device = CUDA_TEST_DEVICE; 33 | } 34 | cudaGetDevice(&device); 35 | cout << "Current device id: " << device << endl; 36 | cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); 37 | #endif 38 | // invoke the test.
39 | return RUN_ALL_TESTS(); 40 | } 41 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_data/sample_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/src/caffe/test/test_data/sample_data.h5 -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_data/sample_data_2_gzip.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/src/caffe/test/test_data/sample_data_2_gzip.h5 -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_data/sample_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/sample_data.h5 2 | src/caffe/test/test_data/sample_data_2_gzip.h5 3 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_data/solver_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/caffe_densebox/src/caffe/test/test_data/solver_data.h5 -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_data/solver_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/solver_data.h5 2 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include "glog/logging.h" 2 | #include "gtest/gtest.h" 3 | 4 | #include "caffe/internal_thread.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | #include "caffe/test/test_caffe_main.hpp" 8 | 9 | namespace caffe { 10 | 11 | 12 | class InternalThreadTest : public ::testing::Test {}; 13 | 14 | TEST_F(InternalThreadTest, TestStartAndExit) { 15 | InternalThread thread; 16 | EXPECT_FALSE(thread.is_started()); 17 | thread.StartInternalThread(); 18 | EXPECT_TRUE(thread.is_started()); 19 | thread.StopInternalThread(); 20 | EXPECT_FALSE(thread.is_started()); 21 | } 22 | 23 | class TestThreadA : public InternalThread { 24 | void InternalThreadEntry() { 25 | EXPECT_EQ(4244559767, caffe_rng_rand()); 26 | } 27 | }; 28 | 29 | class TestThreadB : public InternalThread { 30 | void InternalThreadEntry() { 31 | EXPECT_EQ(1726478280, caffe_rng_rand()); 32 | } 33 | }; 34 | 35 | TEST_F(InternalThreadTest, TestRandomSeed) { 36 | TestThreadA t1; 37 | Caffe::set_random_seed(9658361); 38 | t1.StartInternalThread(); 39 | t1.StopInternalThread(); 40 | 41 | TestThreadA t2; 42 | Caffe::set_random_seed(9658361); 43 | t2.StartInternalThread(); 44 | t2.StopInternalThread(); 45 | 46 | TestThreadB t3; 47 | Caffe::set_random_seed(3435563); 48 | t3.StartInternalThread(); 49 | t3.StopInternalThread(); 50 | } 51 | 52 | } // namespace caffe 53 | 54 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/test/test_protobuf.cpp: -------------------------------------------------------------------------------- 1 | // This is simply a 
script that tries serializing protocol buffer in text 2 | // format. Nothing special here and no actual code is being tested. 3 | #include 4 | 5 | #include "google/protobuf/text_format.h" 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/test/test_caffe_main.hpp" 11 | 12 | namespace caffe { 13 | 14 | class ProtoTest : public ::testing::Test {}; 15 | 16 | TEST_F(ProtoTest, TestSerialization) { 17 | LayerParameter param; 18 | param.set_name("test"); 19 | param.set_type("Test"); 20 | std::cout << "Printing in binary format." << std::endl; 21 | std::cout << param.SerializeAsString() << std::endl; 22 | std::cout << "Printing in text format." << std::endl; 23 | std::string str; 24 | google::protobuf::TextFormat::PrintToString(param, &str); 25 | std::cout << str << std::endl; 26 | EXPECT_TRUE(true); 27 | } 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/util/cudnn.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include "caffe/util/cudnn.hpp" 3 | 4 | namespace caffe { 5 | namespace cudnn { 6 | 7 | float dataType::oneval = 1.0; 8 | float dataType::zeroval = 0.0; 9 | const void* dataType::one = 10 | static_cast(&dataType::oneval); 11 | const void* dataType::zero = 12 | static_cast(&dataType::zeroval); 13 | 14 | double dataType::oneval = 1.0; 15 | double dataType::zeroval = 0.0; 16 | const void* dataType::one = 17 | static_cast(&dataType::oneval); 18 | const void* dataType::zero = 19 | static_cast(&dataType::zeroval); 20 | 21 | } // namespace cudnn 22 | } // namespace caffe 23 | #endif 24 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/util/db.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/util/db.hpp" 2 | #include "caffe/util/db_leveldb.hpp" 3 | #include "caffe/util/db_lmdb.hpp" 4 | 5 | #include 6 | 7 | namespace caffe { namespace db { 8 | 9 | DB* GetDB(DataParameter::DB backend) { 10 | switch (backend) { 11 | #ifdef USE_LEVELDB 12 | case DataParameter_DB_LEVELDB: 13 | return new LevelDB(); 14 | #endif // USE_LEVELDB 15 | #ifdef USE_LMDB 16 | case DataParameter_DB_LMDB: 17 | return new LMDB(); 18 | #endif // USE_LMDB 19 | default: 20 | LOG(FATAL) << "Unknown database backend"; 21 | return NULL; 22 | } 23 | } 24 | 25 | DB* GetDB(const string& backend) { 26 | #ifdef USE_LEVELDB 27 | if (backend == "leveldb") { 28 | return new LevelDB(); 29 | } 30 | #endif // USE_LEVELDB 31 | #ifdef USE_LMDB 32 | if (backend == "lmdb") { 33 | return new LMDB(); 34 | } 35 | #endif // USE_LMDB 36 | LOG(FATAL) << "Unknown database backend"; 37 | return NULL; 38 | } 39 | 40 | } // namespace db 41 | } // namespace caffe 42 | -------------------------------------------------------------------------------- /caffe_densebox/src/caffe/util/db_leveldb.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_LEVELDB 2 | #include "caffe/util/db_leveldb.hpp" 3 | 4 | #include 5 | 6 | namespace caffe { namespace db { 7 | 8 | void LevelDB::Open(const string& source, Mode mode) { 9 | leveldb::Options options; 10 | options.block_size = 65536; 11 | options.write_buffer_size = 268435456; 12 | options.max_open_files = 100; 13 | options.error_if_exists = mode == NEW; 14 | options.create_if_missing = mode != READ; 15 | leveldb::Status status = leveldb::DB::Open(options, source, 
&db_); 16 | CHECK(status.ok()) << "Failed to open leveldb " << source 17 | << std::endl << status.ToString(); 18 | LOG(INFO) << "Opened leveldb " << source; 19 | } 20 | 21 | } // namespace db 22 | } // namespace caffe 23 | #endif // USE_LEVELDB 24 | -------------------------------------------------------------------------------- /caffe_densebox/src/gtest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) 2 | caffe_default_properties(gtest) 3 | 4 | #add_library(gtest_main gtest_main.cc) 5 | #target_link_libraries(gtest_main gtest) 6 | -------------------------------------------------------------------------------- /caffe_densebox/tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Collect source files 2 | file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 3 | 4 | # Build each source file independently 5 | foreach(source ${srcs}) 6 | get_filename_component(name ${source} NAME_WE) 7 | 8 | # caffe target already exits 9 | if(name MATCHES "caffe") 10 | set(name ${name}.bin) 11 | endif() 12 | 13 | # target 14 | add_executable(${name} ${source}) 15 | target_link_libraries(${name} ${Caffe_LINK}) 16 | caffe_default_properties(${name}) 17 | 18 | # set back RUNTIME_OUTPUT_DIRECTORY 19 | caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools") 20 | caffe_set_solution_folder(${name} tools) 21 | 22 | # restore output name without suffix 23 | if(name MATCHES "caffe.bin") 24 | set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe) 25 | endif() 26 | 27 | # Install 28 | install(TARGETS ${name} DESTINATION bin) 29 | endforeach(source) 30 | -------------------------------------------------------------------------------- /caffe_densebox/tools/RectMapTest.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Main.cpp 3 | * 4 | * Created on: 2015年5月11日 5 | * Author: Alan_Huang 6 | */ 7 | #include "caffe/util/RectMap.hpp" 8 | #include 9 | #include 10 | using namespace std; 11 | int main(){ 12 | 13 | RectMap map; 14 | 15 | float scale_start = 1; 16 | float scale_end = -3; 17 | float scale_step = -0.45; 18 | int img_h = 600; 19 | int img_w = 800; 20 | // int rect_id = 0; 21 | for(float scale = scale_start; scale >= scale_end; scale+=scale_step){ 22 | float cur_scale = pow(2,scale); 23 | Rect rect = Rect(RectPoint(), int(img_h * cur_scale), int(img_w * cur_scale)); 24 | map.PlaceRect(rect); 25 | // std::cout<<"scale "< 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | 13 | #include "caffe/caffe.hpp" 14 | #include "caffe/util/io.hpp" 15 | #include "caffe/util/util_others.hpp" 16 | #include "caffe/util/buffered_reader.hpp" 17 | 18 | using namespace caffe; 19 | int main(int argc, char** argv) { 20 | 21 | if (argc < 3) { 22 | LOG(ERROR)<< "generate_random_number.bin"<< 23 | " output_name"<< 24 | " number "; 25 | return 0; 26 | } 27 | string out_file_name = string(argv[1]); 28 | FILE* fd = fopen(out_file_name.c_str(),"w"); 29 | int number = atoi(argv[2]); 30 | Blob blob; 31 | blob.Reshape(1,1,1,number); 32 | FillerParameter filler_param; 33 | GaussianFiller filler(filler_param); 34 | filler.Fill(&blob); 35 | const float* blob_data = blob.cpu_data(); 36 | for(int i=0; i < number; ++i){ 37 | fprintf(fd, "%f ",blob_data[i]); 38 | } 39 | fclose(fd); 40 | return 0; 41 | } 42 | 
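The generate_random_number tool above writes its Gaussian samples as space-separated plain text, so the output can be loaded back for a quick sanity check, for example with NumPy. This is only a sketch; random.txt stands in for whatever output name was passed as the first argument:

import numpy as np

samples = np.loadtxt("random.txt")  # hypothetical output file written by generate_random_number.bin
print(samples.size, samples.mean(), samples.std())  # roughly N(0, 1) for the default GaussianFiller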
-------------------------------------------------------------------------------- /caffe_densebox/tools/net_speed_benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe_densebox/tools/show_class_color.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "caffe/caffe.hpp" 12 | #include "caffe/util/io.hpp" 13 | #include "caffe/layers/fcn_data_layers.hpp" 14 | 15 | using namespace caffe; 16 | using std::vector; 17 | using std::string; 18 | 19 | 20 | int main(int argc, char** argv) { 21 | if (argc < 3) { 22 | LOG(ERROR)<< "show_class_color.bin class_list " 23 | << "output_name "; 24 | return 0; 25 | } 26 | vector class_names_; 27 | string file_list_name = string(argv[1]); 28 | LOG(INFO) << "Opening class list file " << file_list_name; 29 | std::ifstream infile(file_list_name.c_str()); 30 | CHECK(infile.good()); 31 | string class_name; 32 | while(std::getline(infile,class_name)){ 33 | if(class_name.empty()) continue; 34 | class_names_.push_back(class_name); 35 | } 36 | infile.close(); 37 | 38 | string out_name = string(argv[2]); 39 | ShowClassColor(class_names_,out_name); 40 | LOG(INFO)<<"saving img : "< 2 | 3 | void test() 4 | { 5 | int len=sizeof(int)*8; 6 | printf("sizeof(int)=%d\n",len); 7 | 8 | len=sizeof(int *)*8; 9 | printf("sizeof(int*)=%d\n",len); 10 | 11 | #ifdef _MSC_VER 12 | printf("_MSC_VER is defined\n"); 13 | #endif 14 | 15 | 16 | #ifdef __GNUC__ 17 | printf("__GNUC__ is defined\n"); 18 | #endif 19 | 20 | #ifdef __INTEL__ 21 | printf("__INTEL__ is defined\n"); 22 | #endif 23 | 24 | #ifdef __i386__ 25 | printf("__i386__ is defined\n"); 26 | #endif 27 | 28 | #ifdef __x86_64__ 29 | printf("__x86_64__ is defined\n"); 30 | #endif 31 | 32 | #ifdef _WIN32 33 | printf("_WIN32 is defined\n"); 34 | #endif 35 | 36 | #ifdef _WIN64 37 | printf("_WIN64 is defined\n"); 38 | #endif 39 | 40 | 41 | #ifdef __linux__ 42 | printf("__linux__ is defined\n"); 43 | #endif 44 | 45 | #ifdef __LP64__ 46 | printf("__LP64__ is defined\n"); 47 | #endif 48 | 49 | 50 | #ifdef __amd64 51 | printf("__amd64 is defined\n"); 52 | #endif 53 | 54 | #ifdef __APPLE__ 55 | printf("__APPLE__ is defined\n"); 56 | #endif 57 | 58 | #ifdef __DARWIN__ 59 | printf("__DARWIN__ is defined\n"); 60 | #endif 61 | 62 | } 63 | 64 | int main(int argc, char* argv[]) 65 | { 66 | test(); 67 | return 0; 68 | } 69 | -------------------------------------------------------------------------------- /caffe_densebox/tools/test_bufferedImgVideoReader.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | #include "caffe/caffe.hpp" 5 | #include "caffe/util/io.hpp" 6 | #include "caffe/util/util_others.hpp" 7 | #include "caffe/util/buffered_reader.hpp" 8 | 9 | using namespace caffe; 10 | int main(int argc, char** argv) { 11 | BufferedColorJPGReader * buffered_reader = new BufferedColorIMGAndAVIReader("",5); 12 | buffered_reader->Show("divx4803.avi_1222","1.jpg"); 13 | 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /caffe_densebox/tools/test_net.cpp: 
-------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe_densebox/tools/time_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | 13 | #include "caffe/caffe.hpp" 14 | #include "caffe/util/io.hpp" 15 | #include "caffe/util/util_others.hpp" 16 | #include "caffe/util/buffered_reader.hpp" 17 | 18 | using namespace caffe; 19 | int main(int argc, char** argv) { 20 | 21 | if (argc < 3) { 22 | LOG(ERROR)<< "make_ref_result.bin"<< 23 | " net_model "<< 24 | " net_proto "; 25 | return 0; 26 | } 27 | string net_model_name = string(argv[1]); 28 | string net_proto_name = string(argv[2]); 29 | int iter_num = 200; 30 | // openblas_set_num_threads(8); 31 | Net caffe_test_net(net_proto_name,caffe::TEST); 32 | 33 | std::cout<<"net initialized"; 34 | caffe_test_net.CopyTrainedLayersFrom(net_model_name); 35 | caffe_test_net.Reshape(); 36 | vector*> input_blobs = caffe_test_net.input_blobs(); 37 | FillerParameter filler_param; 38 | GaussianFiller filler(filler_param); 39 | filler.Fill(input_blobs[0]); 40 | CHECK_EQ(input_blobs.size(),1); 41 | Timer timer; 42 | timer.Start(); 43 | for(int i=0; i < iter_num; ++i){ 44 | caffe_test_net.ForwardPrefilled(); 45 | } 46 | double total_time = timer.MicroSeconds(); 47 | std::cout<<"Time cost for "<> $dst_file 33 | 34 | rm -f $label_file 35 | rm -f $img_file 36 | done 37 | 38 | # Generate image name and size infomation. 39 | if [ $dataset == "test" ] 40 | then 41 | $bash_dir/../../build/tools/get_image_size $root_dir $dst_file $bash_dir/$dataset"_name_size.txt" 42 | fi 43 | 44 | # Shuffle trainval file. 
45 | if [ $dataset == "trainval" ] 46 | then 47 | rand_file=$dst_file.random 48 | cat $dst_file | perl -MList::Util=shuffle -e 'print shuffle();' > $rand_file 49 | mv $rand_file $dst_file 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /experiment/kitti/draw_net.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=../../../caffe_densebox/python:$PYTHONPATH 2 | ../../caffe_densebox/python/draw_net.py multiscale-sgnet-03.prototxt multiscale-sgnet-03.png 3 | -------------------------------------------------------------------------------- /experiment/kitti/multiscale-sgnet-03-solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "./multiscale-sgnet-03.prototxt" 2 | test_compute_loss: true 3 | test_iter: 200 4 | test_interval: 1000 5 | clip_gradients: 5 6 | base_lr: 0.01 7 | momentum: 0.9 8 | weight_decay: 0.0005 9 | lr_policy: "inv" 10 | gamma: 0.0001 # 0.0001 11 | 12 | power: 0.75 13 | display: 1 14 | max_iter: 450000 # 450000 15 | snapshot: 1000 16 | snapshot_prefix: "snapshot/sgnet03" 17 | solver_mode: GPU 18 | device_id: 1 19 | 20 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/README.md: -------------------------------------------------------------------------------- 1 | # script for training and testing densebox for kitti car detection # 2 | 3 | ### Step ### 4 | 5 | * Prepare training and testing data: sh prepare_data.sh 6 | * Start training: sh train.sh 7 | * Show intermediate detection results given a trained model: sh show_result.sh 8 | * Show detection results and compute average precision given a trained model: sh test_model.sh 9 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/multiscale-sgnet-03-solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "./multiscale-sgnet-03.prototxt" 2 | test_compute_loss: true 3 | test_iter: 36 4 | test_interval: 1000 5 | clip_gradients: 5 6 | base_lr: 0.01 7 | momentum: 0.9 8 | weight_decay: 0.0005 9 | lr_policy: "inv" 10 | gamma: 0.0001 # 0.0001 11 | 12 | power: 0.75 13 | display: 1 14 | max_iter: 650000 # 450000 15 | snapshot: 1000 16 | snapshot_prefix: "snapshot/sgnet03" 17 | solver_mode: GPU 18 | device_id: 1 19 | 20 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data.sh: -------------------------------------------------------------------------------- 1 | # kitti_dir is the kitti dataset root path 2 | # output_dir specifies the intermediate preprocessed data 3 | python prepare_data/python/prepare.py --kitti_dir /home/vis/huyang/data/data-orig/kitti --output_dir cache 4 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/README.md: -------------------------------------------------------------------------------- 1 | # Preparing training and testing data for kitti car detection # 2 | 3 | ### Note ### 4 | 5 | * Both matlab script and python script work for the same data preprocessing. 6 | * Note that currently, the python script for preprocessing kitti car dataset is the latest. 7 | * There are slightly differences between the python script and matlab script. 
8 | 9 | ### Train / Validation Split ### 10 | * Since there are no kitti validation set, we use 95% training images for training and 5% training images for validation. 11 | * The training and validation set are randomly selected, so there may be small difference in terms of performance evaluation. 12 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/matlab/.gitignore: -------------------------------------------------------------------------------- 1 | mat 2 | annotations 3 | train_jpg 4 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/matlab/objectKITTITrain.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/old_densebox/prepare_data/matlab/objectKITTITrain.mat -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/matlab/plotbox.m: -------------------------------------------------------------------------------- 1 | function varargout = plotbox(b,varargin) 2 | % Plot bounding box. 3 | 4 | nbox = size(b,1); 5 | 6 | hs = []; 7 | hold on 8 | for i=1:nbox 9 | h = plot([b(i,1) b(i,3) b(i,3) b(i,1) b(i,1)], ... 10 | [b(i,2) b(i,2) b(i,4) b(i,4) b(i,2)], ... 11 | varargin{:}); 12 | hs = [hs; h]; 13 | end 14 | 15 | if nargout == 1 16 | varargout{:} = hs; 17 | end 18 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/matlab/val_idx.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/old_densebox/prepare_data/matlab/val_idx.mat -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/prepare_data/python/.gitignore: -------------------------------------------------------------------------------- 1 | pkl 2 | train_jpg 3 | annotations 4 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/show_result.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="sgnet03" 4 | gpu_id=1 5 | cpu_or_gpu="GPU" 6 | net_name="sgnet03_iter_597000.caffemodel" 7 | output_dir="cache" 8 | threshold="0.48" 9 | 10 | export PIC_PRINT=0 11 | 12 | cmd="../../caffe_densebox/build/tools/show_output.bin ./show_multiscale-sgnet-03.prototxt ./snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${threshold}" 13 | echo $cmd 14 | 15 | ${cmd} 16 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/test_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="sgnet03" 4 | gpu_id=1 5 | cpu_or_gpu="GPU" 6 | net_name="sgnet03" 7 | output_dir="cache" 8 | start_iter=500000 9 | end_iter=510000 10 | step_iter=100000 11 | 12 | gt_file=./val_gt_file_list.txt 13 | show_result=1 14 | show_time=0 15 | 16 | # export PIC_PRINT=1 17 | # export SHOW_TIME=1 18 | 19 | cmd="../../../caffe_densebox/build/tools/select_model_pyramid_test.bin ./test_multiscale-sgnet-03.prototxt snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${start_iter} ${end_iter} 
${step_iter} ${gt_file} ${show_result} ${show_time}" 20 | echo $cmd 21 | 22 | ${cmd} $1 2>&1 | tee ${tee_arg} log/${model}_select_model.log 23 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | snapshot="--snapshot=snapshot/sgnet03/sgnet03_iter_515000.solverstate" 4 | 5 | model="sgnet03" 6 | gpu_id=0,1,2,3 7 | 8 | if [ ! -d "log" ]; then 9 | mkdir "log" 10 | fi 11 | if [ ! -d "snapshot" ]; then 12 | mkdir "snapshot" 13 | fi 14 | if [ ! -d "snapshot/${model}" ]; then 15 | mkdir "snapshot/${model}" 16 | fi 17 | 18 | tee_arg='' 19 | if [ $# == 1 ]; then 20 | tee_arg='-a' 21 | echo 'Restore from '$1 22 | fi 23 | 24 | ############################################# 25 | # the following variables are used for debug 26 | # 1 for activation 27 | 28 | # print the input label in ImageMultiScaleDataLayer/ImageDataLayer 29 | export LABEL_PRINT=0 30 | 31 | # print the input image 32 | export PIC_PRINT=0 33 | 34 | # print the mask in label_related_dropout layer 35 | export LABEL_RELATED_DROPOUT_PRINT=0 36 | 37 | # print the hard samples which are selected during bootstrap step 38 | export BOOTSTRAP_PRINT=0 39 | 40 | # redirect info to error(glog) 41 | export GLOG_logtostderr=1 42 | 43 | ############################################# 44 | 45 | if [ -z ${snapshot} ]; then 46 | cmd="caffe train --solver=./multiscale-sgnet-03-solver.prototxt --gpu=${gpu_id} " 47 | else 48 | cmd="caffe train --solver=./multiscale-sgnet-03-solver.prototxt ${snapshot} --gpu=${gpu_id} " 49 | fi 50 | echo $cmd 51 | 52 | #${cmd} $1 2>&1 | tee ${tee_arg} log/${model}.log 53 | nohup ${cmd} 2>&1 1>log/${model}.log & 54 | #nohup ${cmd} 2>&1 1>temp.log & 55 | 56 | -------------------------------------------------------------------------------- /experiment/kitti/old_densebox/val_gt_file_list.txt: -------------------------------------------------------------------------------- 1 | ./cache/annotations/new_original_val_gt.txt 2 | -------------------------------------------------------------------------------- /experiment/kitti/prepare_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Generate the list and image for densebox. 3 | # The intermediate preprocessed data would be put under ./cache folder. 4 | # 5 | # Usage 6 | # ./prepare_data.sh kitti_dir 7 | # where kitti_dir is the kitti dataset root path 8 | # 9 | # Example 10 | # ./prepare_data.sh /media/yi/DATA/data-orig/kitti 11 | python prepare_data/python/prepare.py --kitti_dir $1 --output_dir cache 12 | -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/README.md: -------------------------------------------------------------------------------- 1 | # Preparing training and testing data for kitti car detection # 2 | 3 | ### Note ### 4 | 5 | * Both matlab script and python script work for the same data preprocessing. 6 | * Note that currently, the python script for preprocessing kitti car dataset is the latest. 7 | * There are slightly differences between the python script and matlab script. 8 | 9 | ### Train / Validation Split ### 10 | * Since there are no kitti validation set, we use 95% training images for training and 5% training images for validation. 11 | * The training and validation set are randomly selected, so there may be small difference in terms of performance evaluation. 
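For reference, the 95/5 random split described above can be reproduced with a few lines of Python. This is only a sketch, not the logic in prepare.py; the list of training image ids and the seed are illustrative assumptions:

import random

image_ids = ["%06d" % i for i in range(7481)]  # KITTI object training set has 7481 images
random.seed(0)                                 # fix the seed so the split is reproducible
random.shuffle(image_ids)
num_val = int(0.05 * len(image_ids))
val_ids, train_ids = image_ids[:num_val], image_ids[num_val:]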
12 | -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/matlab/.gitignore: -------------------------------------------------------------------------------- 1 | mat 2 | annotations 3 | train_jpg 4 | -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/matlab/objectKITTITrain.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/prepare_data/matlab/objectKITTITrain.mat -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/matlab/plotbox.m: -------------------------------------------------------------------------------- 1 | function varargout = plotbox(b,varargin) 2 | % Plot bounding box. 3 | 4 | nbox = size(b,1); 5 | 6 | hs = []; 7 | hold on 8 | for i=1:nbox 9 | h = plot([b(i,1) b(i,3) b(i,3) b(i,1) b(i,1)], ... 10 | [b(i,2) b(i,2) b(i,4) b(i,4) b(i,2)], ... 11 | varargin{:}); 12 | hs = [hs; h]; 13 | end 14 | 15 | if nargout == 1 16 | varargout{:} = hs; 17 | end 18 | -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/matlab/val_idx.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/prepare_data/matlab/val_idx.mat -------------------------------------------------------------------------------- /experiment/kitti/prepare_data/python/.gitignore: -------------------------------------------------------------------------------- 1 | pkl 2 | train_jpg 3 | annotations 4 | -------------------------------------------------------------------------------- /experiment/kitti/show_result.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="sgnet03" 4 | gpu_id=0,1 5 | cpu_or_gpu="GPU" 6 | net_name="sgnet03_iter_597000.caffemodel" 7 | output_dir="cache" 8 | threshold="0.48" 9 | 10 | export PIC_PRINT=0 11 | 12 | cmd="../../caffe_densebox/build/tools/show_output.bin ./show_multiscale-sgnet-03.prototxt ./snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${threshold}" 13 | echo $cmd 14 | 15 | ${cmd} 16 | -------------------------------------------------------------------------------- /experiment/kitti/snapshot/sgnet03/sgnet03_iter_597000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/snapshot/sgnet03/sgnet03_iter_597000.caffemodel -------------------------------------------------------------------------------- /experiment/kitti/snapshot/sgnet03/sgnet03_iter_597000.solverstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/experiment/kitti/snapshot/sgnet03/sgnet03_iter_597000.solverstate -------------------------------------------------------------------------------- /experiment/kitti/ssd/.gitignore: -------------------------------------------------------------------------------- 1 | data/ 2 | examples/ 3 | jobs/ 4 | models/ 5 | -------------------------------------------------------------------------------- /experiment/kitti/ssd/eval_ssd.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dataset="KITTI" 4 | iterations="10000" 5 | input_size="552x552" 6 | GPU_ID="2" 7 | base_model="Inception" 8 | 9 | echo ${base_model} 10 | caffe train \ 11 | --solver=models/${base_model}Net/${dataset}/SSD_${input_size}/eval_solver.prototxt \ 12 | --weights=models/${base_model}Net/${dataset}/SSD_${input_size}/${base_model}_${dataset}_SSD_${input_size}_iter_${iterations}.caffemodel \ 13 | --gpu=${GPU_ID} 14 | 15 | -------------------------------------------------------------------------------- /experiment/kitti/ssd/inception/552x552/eval_solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/InceptionNet/KITTI/SSD_552x552/train.prototxt" 2 | test_net: "models/InceptionNet/KITTI/SSD_552x552/test.prototxt" 3 | test_iter: 331 4 | test_interval: 10000 5 | base_lr: 0.001 6 | display: 10 7 | max_iter: 0 #120000 8 | lr_policy: "step" 9 | gamma: 0.1 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | stepsize: 0 #80000 13 | snapshot: 10000 14 | snapshot_prefix: "models/InceptionNet/KITTI/SSD_552x552/Inception_KITTI_SSD_552x552" 15 | solver_mode: GPU 16 | device_id: 0 17 | debug_info: false 18 | snapshot_after_train: true 19 | test_initialization: false 20 | average_loss: 10 21 | iter_size: 1 22 | type: "SGD" 23 | eval_type: "detection" 24 | ap_version: "11point" 25 | 26 | -------------------------------------------------------------------------------- /experiment/kitti/ssd/inception/552x552/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/InceptionNet/KITTI/SSD_552x552/train.prototxt" 2 | #test_net: "models/InceptionNet/KITTI/SSD_552x552/test.prototxt" 3 | #test_iter: 331 4 | #test_interval: 10000 5 | base_lr: 0.001 6 | display: 10 7 | max_iter: 120000 8 | lr_policy: "step" 9 | gamma: 0.1 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | stepsize: 80000 13 | snapshot: 10000 14 | snapshot_prefix: "models/InceptionNet/KITTI/SSD_552x552/Inception_KITTI_SSD_552x552" 15 | solver_mode: GPU 16 | device_id: 0 17 | debug_info: false 18 | snapshot_after_train: true 19 | test_initialization: false 20 | average_loss: 10 21 | iter_size: 1 22 | type: "SGD" 23 | eval_type: "detection" 24 | ap_version: "11point" 25 | 26 | -------------------------------------------------------------------------------- /experiment/kitti/ssd/labelmap.prototxt: -------------------------------------------------------------------------------- 1 | item { 2 | name: "none_of_the_above" 3 | label: 0 4 | display_name: "background" 5 | } 6 | item { 7 | name: "Car" 8 | label: 1 9 | display_name: "Car" 10 | } 11 | item { 12 | name: "Pedestrian" 13 | label: 2 14 | display_name: "Pedestrian" 15 | } 16 | item { 17 | name: "Cyclist" 18 | label: 3 19 | display_name: "Cyclist" 20 | } 21 | 22 | -------------------------------------------------------------------------------- /experiment/kitti/test_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="sgnet03" 4 | gpu_id=0,1 5 | cpu_or_gpu="GPU" 6 | net_name="sgnet03" 7 | output_dir="cache" 8 | start_iter=597000 9 | end_iter=598000 10 | step_iter=1000 11 | 12 | gt_file=./val_gt_file_list.txt 13 | show_result=1 14 | show_time=0 15 | 16 | # export PIC_PRINT=1 17 | # export SHOW_TIME=1 18 | 19 | cmd="../../caffe_densebox/build/tools/select_model_pyramid_test.bin ./test_multiscale-sgnet-03.prototxt 
snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${start_iter} ${end_iter} ${step_iter} ${gt_file} ${show_result} ${show_time}" 20 | echo $cmd 21 | 22 | ${cmd} $1 2>&1 | tee ${tee_arg} log/${model}_select_model.log 23 | -------------------------------------------------------------------------------- /experiment/kitti/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # snapshot="--snapshot=snapshot/sgnet03/sgnet03_iter_597000.solverstate" 4 | 5 | model="sgnet03" 6 | gpu_id=0,1 7 | 8 | if [ ! -d "log" ]; then 9 | mkdir "log" 10 | fi 11 | if [ ! -d "snapshot" ]; then 12 | mkdir "snapshot" 13 | fi 14 | if [ ! -d "snapshot/${model}" ]; then 15 | mkdir "snapshot/${model}" 16 | fi 17 | 18 | tee_arg='' 19 | if [ $# == 1 ]; then 20 | tee_arg='-a' 21 | echo 'Restore from '$1 22 | fi 23 | 24 | ############################################# 25 | # the following variables are used for debug 26 | # 1 for activation 27 | 28 | # print the input label in ImageMultiScaleDataLayer/ImageDataLayer 29 | export LABEL_PRINT=0 30 | 31 | # print the input image 32 | export PIC_PRINT=0 33 | 34 | # print the mask in label_related_dropout layer 35 | export LABEL_RELATED_DROPOUT_PRINT=0 36 | 37 | # print the hard samples which are selected during bootstrap step 38 | export BOOTSTRAP_PRINT=0 39 | 40 | # redirect info to error(glog) 41 | export GLOG_logtostderr=1 42 | 43 | ############################################# 44 | 45 | if [ -z ${snapshot} ]; then 46 | cmd="../../caffe_densebox/build/tools/caffe train --solver=./multiscale-sgnet-03-solver.prototxt --gpu=${gpu_id} " 47 | else 48 | cmd="../../caffe_densebox/build/tools/caffe train --solver=./multiscale-sgnet-03-solver.prototxt ${snapshot} --gpu=${gpu_id} " 49 | fi 50 | echo $cmd 51 | 52 | ${cmd} $1 2>&1 | tee ${tee_arg} log/${model}.log 53 | 54 | -------------------------------------------------------------------------------- /experiment/kitti/val_gt_file_list.txt: -------------------------------------------------------------------------------- 1 | ./cache/annotations/new_original_val_gt.txt 2 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/dense_v2_inception/dense_v2_inception-solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "dense_v2_inception.prototxt" 2 | test_compute_loss: true 3 | test_iter: 200 4 | test_interval: 1000 5 | base_lr: 0.01 6 | clip_gradients: 5 7 | momentum: 0.9 8 | weight_decay: 0.0005 9 | lr_policy: "inv" 10 | gamma: 0.0001 11 | power: 0.75 12 | display: 1 13 | max_iter: 450000 14 | snapshot: 1000 15 | snapshot_prefix: "snapshot/VOC/dense_v2_inception" 16 | solver_mode: GPU 17 | device_id: 2 18 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/dense_v2_inception/select.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="VOC" 4 | gpu_id=2 5 | cpu_or_gpu="GPU" 6 | net_name="dense_v2_inception" 7 | output_dir="cache" 8 | start_iter=40000 9 | end_iter=45000 10 | step_iter=5000 11 | 12 | gt_file=./val_gt_file_list.txt 13 | show_result=1 14 | show_time=0 15 | 16 | # export PIC_PRINT=1 17 | # export SHOW_TIME=1 18 | 19 | cmd="select_model_pyramid_test ./test_dense_v2_inception.prototxt snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${start_iter} ${end_iter} ${step_iter} ${gt_file} ${show_result} ${show_time}" 
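# As the variable names above suggest, the arguments are: test prototxt,
# snapshot prefix, output dir, CPU or GPU mode, gpu id, the start/end/step
# iterations of the snapshots to evaluate, the ground-truth list file, and
# the show_result / show_time flags.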
20 | echo $cmd 21 | 22 | ${cmd} $1 2>&1 | tee ${tee_arg} log/${model}_select_model.log 23 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/dense_v2_inception/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | class="VOC" 4 | GPU_id="3" 5 | CPUorGPU="GPU" 6 | NetName="VOC/multiscale-vgg-16_iter_20000.caffemodel" 7 | outputFld="TestResultVOC2007" 8 | show_result="1" 9 | out_name="1000_voc_vgg_16.txt" 10 | show_forward_time="1" 11 | 12 | # export PIC_PRINT=1 13 | #export SHOW_TIME=1 14 | 15 | 16 | mode_str="${class}/multiscale-vgg-16" 17 | echo $mode_str 18 | 19 | cmd="pyramid_test test_multi-scale-vgg-16.prototxt snapshot/${NetName} cache/${outputFld} ${CPUorGPU} ${GPU_id} ${show_result} ${out_name} ${show_forward_time}" 20 | echo $cmd 21 | 22 | ${cmd} 23 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/dense_v2_inception/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #snapshot="--snapshot=snapshot/VOC/multiscale-vgg-16_iter_24000.solverstate" 4 | #weight="--weights=./VGG_ILSVRC_16_layers.caffemodel" 5 | 6 | gpu_id=0,1,2,3 7 | class='VOC' 8 | mode_str="${class}/dense_v2_inception" 9 | echo $mode_str 10 | if [ ! -d "log" ]; then 11 | mkdir "log" 12 | fi 13 | 14 | if [ ! -d "log/${class}" ]; then 15 | mkdir "log/${class}" 16 | fi 17 | 18 | tee_arg='' 19 | if [ $# == 1 ]; then 20 | tee_arg='-a' 21 | echo 'Restore from '$1 22 | fi 23 | 24 | ############################################# 25 | # the following variables are used for debug 26 | # 1 for activation 27 | 28 | # print the input label in ImageMultiScaleDataLayer/ImageDataLayer 29 | export LABEL_PRINT=0 30 | 31 | # print the input image 32 | export PIC_PRINT=0 33 | 34 | # print the mask in label_related_dropout layer 35 | export LABEL_RELATED_DROPOUT_PRINT=0 36 | 37 | # print the hard samples which are selected during bootstrap step 38 | export BOOTSTRAP_PRINT=0 39 | 40 | # redirect info to error(glog) 41 | export GLOG_logtostderr=1 42 | 43 | ############################################# 44 | 45 | cmd="caffe train --solver=dense_v2_inception-solver.prototxt ${snapshot} --gpu=${gpu_id}" 46 | echo $cmd 47 | 48 | #${cmd} $1 2>&1 | tee ${tee_arg} log/${mode_str}.log 49 | #nohup ${cmd} 2>&1 1>log/${model_str}.log & 50 | nohup ${cmd} 2>&1 1>dense_v2_inception.log & 51 | 52 | 53 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/dense_v2_inception/val_gt_file_list.txt: -------------------------------------------------------------------------------- 1 | ./gt/chair_test_gt.txt 2 | ./gt/car_test_gt.txt 3 | ./gt/horse_test_gt.txt 4 | ./gt/person_test_gt.txt 5 | ./gt/bicycle_test_gt.txt 6 | ./gt/cat_test_gt.txt 7 | ./gt/dog_test_gt.txt 8 | ./gt/train_test_gt.txt 9 | ./gt/aeroplane_test_gt.txt 10 | ./gt/diningtable_test_gt.txt 11 | ./gt/tvmonitor_test_gt.txt 12 | ./gt/bird_test_gt.txt 13 | ./gt/bottle_test_gt.txt 14 | ./gt/motorbike_test_gt.txt 15 | ./gt/pottedplant_test_gt.txt 16 | ./gt/boat_test_gt.txt 17 | ./gt/sofa_test_gt.txt 18 | ./gt/sheep_test_gt.txt 19 | ./gt/cow_test_gt.txt 20 | ./gt/bus_test_gt.txt 21 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/multi-scale-vgg-16-solver.prototxt: 
-------------------------------------------------------------------------------- 1 | net: "multi-scale-vgg-16.prototxt" 2 | test_compute_loss: true 3 | test_iter: 200 4 | test_interval: 1000 5 | base_lr: 0.01 6 | clip_gradients: 5 7 | momentum: 0.9 8 | weight_decay: 0.0005 9 | lr_policy: "inv" 10 | gamma: 0.0001 11 | power: 0.75 12 | display: 1 13 | max_iter: 450000 14 | snapshot: 1000 15 | snapshot_prefix: "snapshot/VOC/multiscale-vgg-16" 16 | solver_mode: GPU 17 | device_id: 2 18 | 19 | #previous_stage_param{ 20 | # net_prototxt: "VGG_ILSVRC_16_layers_deploy.prototxt" 21 | # net_param_file:"./VGG_ILSVRC_16_layers.caffemodel" 22 | #} 23 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/select.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | model="sgnet03" 4 | gpu_id=3 5 | cpu_or_gpu="GPU" 6 | net_name="sgnet03" 7 | output_dir="cache" 8 | start_iter=40000 9 | end_iter=42000 10 | step_iter=5000 11 | 12 | gt_file=./val_gt_file_list.txt 13 | show_result=1 14 | show_time=0 15 | 16 | # export PIC_PRINT=1 17 | # export SHOW_TIME=1 18 | 19 | cmd="select_model_pyramid_test ./test_multiscale-sgnet-03.prototxt snapshot/${model}/${net_name} ${output_dir} ${cpu_or_gpu} ${gpu_id} ${start_iter} ${end_iter} ${step_iter} ${gt_file} ${show_result} ${show_time}" 20 | echo $cmd 21 | 22 | ${cmd} $1 2>&1 | tee ${tee_arg} log/${model}_select_model.log 23 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/show_map.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | class="VOC" 4 | GPU_id="0" 5 | CPUorGPU="GPU" 6 | NetName="VOC/multiscale-vgg-16-2_iter_66000.caffemodel" 7 | outputFld="output" 8 | threshold="0.1" 9 | 10 | export PIC_PRINT=0 11 | 12 | mode_str="${class}/multiscale-vgg-16-2" 13 | echo $mode_str 14 | 15 | cmd="./build/tools/show_output.bin examples/${mode_str}/show_multi-scale-vgg-16.prototxt snapshot/${NetName} cache/${outputFld} ${CPUorGPU} ${GPU_id} ${threshold}" 16 | echo $cmd 17 | 18 | make -j 8 && ${cmd} 19 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | class="VOC" 4 | GPU_id="3" 5 | CPUorGPU="GPU" 6 | NetName="VOC/multiscale-vgg-16_iter_20000.caffemodel" 7 | outputFld="TestResultVOC2007" 8 | show_result="1" 9 | out_name="1000_voc_vgg_16.txt" 10 | show_forward_time="1" 11 | 12 | # export PIC_PRINT=1 13 | #export SHOW_TIME=1 14 | 15 | 16 | mode_str="${class}/multiscale-vgg-16" 17 | echo $mode_str 18 | 19 | cmd="pyramid_test test_multi-scale-vgg-16.prototxt snapshot/${NetName} cache/${outputFld} ${CPUorGPU} ${GPU_id} ${show_result} ${out_name} ${show_forward_time}" 20 | echo $cmd 21 | 22 | ${cmd} 23 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #snapshot="--snapshot=snapshot/VOC/multiscale-vgg-16_iter_24000.solverstate" 4 | weight="--weights=./VGG_ILSVRC_16_layers.caffemodel" 5 | 6 | gpu_id=0,1,2,3 7 | class='VOC' 8 | mode_str="${class}/multiscale-vgg-16" 9 | echo $mode_str 10 | if [ ! -d "log" ]; then 11 | mkdir "log" 12 | fi 13 | 14 | if [ ! 
-d "log/${class}" ]; then 15 | mkdir "log/${class}" 16 | fi 17 | 18 | tee_arg='' 19 | if [ $# == 1 ]; then 20 | tee_arg='-a' 21 | echo 'Restore from '$1 22 | fi 23 | 24 | ############################################# 25 | # the following variables are used for debug 26 | # 1 for activation 27 | 28 | # print the input label in ImageMultiScaleDataLayer/ImageDataLayer 29 | export LABEL_PRINT=0 30 | 31 | # print the input image 32 | export PIC_PRINT=0 33 | 34 | # print the mask in label_related_dropout layer 35 | export LABEL_RELATED_DROPOUT_PRINT=0 36 | 37 | # print the hard samples which are selected during bootstrap step 38 | export BOOTSTRAP_PRINT=0 39 | 40 | # redirect info to error(glog) 41 | export GLOG_logtostderr=1 42 | 43 | ############################################# 44 | 45 | cmd="caffe train --solver=multi-scale-vgg-16-solver.prototxt ${snapshot} ${weight} --gpu=${gpu_id}" 46 | echo $cmd 47 | 48 | #${cmd} $1 2>&1 | tee ${tee_arg} log/${mode_str}.log 49 | #nohup ${cmd} 2>&1 1>log/${model_str}.log & 50 | nohup ${cmd} 2>&1 1>temp.log & 51 | 52 | 53 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/multiscale-vgg-16/val_gt_file_list.txt: -------------------------------------------------------------------------------- 1 | ./gt/chair_test_gt.txt 2 | ./gt/car_test_gt.txt 3 | ./gt/horse_test_gt.txt 4 | ./gt/person_test_gt.txt 5 | ./gt/bicycle_test_gt.txt 6 | ./gt/cat_test_gt.txt 7 | ./gt/dog_test_gt.txt 8 | ./gt/train_test_gt.txt 9 | ./gt/aeroplane_test_gt.txt 10 | ./gt/diningtable_test_gt.txt 11 | ./gt/tvmonitor_test_gt.txt 12 | ./gt/bird_test_gt.txt 13 | ./gt/bottle_test_gt.txt 14 | ./gt/motorbike_test_gt.txt 15 | ./gt/pottedplant_test_gt.txt 16 | ./gt/boat_test_gt.txt 17 | ./gt/sofa_test_gt.txt 18 | ./gt/sheep_test_gt.txt 19 | ./gt/cow_test_gt.txt 20 | ./gt/bus_test_gt.txt 21 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/prepare_data/VOC2007/Annotations/000001.xml: -------------------------------------------------------------------------------- 1 | 2 | VOC2007 3 | 000001.jpg 4 | 5 | The VOC2007 Database 6 | PASCAL VOC2007 7 | flickr 8 | 341012865 9 | 10 | 11 | Fried Camels 12 | Jinky the Fruit Bat 13 | 14 | 15 | 353 16 | 500 17 | 3 18 | 19 | 0 20 | 21 | dog 22 | Left 23 | 1 24 | 0 25 | 26 | 48 27 | 240 28 | 195 29 | 371 30 | 31 | 32 | 33 | person 34 | Left 35 | 1 36 | 0 37 | 38 | 8 39 | 12 40 | 352 41 | 498 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/prepare_data/VOC2007/Annotations/000002.xml: -------------------------------------------------------------------------------- 1 | 2 | VOC2007 3 | 000002.jpg 4 | 5 | The VOC2007 Database 6 | PASCAL VOC2007 7 | flickr 8 | 329145082 9 | 10 | 11 | hiromori2 12 | Hiroyuki Mori 13 | 14 | 15 | 335 16 | 500 17 | 3 18 | 19 | 0 20 | 21 | train 22 | Unspecified 23 | 0 24 | 0 25 | 26 | 139 27 | 200 28 | 207 29 | 301 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /experiment/voc/old_densebox/prepare_data/VOC2007/Annotations/000003.xml: -------------------------------------------------------------------------------- 1 | 2 | VOC2007 3 | 000003.jpg 4 | 5 | The VOC2007 Database 6 | PASCAL VOC2007 7 | flickr 8 | 138563409 9 | 10 | 11 | RandomEvent101 12 | ? 
13 |
14 |
15 | 500
16 | 375
17 | 3
18 |
19 | 0
20 |
21 | sofa
22 | Unspecified
23 | 0
24 | 0
25 |
26 | 123
27 | 155
28 | 215
29 | 195
30 |
31 |
32 |
33 | chair
34 | Left
35 | 0
36 | 0
37 |
38 | 239
39 | 156
40 | 307
41 | 205
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/VOC2007/Annotations/000007.xml:
--------------------------------------------------------------------------------
1 |
2 | VOC2007
3 | 000007.jpg
4 |
5 | The VOC2007 Database
6 | PASCAL VOC2007
7 | flickr
8 | 194179466
9 |
10 |
11 | monsieurrompu
12 | Thom Zemanek
13 |
14 |
15 | 500
16 | 333
17 | 3
18 |
19 | 0
20 |
21 | car
22 | Unspecified
23 | 1
24 | 0
25 |
26 | 141
27 | 50
28 | 500
29 | 330
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/class_list.txt:
--------------------------------------------------------------------------------
1 | chair
2 | car
3 | horse
4 | person
5 | bicycle
6 | cat
7 | dog
8 | train
9 | aeroplane
10 | diningtable
11 | tvmonitor
12 | bird
13 | bottle
14 | motorbike
15 | pottedplant
16 | boat
17 | sofa
18 | sheep
19 | cow
20 | bus
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gen_test_gt_files_list.py:
--------------------------------------------------------------------------------
1 | # Print the expected ./gt/<class>_test_gt.txt path for each class listed in
2 | # class_list.txt, one path per line.
3 | def main():
4 |     with open('class_list.txt') as class_names_fid:
5 |         for class_name in class_names_fid:
6 |             print('./gt/' + class_name.strip() + '_test_gt.txt')
7 |
8 | if __name__ == '__main__':
9 |     main()
10 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/aeroplane_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/bicycle_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/bird_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/boat_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/bottle_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/bus_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/car_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 7
9 | 13.00 311.00 71.00 51.00 0
10 | 362.00 330.00 138.00 59.00 0
11 | 235.00 328.00 99.00 47.00 0
12 | 175.00 327.00 77.00 37.00 0
13 | 139.00 320.00 50.00 39.00 0
14 | 108.00 325.00 42.00 28.00 0
15 | 84.00 323.00 37.00 27.00 0
16 | 000006
17 | 0
18 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/cat_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/chair_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 1
7 | 239.00 156.00 68.00 49.00 0
8 | 000004
9 | 0
10 | 000006
11 | 6
12 | 255.00 207.00 111.00 168.00 0
13 | 298.00 195.00 34.00 52.00 0
14 | 279.00 190.00 29.00 41.00 0
15 | 137.00 192.00 14.00 7.00 0
16 | 137.00 198.00 19.00 14.00 0
17 | 138.00 211.00 111.00 164.00 0
18 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/cow_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/diningtable_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 1
11 | 154.00 209.00 215.00 166.00 0
12 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/dog_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 1
3 | 48.00 240.00 147.00 131.00 0
4 | 000002
5 | 0
6 | 000003
7 | 0
8 | 000004
9 | 0
10 | 000006
11 | 0
12 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/horse_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/motorbike_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/person_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 1
3 | 8.00 12.00 344.00 486.00 0
4 | 000002
5 | 0
6 | 000003
7 | 0
8 | 000004
9 | 0
10 | 000006
11 | 0
12 |
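The per-class files under prepare_data/gt/ above share one plain-text layout: an image id, then the number of boxes for that image in that class, then that many lines of `xmin ymin width height flag` (the widths and heights match the bndbox spans in the VOC annotations; the trailing flag is 0 everywhere here, presumably a difficult/ignore bit). The snippet below is a minimal reader for that layout, not code from the repo; the helper name `parse_gt_file` is ours.

```python
# Hypothetical reader (not part of the repo) for the gt/*_test_gt.txt and
# gt_val.txt layout shown above: image id, box count, then count lines of
# "xmin ymin width height flag".
def parse_gt_file(path):
    with open(path) as f:
        lines = [line.strip() for line in f if line.strip()]
    boxes = {}
    i = 0
    while i < len(lines):
        image_id = lines[i]
        num_boxes = int(lines[i + 1])
        entries = []
        for row in lines[i + 2:i + 2 + num_boxes]:
            x, y, w, h, flag = row.split()
            entries.append((float(x), float(y), float(w), float(h), int(flag)))
        boxes[image_id] = entries
        i += 2 + num_boxes
    return boxes

if __name__ == '__main__':
    # e.g. {'000001': [], ..., '000003': [(239.0, 156.0, 68.0, 49.0, 0)], ...}
    print(parse_gt_file('gt/chair_test_gt.txt'))
```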
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/pottedplant_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 1
11 | 187.00 135.00 95.00 107.00 0
12 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/sheep_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/sofa_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 1
7 | 123.00 155.00 92.00 40.00 0
8 | 000004
9 | 0
10 | 000006
11 | 0
12 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/train_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 1
5 | 139.00 200.00 68.00 101.00 0
6 | 000003
7 | 0
8 | 000004
9 | 0
10 | 000006
11 | 0
12 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt/tvmonitor_test_gt.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 0
3 | 000002
4 | 0
5 | 000003
6 | 0
7 | 000004
8 | 0
9 | 000006
10 | 0
11 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/gt_val.txt:
--------------------------------------------------------------------------------
1 | 000001
2 | 2
3 | 48.00 240.00 147.00 131.00 0
4 | 8.00 12.00 344.00 486.00 0
5 | 000002
6 | 1
7 | 139.00 200.00 68.00 101.00 0
8 | 000003
9 | 2
10 | 123.00 155.00 92.00 40.00 0
11 | 239.00 156.00 68.00 49.00 0
12 | 000004
13 | 7
14 | 13.00 311.00 71.00 51.00 0
15 | 362.00 330.00 138.00 59.00 0
16 | 235.00 328.00 99.00 47.00 0
17 | 175.00 327.00 77.00 37.00 0
18 | 139.00 320.00 50.00 39.00 0
19 | 108.00 325.00 42.00 28.00 0
20 | 84.00 323.00 37.00 27.00 0
21 | 000006
22 | 8
23 | 187.00 135.00 95.00 107.00 0
24 | 154.00 209.00 215.00 166.00 0
25 | 255.00 207.00 111.00 168.00 0
26 | 298.00 195.00 34.00 52.00 0
27 | 279.00 190.00 29.00 41.00 0
28 | 137.00 192.00 14.00 7.00 0
29 | 137.00 198.00 19.00 14.00 0
30 | 138.00 211.00 111.00 164.00 0
31 |
--------------------------------------------------------------------------------
/experiment/voc/old_densebox/prepare_data/test.txt:
--------------------------------------------------------------------------------
1 | VOC2007/JPEGImages/000001.jpg VOC2007/Annotations/000001.xml
2 | VOC2007/JPEGImages/000002.jpg VOC2007/Annotations/000002.xml
3 | VOC2007/JPEGImages/000003.jpg VOC2007/Annotations/000003.xml
4 | VOC2007/JPEGImages/000004.jpg VOC2007/Annotations/000004.xml
5 | VOC2007/JPEGImages/000006.jpg VOC2007/Annotations/000006.xml
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/DenseBoxV1_alan.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/DenseBoxV1_alan.pdf
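For completeness on the prepare_data files above: test.txt pairs each JPEG with its VOC XML annotation, and the per-class gt files restate those annotations' bounding boxes as `xmin ymin width height flag`. The rough sketch below shows that conversion; it assumes the standard VOC tag names (`object`, `name`, `bndbox`, `xmin`, ...) and the function name is ours, not the repo's.

```python
# Hypothetical converter (not from the repo): rewrite the objects of one VOC
# annotation as "xmin ymin width height flag" lines for a single class,
# matching the box format used by the gt/*.txt files listed above.
import xml.etree.ElementTree as ET

def boxes_for_class(xml_path, class_name):
    rows = []
    root = ET.parse(xml_path).getroot()
    for obj in root.findall('object'):
        if obj.findtext('name') != class_name:
            continue
        bb = obj.find('bndbox')
        xmin = float(bb.findtext('xmin'))
        ymin = float(bb.findtext('ymin'))
        xmax = float(bb.findtext('xmax'))
        ymax = float(bb.findtext('ymax'))
        # The trailing 0 mirrors the flag column in the existing gt files
        # (assumed to be the VOC "difficult"/ignore bit).
        rows.append('%.2f %.2f %.2f %.2f 0' % (xmin, ymin, xmax - xmin, ymax - ymin))
    return rows

if __name__ == '__main__':
    for row in boxes_for_class('VOC2007/Annotations/000007.xml', 'car'):
        print(row)  # 141.00 50.00 359.00 280.00 0
```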
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/Makefile:
--------------------------------------------------------------------------------
1 | TARGET=DenseBoxV1_alan
2 |
3 | all:
4 | 	pdflatex $(TARGET).tex
5 | 	#detex $(TARGET).tex | grep -v '^$$' > $(TARGET).txt
6 | 	bibtex $(TARGET)
7 | 	pdflatex $(TARGET).tex
8 | 	pdflatex $(TARGET).tex
9 | 	rm -f $(TARGET).synctex $(TARGET).aux $(TARGET).log $(TARGET).bbl $(TARGET).blg $(TARGET).brf $(TARGET).txt $(TARGET).out
10 |
11 | clean:
12 | 	rm -f $(TARGET).synctex $(TARGET).aux $(TARGET).pdf $(TARGET).log $(TARGET).bbl $(TARGET).blg $(TARGET).brf $(TARGET).txt $(TARGET).out
13 | 	rm -f *.log
14 | 	find . -name '*.eps' -exec rm -v {} \;
15 |
16 | show: all
17 | 	evince $(TARGET).pdf
18 |
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/README.md:
--------------------------------------------------------------------------------
1 | # DenseBoxPaper
2 |
3 | The paper for DenseBox
4 |
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/MALF_1-eps-converted-to.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/MALF_1-eps-converted-to.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/MALF_2-eps-converted-to.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/MALF_2-eps-converted-to.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure1-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure1-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure2-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure2-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure3-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure3-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure4-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure4-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure5-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure5-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/figures/figure_landmark-crop.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangyi02/densebox/c6efae717a829dc3915763607ed58b8737c6d375/paper/DenseBoxPaper/figures/figure_landmark-crop.pdf
--------------------------------------------------------------------------------
/paper/DenseBoxPaper/section/my_abstract.tex:
--------------------------------------------------------------------------------
1 |
2 | \begin{abstract}
3 | In this paper we propose a unified end-to-end object detection pipeline, called DenseBox. Unlike previous work such as R-CNN, which requires region proposal generation, our network can be optimized end-to-end directly on images and predicts bounding boxes and class confidence scores directly. The carefully designed network works very well on tasks such as face detection and car detection, where R-CNN and YOLO can be inferior due to small object sizes and occlusion. Since DenseBox is a single fully convolutional neural network, it is inherently easy to incorporate multiple tasks, such as landmark localization and semantic segmentation, into one network. We also demonstrate that object detection performance can be significantly improved by incorporating landmark localization. Our method achieves the best performance on the MALF (Multi-Attribute Labelled Faces) detection dataset, and competitive performance on the KITTI car detection task compared to methods that use stereo information.
4 | \end{abstract}
5 |
6 |
7 |
--------------------------------------------------------------------------------