├── .gitignore ├── Model ├── run.py ├── solver.prototxt ├── test.prototxt ├── test.py └── train.prototxt ├── README.md ├── caffe ├── CMakeLists.txt ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config ├── Makefile.config.example ├── README.md ├── caffe.cloc ├── cmake │ ├── ConfigGen.cmake │ ├── Cuda.cmake │ ├── Cuda.cmake~ │ ├── Dependencies.cmake │ ├── External │ │ ├── gflags.cmake │ │ └── glog.cmake │ ├── Misc.cmake │ ├── Modules │ │ ├── FindAtlas.cmake │ │ ├── FindGFlags.cmake │ │ ├── FindGlog.cmake │ │ ├── FindLAPACK.cmake │ │ ├── FindLMDB.cmake │ │ ├── FindLevelDB.cmake │ │ ├── FindMKL.cmake │ │ ├── FindMatlabMex.cmake │ │ ├── FindMatlabMex.cmake~ │ │ ├── FindNumPy.cmake │ │ ├── FindOpenBLAS.cmake │ │ ├── FindSnappy.cmake │ │ └── FindvecLib.cmake │ ├── ProtoBuf.cmake │ ├── Summary.cmake │ ├── Targets.cmake │ ├── Templates │ │ ├── CaffeConfig.cmake.in │ │ ├── CaffeConfigVersion.cmake.in │ │ └── caffe_config.h.in │ ├── Utils.cmake │ └── lint.cmake ├── docker │ ├── Makefile │ ├── README.md │ ├── standalone │ │ ├── cpu │ │ │ └── Dockerfile │ │ └── gpu │ │ │ └── Dockerfile │ └── templates │ │ └── Dockerfile.template ├── docs │ ├── CMakeLists.txt │ ├── CNAME │ ├── README.md │ ├── _config.yml │ ├── _layouts │ │ └── default.html │ ├── development.md │ ├── images │ │ ├── GitHub-Mark-64px.png │ │ └── caffeine-icon.png │ ├── index.md │ ├── install_apt.md │ ├── install_osx.md │ ├── install_yum.md │ ├── installation.md │ ├── model_zoo.md │ ├── multigpu.md │ ├── performance_hardware.md │ ├── stylesheets │ │ ├── pygment_trac.css │ │ ├── reset.css │ │ └── styles.css │ └── tutorial │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig │ │ ├── .gitignore │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ ├── forward_backward.png │ │ ├── layer.jpg │ │ └── logreg.jpg │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md ├── examples │ ├── 00-classification.ipynb │ ├── 01-learning-lenet.ipynb │ ├── 02-fine-tuning.ipynb │ ├── CMakeLists.txt │ ├── brewing-logreg.ipynb │ ├── cifar10 │ │ ├── convert_cifar_data.cpp │ │ ├── create_cifar10.sh │ │ ├── gen_resnets.py │ │ ├── mean.binaryproto │ │ ├── readme.md │ │ ├── resnet_cifar10_20.prototxt │ │ ├── resnet_cifar10_20_deploy.prototxt │ │ ├── resnet_cifar10_56.prototxt │ │ ├── resnet_cifar10_56_deploy.prototxt │ │ ├── solver_resnet.prototxt │ │ └── train_full.sh │ ├── cpp_classification │ │ ├── classification.cpp │ │ └── readme.md │ ├── detection.ipynb │ ├── feature_extraction │ │ ├── imagenet_val.prototxt │ │ └── readme.md │ ├── finetune_flickr_style │ │ ├── assemble_data.py │ │ ├── flickr_style.csv.gz │ │ ├── readme.md │ │ └── style_names.txt │ ├── finetune_pascal_detection │ │ ├── pascal_finetune_solver.prototxt │ │ └── pascal_finetune_trainval_test.prototxt │ ├── hdf5_classification │ │ ├── nonlinear_auto_test.prototxt │ │ ├── nonlinear_auto_train.prototxt │ │ ├── nonlinear_train_val.prototxt │ │ └── train_val.prototxt │ ├── imagenet │ │ ├── create_imagenet.sh │ │ ├── make_imagenet_mean.sh │ │ ├── readme.md │ │ ├── resume_training.sh │ │ └── train_caffenet.sh │ ├── images │ │ ├── cat gray.jpg │ │ ├── cat.jpg │ │ ├── cat_gray.jpg │ │ └── fish-bike.jpg │ ├── mnist │ │ ├── convert_mnist_data.cpp │ │ ├── create_mnist.sh │ │ ├── lenet.prototxt │ │ ├── lenet_adadelta_solver.prototxt │ │ ├── lenet_auto_solver.prototxt │ │ ├── lenet_consolidated_solver.prototxt │ │ ├── lenet_multistep_solver.prototxt │ │ ├── lenet_solver.prototxt │ │ ├── 
lenet_solver_adam.prototxt │ │ ├── lenet_solver_rmsprop.prototxt │ │ ├── lenet_train_test.prototxt │ │ ├── mnist_autoencoder.prototxt │ │ ├── mnist_autoencoder_solver.prototxt │ │ ├── mnist_autoencoder_solver_adadelta.prototxt │ │ ├── mnist_autoencoder_solver_adagrad.prototxt │ │ ├── mnist_autoencoder_solver_nesterov.prototxt │ │ ├── readme.md │ │ ├── train_lenet.sh │ │ ├── train_lenet_adam.sh │ │ ├── train_lenet_consolidated.sh │ │ ├── train_lenet_docker.sh │ │ ├── train_lenet_rmsprop.sh │ │ ├── train_mnist_autoencoder.sh │ │ ├── train_mnist_autoencoder_adadelta.sh │ │ ├── train_mnist_autoencoder_adagrad.sh │ │ └── train_mnist_autoencoder_nesterov.sh │ ├── net_surgery.ipynb │ ├── net_surgery │ │ ├── bvlc_caffenet_full_conv.prototxt │ │ └── conv.prototxt │ ├── pascal-multilabel-with-datalayer.ipynb │ ├── pycaffe │ │ ├── caffenet.py │ │ ├── layers │ │ │ ├── pascal_multilabel_datalayers.py │ │ │ └── pyloss.py │ │ ├── linreg.prototxt │ │ └── tools.py │ ├── siamese │ │ ├── convert_mnist_siamese_data.cpp │ │ ├── create_mnist_siamese.sh │ │ ├── mnist_siamese.ipynb │ │ ├── mnist_siamese.prototxt │ │ ├── mnist_siamese_solver.prototxt │ │ ├── mnist_siamese_train_test.prototxt │ │ ├── readme.md │ │ └── train_mnist_siamese.sh │ └── web_demo │ │ ├── app.py │ │ ├── exifutil.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ └── templates │ │ └── index.html ├── include │ └── caffe │ │ ├── blob.hpp │ │ ├── caffe.hpp │ │ ├── common.cuh │ │ ├── common.hpp │ │ ├── data_layers.hpp │ │ ├── data_reader.hpp │ │ ├── data_transformer.hpp │ │ ├── fast_rcnn_layers.hpp │ │ ├── filler.hpp │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── layer_factory.hpp │ │ ├── layers │ │ ├── .softmax_loss_layer.hpp.swp │ │ ├── absval_layer.hpp │ │ ├── accuracy_layer.hpp │ │ ├── argmax_layer.hpp │ │ ├── axpy_layer.hpp │ │ ├── base_conv_layer.hpp │ │ ├── base_data_layer.hpp │ │ ├── batch_norm_layer.hpp │ │ ├── batch_reindex_layer.hpp │ │ ├── bias_layer.hpp │ │ ├── bnll_layer.hpp │ │ ├── concat_layer.hpp │ │ ├── contrastive_loss_layer.hpp │ │ ├── conv_layer.hpp │ │ ├── crop_layer.hpp │ │ ├── cross_entropy_loss_layer.hpp │ │ ├── cudnn_conv_layer.hpp │ │ ├── cudnn_lcn_layer.hpp │ │ ├── cudnn_lrn_layer.hpp │ │ ├── cudnn_pooling_layer.hpp │ │ ├── cudnn_relu_layer.hpp │ │ ├── cudnn_sigmoid_layer.hpp │ │ ├── cudnn_softmax_layer.hpp │ │ ├── cudnn_tanh_layer.hpp │ │ ├── data_layer.hpp │ │ ├── deconv_layer.hpp │ │ ├── dropout_layer.hpp │ │ ├── dummy_data_layer.hpp │ │ ├── eltwise_layer.hpp │ │ ├── elu_layer.hpp │ │ ├── embed_layer.hpp │ │ ├── euclidean_loss_layer.hpp │ │ ├── exp_layer.hpp │ │ ├── filter_layer.hpp │ │ ├── flatten_layer.hpp │ │ ├── hdf5_data_layer.hpp │ │ ├── hdf5_output_layer.hpp │ │ ├── hinge_loss_layer.hpp │ │ ├── im2col_layer.hpp │ │ ├── image_data_layer.hpp │ │ ├── image_labelmap_data_layer.hpp │ │ ├── image_seg_data_layer.hpp │ │ ├── image_superpixelmap_data_layer.hpp │ │ ├── infogain_loss_layer.hpp │ │ ├── inner_product_layer.hpp │ │ ├── input_layer.hpp │ │ ├── interp_layer.hpp │ │ ├── iou_loss_layer.hpp │ │ ├── l1_loss_layer.hpp │ │ ├── log_layer.hpp │ │ ├── loss_layer.hpp │ │ ├── lrn_layer.hpp │ │ ├── lstm_layer.hpp │ │ ├── memory_data_layer.hpp │ │ ├── multinomial_logistic_loss_layer.hpp │ │ ├── mvn_layer.hpp │ │ ├── neuron_layer.hpp │ │ ├── normalize_layer.hpp │ │ ├── parameter_layer.hpp │ │ ├── pooling_layer.hpp │ │ ├── power_layer.hpp │ │ ├── prelu_layer.hpp │ │ ├── python_layer.hpp │ │ ├── recurrent_layer.hpp │ │ ├── reduction_layer.hpp │ │ ├── relu_layer.hpp │ │ ├── reshape_layer.hpp │ │ ├── rnn_layer.hpp │ │ 
├── s_pooling_layer.hpp │ │ ├── scale_layer.hpp │ │ ├── sigmoid_cross_entropy_loss_layer.hpp │ │ ├── sigmoid_layer.hpp │ │ ├── silence_layer.hpp │ │ ├── similarity_loss_layer.hpp │ │ ├── similarity_loss_layer.hpp~ │ │ ├── slice_layer.hpp │ │ ├── softmax_layer.hpp │ │ ├── softmax_loss_layer.hpp │ │ ├── softmax_loss_layer_weighted.hpp │ │ ├── sp_loss_layer.hpp │ │ ├── split_layer.hpp │ │ ├── spp_layer.hpp │ │ ├── tanh_layer.hpp │ │ ├── threshold_layer.hpp │ │ ├── tile_layer.hpp │ │ ├── weak_seg_data_layer.hpp │ │ └── window_data_layer.hpp │ │ ├── net.hpp │ │ ├── parallel.hpp │ │ ├── sgd_solvers.hpp │ │ ├── solver.hpp │ │ ├── solver_factory.hpp │ │ ├── syncedmem.hpp │ │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ │ └── util │ │ ├── benchmark.hpp │ │ ├── blocking_queue.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── db_leveldb.hpp │ │ ├── db_lmdb.hpp │ │ ├── device_alternate.hpp │ │ ├── format.hpp │ │ ├── gpu_util.cuh │ │ ├── hdf5.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── interp.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── rng.hpp │ │ ├── signal_handler.h │ │ └── upgrade_proto.hpp ├── lib │ ├── DepthContrastLoss.py │ ├── DepthContrastLoss.py~ │ ├── ImageLabelData.py │ ├── ImageLabelData.py~ │ ├── ImageLabelDataTest.py │ ├── add_weight.py │ └── add_weight.py~ ├── matlab │ ├── +caffe │ │ ├── +test │ │ │ ├── test_io.m │ │ │ ├── test_net.m │ │ │ └── test_solver.m │ │ ├── Blob.m │ │ ├── Layer.m │ │ ├── Net.m │ │ ├── Solver.m │ │ ├── get_net.m │ │ ├── get_solver.m │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.mat │ │ ├── io.m │ │ ├── private │ │ │ ├── CHECK.m │ │ │ ├── CHECK_FILE_EXIST.m │ │ │ ├── caffe_.cpp │ │ │ ├── caffe_.mexa64 │ │ │ └── is_valid_handle.m │ │ ├── reset_all.m │ │ ├── run_tests.m │ │ ├── set_device.m │ │ ├── set_mode_cpu.m │ │ ├── set_mode_gpu.m │ │ └── version.m │ ├── CMakeLists.txt │ ├── demo │ │ └── classification_demo.m │ └── hdf5creation │ │ ├── .gitignore │ │ ├── demo.m │ │ └── store2hdf5.m ├── python │ ├── CMakeLists.txt │ ├── caffe │ │ ├── __init__.py │ │ ├── _caffe.cpp │ │ ├── _caffe.so │ │ ├── classifier.py │ │ ├── coord_map.py │ │ ├── detector.py │ │ ├── draw.py │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.npy │ │ ├── io.py │ │ ├── net_spec.py │ │ ├── proto │ │ │ ├── __init__.py │ │ │ └── caffe_pb2.py │ │ ├── pycaffe.py │ │ └── test │ │ │ ├── test_coord_map.py │ │ │ ├── test_io.py │ │ │ ├── test_layer_type_list.py │ │ │ ├── test_net.py │ │ │ ├── test_net_spec.py │ │ │ ├── test_python_layer.py │ │ │ ├── test_python_layer_with_param_str.py │ │ │ └── test_solver.py │ ├── classify.py │ ├── detect.py │ ├── draw_net.py │ └── requirements.txt ├── scripts │ ├── build_docs.sh │ ├── copy_notebook.py │ ├── cpp_lint.py │ ├── deploy_docs.sh │ ├── download_model_binary.py │ ├── download_model_from_gist.sh │ ├── gather_examples.sh │ ├── travis │ │ ├── build.sh │ │ ├── configure-cmake.sh │ │ ├── configure-make.sh │ │ ├── configure.sh │ │ ├── defaults.sh │ │ ├── install-deps.sh │ │ ├── install-python-deps.sh │ │ ├── setup-venv.sh │ │ └── test.sh │ └── upload_model_to_gist.sh ├── solver.prototxt ├── src │ ├── caffe │ │ ├── CMakeLists.txt │ │ ├── blob.cpp │ │ ├── common.cpp │ │ ├── data_reader.cpp │ │ ├── data_transformer.cpp │ │ ├── internal_thread.cpp │ │ ├── layer.cpp │ │ ├── layer_factory.cpp │ │ ├── layers │ │ │ ├── .scale_layer.cpp.swp │ │ │ ├── .~lock.base_conv_layer.cpp# │ │ │ ├── absval_layer.cpp │ │ │ ├── absval_layer.cu │ │ │ ├── accuracy_layer.cpp │ │ │ ├── argmax_layer.cpp │ │ │ ├── axpy_layer.cpp │ │ 
│ ├── axpy_layer.cu │ │ │ ├── base_conv_layer.cpp │ │ │ ├── base_data_layer.cpp │ │ │ ├── base_data_layer.cu │ │ │ ├── batch_norm_layer.cpp │ │ │ ├── batch_norm_layer.cu │ │ │ ├── batch_reindex_layer.cpp │ │ │ ├── batch_reindex_layer.cu │ │ │ ├── bias_layer.cpp │ │ │ ├── bias_layer.cu │ │ │ ├── bnll_layer.cpp │ │ │ ├── bnll_layer.cu │ │ │ ├── concat_layer.cpp │ │ │ ├── concat_layer.cu │ │ │ ├── contrastive_loss_layer.cpp │ │ │ ├── contrastive_loss_layer.cu │ │ │ ├── conv_layer.cpp │ │ │ ├── conv_layer.cu │ │ │ ├── crop_layer.cpp │ │ │ ├── crop_layer.cu │ │ │ ├── cross_entropy_loss_layer.cpp │ │ │ ├── cross_entropy_loss_layer.cu │ │ │ ├── cudnn_conv_layer.cpp │ │ │ ├── cudnn_conv_layer.cu │ │ │ ├── cudnn_lcn_layer.cpp │ │ │ ├── cudnn_lcn_layer.cu │ │ │ ├── cudnn_lrn_layer.cpp │ │ │ ├── cudnn_lrn_layer.cu │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ ├── cudnn_pooling_layer.cu │ │ │ ├── cudnn_relu_layer.cpp │ │ │ ├── cudnn_relu_layer.cu │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ ├── cudnn_softmax_layer.cu │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ ├── cudnn_tanh_layer.cu │ │ │ ├── data_layer.cpp │ │ │ ├── deconv_layer.cpp │ │ │ ├── deconv_layer.cu │ │ │ ├── dropout_layer.cpp │ │ │ ├── dropout_layer.cu │ │ │ ├── dummy_data_layer.cpp │ │ │ ├── eltwise_layer.cpp │ │ │ ├── eltwise_layer.cu │ │ │ ├── elu_layer.cpp │ │ │ ├── elu_layer.cu │ │ │ ├── embed_layer.cpp │ │ │ ├── embed_layer.cu │ │ │ ├── euclidean_loss_layer.cpp │ │ │ ├── euclidean_loss_layer.cu │ │ │ ├── exp_layer.cpp │ │ │ ├── exp_layer.cu │ │ │ ├── filter_layer.cpp │ │ │ ├── filter_layer.cu │ │ │ ├── flatten_layer.cpp │ │ │ ├── hdf5_data_layer.cpp │ │ │ ├── hdf5_data_layer.cu │ │ │ ├── hdf5_output_layer.cpp │ │ │ ├── hdf5_output_layer.cu │ │ │ ├── hinge_loss_layer.cpp │ │ │ ├── im2col_layer.cpp │ │ │ ├── im2col_layer.cu │ │ │ ├── image_data_layer.cpp │ │ │ ├── image_labelmap_data_layer.cpp │ │ │ ├── image_seg_data_layer.cpp │ │ │ ├── image_superpixelmap_data_layer.cpp │ │ │ ├── infogain_loss_layer.cpp │ │ │ ├── inner_product_layer.cpp │ │ │ ├── inner_product_layer.cu │ │ │ ├── input_layer.cpp │ │ │ ├── interp_layer.cpp │ │ │ ├── iou_loss_layer.cpp │ │ │ ├── iou_loss_layer.cu │ │ │ ├── l1_loss_layer.cpp │ │ │ ├── l1_loss_layer.cu │ │ │ ├── log_layer.cpp │ │ │ ├── log_layer.cu │ │ │ ├── loss_layer.cpp │ │ │ ├── lrn_layer.cpp │ │ │ ├── lrn_layer.cu │ │ │ ├── lstm_layer.cpp │ │ │ ├── lstm_unit_layer.cpp │ │ │ ├── lstm_unit_layer.cu │ │ │ ├── memory_data_layer.cpp │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ ├── mvn_layer.cpp │ │ │ ├── mvn_layer.cu │ │ │ ├── neuron_layer.cpp │ │ │ ├── normalize_layer.cpp │ │ │ ├── normalize_layer.cu │ │ │ ├── parameter_layer.cpp │ │ │ ├── pooling_layer.cpp │ │ │ ├── pooling_layer.cpp~ │ │ │ ├── pooling_layer.cu │ │ │ ├── power_layer.cpp │ │ │ ├── power_layer.cu │ │ │ ├── prelu_layer.cpp │ │ │ ├── prelu_layer.cu │ │ │ ├── recurrent_layer.cpp │ │ │ ├── recurrent_layer.cu │ │ │ ├── reduction_layer.cpp │ │ │ ├── reduction_layer.cu │ │ │ ├── relu_layer.cpp │ │ │ ├── relu_layer.cu │ │ │ ├── reshape_layer.cpp │ │ │ ├── rnn_layer.cpp │ │ │ ├── s_pooling_layer.cpp │ │ │ ├── scale_layer.cpp │ │ │ ├── scale_layer.cu │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── sigmoid_layer.cpp │ │ │ ├── sigmoid_layer.cu │ │ │ ├── silence_layer.cpp │ │ │ ├── silence_layer.cu │ │ │ ├── similarity_loss_layer.cpp │ │ │ ├── similarity_loss_layer.cpp~ │ │ │ ├── slice_layer.cpp │ │ │ ├── slice_layer.cu │ │ │ ├── smooth_L1_loss_layer.cpp │ │ │ ├── smooth_L1_loss_layer.cu 
│ │ │ ├── softmax_layer.cpp │ │ │ ├── softmax_layer.cu │ │ │ ├── softmax_loss_layer.cpp │ │ │ ├── softmax_loss_layer.cu │ │ │ ├── softmax_loss_layer_weighted.cpp │ │ │ ├── softmax_loss_layer_weighted.cpp~ │ │ │ ├── sp_loss_layer.cpp │ │ │ ├── split_layer.cpp │ │ │ ├── split_layer.cu │ │ │ ├── spp_layer.cpp │ │ │ ├── tanh_layer.cpp │ │ │ ├── tanh_layer.cu │ │ │ ├── threshold_layer.cpp │ │ │ ├── threshold_layer.cu │ │ │ ├── tile_layer.cpp │ │ │ ├── tile_layer.cu │ │ │ ├── weak_seg_data_layer.cpp │ │ │ └── window_data_layer.cpp │ │ ├── net.cpp │ │ ├── parallel.cpp │ │ ├── pooling_layer.cu │ │ ├── proto │ │ │ ├── caffe.proto │ │ │ └── caffe.proto~ │ │ ├── solver.cpp │ │ ├── solvers │ │ │ ├── adadelta_solver.cpp │ │ │ ├── adadelta_solver.cu │ │ │ ├── adagrad_solver.cpp │ │ │ ├── adagrad_solver.cu │ │ │ ├── adam_solver.cpp │ │ │ ├── adam_solver.cu │ │ │ ├── nesterov_solver.cpp │ │ │ ├── nesterov_solver.cu │ │ │ ├── rmsprop_solver.cpp │ │ │ ├── rmsprop_solver.cu │ │ │ ├── sgd_solver.cpp │ │ │ └── sgd_solver.cu │ │ ├── syncedmem.cpp │ │ ├── test │ │ │ ├── CMakeLists.txt │ │ │ ├── test_accuracy_layer.cpp │ │ │ ├── test_argmax_layer.cpp │ │ │ ├── test_batch_norm_layer.cpp │ │ │ ├── test_batch_reindex_layer.cpp │ │ │ ├── test_benchmark.cpp │ │ │ ├── test_bias_layer.cpp │ │ │ ├── test_blob.cpp │ │ │ ├── test_caffe_main.cpp │ │ │ ├── test_common.cpp │ │ │ ├── test_concat_layer.cpp │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ ├── test_convolution_layer.cpp │ │ │ ├── test_crop_layer.cpp │ │ │ ├── test_cross_entropy_loss_layer.cpp │ │ │ ├── test_data │ │ │ │ ├── generate_sample_data.py │ │ │ │ ├── sample_data.h5 │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ ├── sample_data_list.txt │ │ │ │ ├── solver_data.h5 │ │ │ │ └── solver_data_list.txt │ │ │ ├── test_data_layer.cpp │ │ │ ├── test_data_transformer.cpp │ │ │ ├── test_db.cpp │ │ │ ├── test_deconvolution_layer.cpp │ │ │ ├── test_dummy_data_layer.cpp │ │ │ ├── test_eltwise_layer.cpp │ │ │ ├── test_embed_layer.cpp │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ ├── test_filler.cpp │ │ │ ├── test_filter_layer.cpp │ │ │ ├── test_flatten_layer.cpp │ │ │ ├── test_gradient_based_solver.cpp │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ ├── test_hdf5data_layer.cpp │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ ├── test_im2col_kernel.cu │ │ │ ├── test_im2col_layer.cpp │ │ │ ├── test_image_data_layer.cpp │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ ├── test_inner_product_layer.cpp │ │ │ ├── test_internal_thread.cpp │ │ │ ├── test_io.cpp │ │ │ ├── test_iou_loss_layer.cpp │ │ │ ├── test_layer_factory.cpp │ │ │ ├── test_lrn_layer.cpp │ │ │ ├── test_lstm_layer.cpp │ │ │ ├── test_math_functions.cpp │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ ├── test_memory_data_layer.cpp │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ ├── test_mvn_layer.cpp │ │ │ ├── test_net.cpp │ │ │ ├── test_neuron_layer.cpp │ │ │ ├── test_platform.cpp │ │ │ ├── test_pooling_layer.cpp │ │ │ ├── test_power_layer.cpp │ │ │ ├── test_protobuf.cpp │ │ │ ├── test_random_number_generator.cpp │ │ │ ├── test_reduction_layer.cpp │ │ │ ├── test_reshape_layer.cpp │ │ │ ├── test_rnn_layer.cpp │ │ │ ├── test_s_pooling_layer.cpp │ │ │ ├── test_scale_layer.cpp │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── test_slice_layer.cpp │ │ │ ├── test_softmax_layer.cpp │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ ├── test_solver.cpp │ │ │ ├── test_solver_factory.cpp │ │ │ ├── test_sp_loss_layer.cpp │ │ │ ├── test_split_layer.cpp │ │ │ ├── test_spp_layer.cpp │ │ │ ├── test_stochastic_pooling.cpp │ │ │ ├── 
test_syncedmem.cpp │ │ │ ├── test_tanh_layer.cpp │ │ │ ├── test_threshold_layer.cpp │ │ │ ├── test_tile_layer.cpp │ │ │ ├── test_upgrade_proto.cpp │ │ │ └── test_util_blas.cpp │ │ └── util │ │ │ ├── benchmark.cpp │ │ │ ├── blocking_queue.cpp │ │ │ ├── cudnn.cpp │ │ │ ├── db.cpp │ │ │ ├── db_leveldb.cpp │ │ │ ├── db_lmdb.cpp │ │ │ ├── hdf5.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── insert_splits.cpp │ │ │ ├── interp.cpp │ │ │ ├── interp.cu │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ ├── signal_handler.cpp │ │ │ └── upgrade_proto.cpp │ └── gtest │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc └── tools │ ├── CMakeLists.txt │ ├── caffe.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── device_query.cpp │ ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.py │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py.example │ ├── resize_and_crop_images.py │ └── summarize.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── net_speed_benchmark.cpp │ ├── test_net.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ ├── upgrade_net_proto_text.cpp │ └── upgrade_solver_proto_text.cpp └── evaluation ├── CalMAE.m ├── Enhancedmeasure.m ├── Fmeasure_calu.m ├── Fmeasure_calu_backup.m ├── ReadMe.txt ├── S_object.m ├── S_region.m ├── StructureMeasure.m ├── calculateNumber.m ├── main.m ├── main2.m ├── main3.m └── myPlot.m
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.pyc
*.mdb
*.caffemodel
*.solverstate
--------------------------------------------------------------------------------
/Model/run.py:
--------------------------------------------------------------------------------
from __future__ import division
import numpy as np
import sys
caffe_root = '../caffe/'
sys.path.insert(0, caffe_root + 'python')
sys.path.insert(0, '../caffe/lib/')

import caffe

def upsample_filt(size):
    factor = (size + 1) // 2
    if size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:size, :size]
    return (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)

# set parameters s.t. deconvolutional layers compute bilinear interpolation
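# N.B. this is for deconvolution without groups
# Quick sanity check (an illustrative note, not part of the original script):
# upsample_filt(4) has factor 2 and center 1.5, so it is the outer product of
# [0.25, 0.75, 0.75, 0.25] with itself -- a standard 4x4 bilinear kernel.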
def interp_surgery(net, layers):
    for l in layers:
        m, k, h, w = net.params[l][0].data.shape
        if m != k:
            raise ValueError('input + output channels need to be the same')
        if h != w:
            raise ValueError('filters need to be square')
        filt = upsample_filt(h)
        net.params[l][0].data[range(m), range(k), :, :] = filt

base_weights = './vgg16_20M.caffemodel'

# init
caffe.set_mode_gpu()
caffe.set_device(0)

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(base_weights)

# do net surgery to set the deconvolution weights for bilinear interpolation
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
interp_surgery(solver.net, interp_layers)

# to resume an interrupted run, restore a solver snapshot instead:
#solver.restore('./snapshot/ours_iter_4000.solverstate')

solver.step(20000)
--------------------------------------------------------------------------------
/Model/solver.prototxt:
--------------------------------------------------------------------------------
train_net: "./train.prototxt"
# lr for fine-tuning should be lower than when starting from scratch
#debug_info: true
base_lr: 1e-8
lr_policy: "step"
#power: 0.9
gamma: 0.1
iter_size: 10
# stepsize should also be lower, as we're closer to being done
stepsize: 7000
average_loss: 20
display: 10
max_iter: 20000
momentum: 0.90
weight_decay: 0.0005
snapshot: 5000
snapshot_prefix: "../snapshot/"
# uncomment the following to default to CPU mode solving
# solver_mode: CPU
--------------------------------------------------------------------------------
/caffe/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
# Contributors

Caffe is developed by a core set of BVLC members and the open-source community.

We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)!

**For the detailed history of contributions** of a given file, try

    git blame file

to see line-by-line credits and

    git log --follow file

to see the change log even across renames and rewrites.

Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details.

**Copyright** is held by the original contributor according to the versioning history; see LICENSE.
--------------------------------------------------------------------------------
/caffe/INSTALL.md:
--------------------------------------------------------------------------------
# Installation

See http://caffe.berkeleyvision.org/installation.html for the latest
installation instructions.

Check the users group in case you need help:
https://groups.google.com/forum/#!forum/caffe-users
--------------------------------------------------------------------------------
/caffe/caffe.cloc:
--------------------------------------------------------------------------------
Bourne Shell
    filter remove_matches ^\s*#
    filter remove_inline #.*$
    extension sh
    script_exe sh
C
    filter remove_matches ^\s*//
    filter call_regexp_common C
    filter remove_inline //.*$
    extension c
    extension ec
    extension pgc
C++
    filter remove_matches ^\s*//
    filter remove_inline //.*$
    filter call_regexp_common C
    extension C
    extension cc
    extension cpp
    extension cxx
    extension pcc
C/C++ Header
    filter remove_matches ^\s*//
    filter call_regexp_common C
    filter remove_inline //.*$
    extension H
    extension h
    extension hh
    extension hpp
CUDA
    filter remove_matches ^\s*//
    filter remove_inline //.*$
    filter call_regexp_common C
    extension cu
Python
    filter remove_matches ^\s*#
    filter docstring_to_C
    filter call_regexp_common C
    filter remove_inline #.*$
    extension py
make
    filter remove_matches ^\s*#
    filter remove_inline #.*$
    extension Gnumakefile
    extension Makefile
    extension am
    extension gnumakefile
    extension makefile
    filename Gnumakefile
    filename Makefile
    filename gnumakefile
    filename makefile
    script_exe make
--------------------------------------------------------------------------------
/caffe/cmake/Modules/FindGFlags.cmake:
--------------------------------------------------------------------------------
# - Try to find GFLAGS
#
# The following variables are optionally searched for defaults
#  GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found
#
# The following are set after configuration is done:
#  GFLAGS_FOUND
#  GFLAGS_INCLUDE_DIRS
#  GFLAGS_LIBRARIES
#  GFLAGS_LIBRARY_DIRS

include(FindPackageHandleStandardArgs)

set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder containing Gflags")

# We are testing only a couple of files in the include directories
if(WIN32)
  find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
    PATHS ${GFLAGS_ROOT_DIR}/src/windows)
else()
  find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
    PATHS ${GFLAGS_ROOT_DIR})
endif()

if(MSVC)
  find_library(GFLAGS_LIBRARY_RELEASE
    NAMES libgflags
    PATHS ${GFLAGS_ROOT_DIR}
    PATH_SUFFIXES Release)

  find_library(GFLAGS_LIBRARY_DEBUG
    NAMES libgflags-debug
    PATHS ${GFLAGS_ROOT_DIR}
    PATH_SUFFIXES Debug)

  set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
else()
  find_library(GFLAGS_LIBRARY gflags)
endif()

find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)


if(GFLAGS_FOUND)
  set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
  set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
  message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})")
  mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE
                   GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR)
endif()
--------------------------------------------------------------------------------
/caffe/cmake/Modules/FindGlog.cmake:
--------------------------------------------------------------------------------
# - Try to find Glog
#
# The following variables are optionally searched for defaults
#  GLOG_ROOT_DIR: Base directory where all GLOG components are found
#
# The following are set after configuration is done:
#  GLOG_FOUND
#  GLOG_INCLUDE_DIRS
#  GLOG_LIBRARIES
#  GLOG_LIBRARY_DIRS

include(FindPackageHandleStandardArgs)

set(GLOG_ROOT_DIR "" CACHE PATH "Folder containing Google glog")

if(WIN32)
  find_path(GLOG_INCLUDE_DIR glog/logging.h
    PATHS ${GLOG_ROOT_DIR}/src/windows)
else()
  find_path(GLOG_INCLUDE_DIR glog/logging.h
    PATHS ${GLOG_ROOT_DIR})
endif()

if(MSVC)
  find_library(GLOG_LIBRARY_RELEASE libglog_static
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES Release)

  find_library(GLOG_LIBRARY_DEBUG libglog_static
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES Debug)

  set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG})
else()
  find_library(GLOG_LIBRARY glog
    PATHS ${GLOG_ROOT_DIR}
    PATH_SUFFIXES lib lib64)
endif()

find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)

if(GLOG_FOUND)
  set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR})
  set(GLOG_LIBRARIES ${GLOG_LIBRARY})
  message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})")
  mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG
                   GLOG_LIBRARY GLOG_INCLUDE_DIR)
endif()
--------------------------------------------------------------------------------
/caffe/cmake/Modules/FindLMDB.cmake:
--------------------------------------------------------------------------------
# Try to find the LMDB libraries and headers
#  LMDB_FOUND - system has LMDB lib
#  LMDB_INCLUDE_DIR - the LMDB include directory
#  LMDB_LIBRARIES - Libraries needed to use LMDB
#
# FindCWD based on FindGMP by:
# Copyright (c) 2006, Laurent Montel,
#
# Redistribution and use is allowed according to the terms of the BSD license.

# Adapted from FindCWD by:
# Copyright 2013 Conrad Steenberg
# Aug 31, 2013

find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include")
find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES)

if(LMDB_FOUND)
  message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})")
  mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES)

  caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h
    LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH)
  set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}")
endif()
--------------------------------------------------------------------------------
/caffe/cmake/Modules/FindSnappy.cmake:
--------------------------------------------------------------------------------
# Find the Snappy libraries
#
# The following variables are optionally searched for defaults
#  Snappy_ROOT_DIR: Base directory where all Snappy components are found
#
# The following are set after configuration is done:
#  SNAPPY_FOUND
#  Snappy_INCLUDE_DIR
#  Snappy_LIBRARIES

find_path(Snappy_INCLUDE_DIR NAMES snappy.h
  PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include)

find_library(Snappy_LIBRARIES NAMES snappy
  PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES)

if(SNAPPY_FOUND)
  message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})")
  mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES)

  caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h
    SNAPPY_VERSION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL)
  set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}")
endif()

--------------------------------------------------------------------------------
/caffe/cmake/Modules/FindvecLib.cmake:
--------------------------------------------------------------------------------
# Find the vecLib libraries as part of Accelerate.framework or as a standalone framework
#
# The following are set after configuration is done:
#  VECLIB_FOUND
#  vecLib_INCLUDE_DIR
#  vecLib_LINKER_LIBS


if(NOT APPLE)
  return()
endif()

set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers")

find_path(vecLib_INCLUDE_DIR vecLib.h
          DOC "vecLib include directory"
          PATHS /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix}
                /System/Library/${__veclib_include_suffix}
                /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
          NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR)

if(VECLIB_FOUND)
  if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*")
    set(vecLib_LINKER_LIBS -lcblas "-framework vecLib")
    message(STATUS "Found standalone vecLib.framework")
  else()
    set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate")
Accelerate") 31 | message(STATUS "Found vecLib as part of Accelerate.framework") 32 | endif() 33 | 34 | mark_as_advanced(vecLib_INCLUDE_DIR) 35 | endif() 36 | -------------------------------------------------------------------------------- /caffe/cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /caffe/cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* NVIDA Cuda */ 8 | #cmakedefine HAVE_CUDA 9 | 10 | /* NVIDA cuDNN */ 11 | #cmakedefine HAVE_CUDNN 12 | #cmakedefine USE_CUDNN 13 | 14 | /* NVIDA cuDNN */ 15 | #cmakedefine CPU_ONLY 16 | 17 | /* Test device */ 18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 19 | 20 | /* Temporary (TODO: remove) */ 21 | #if 1 22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 24 | #define CMAKE_EXT ".gen.cmake" 25 | #else 26 | #define CMAKE_SOURCE_DIR "src/" 27 | #define EXAMPLES_SOURCE_DIR "examples/" 28 | #define CMAKE_EXT "" 29 | #endif 30 | 31 | /* Matlab */ 32 | #cmakedefine HAVE_MATLAB 33 | 34 | /* IO libraries */ 35 | #cmakedefine USE_OPENCV 36 | #cmakedefine USE_LEVELDB 37 | #cmakedefine USE_LMDB 38 | #cmakedefine ALLOW_LMDB_NOLOCK 39 | -------------------------------------------------------------------------------- /caffe/cmake/lint.cmake: -------------------------------------------------------------------------------- 1 | 2 | set(CMAKE_SOURCE_DIR ..) 
set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)
set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc)
set(EXCLUDE_FILE_EXTENSIONS pb.h pb.cc)
set(LINT_DIRS include src/caffe examples tools python matlab)

cmake_policy(SET CMP0009 NEW)  # suppress cmake warning

# find all files of interest
foreach(ext ${SRC_FILE_EXTENSIONS})
  foreach(dir ${LINT_DIRS})
    file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext})
    set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES})
  endforeach()
endforeach()

# find all files that should be excluded
foreach(ext ${EXCLUDE_FILE_EXTENSIONS})
  file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext})
  set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES})
endforeach()

# exclude generated pb files
list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES})

execute_process(
  COMMAND ${LINT_COMMAND} ${LINT_SOURCES}
  ERROR_VARIABLE LINT_OUTPUT
  ERROR_STRIP_TRAILING_WHITESPACE
)

# cpp_lint.py reports on stderr; the last line carries the error count,
# which is parsed out below to decide pass/fail
string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT})

list(GET LINT_OUTPUT -1 LINT_RESULT)
list(REMOVE_AT LINT_OUTPUT -1)
string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT})
list(GET LINT_RESULT -1 NUM_ERRORS)
if(NUM_ERRORS GREATER 0)
  foreach(msg ${LINT_OUTPUT})
    string(FIND ${msg} "Done" result)
    if(result LESS 0)
      message(STATUS ${msg})
    endif()
  endforeach()
  message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!")
else()
  message(STATUS "Lint did not find any errors!")
endif()

--------------------------------------------------------------------------------
/caffe/docker/Makefile:
--------------------------------------------------------------------------------
# A makefile to build the docker images for caffe.
# Two caffe images will be built:
#   caffe:cpu --> A CPU-only build of caffe.
#   caffe:gpu --> A GPU-enabled build using the latest CUDA and CUDNN versions.

DOCKER ?= docker

all: docker_files standalone

.PHONY: standalone devel

standalone: cpu_standalone gpu_standalone


cpu_standalone: standalone/cpu/Dockerfile
	$(DOCKER) build -t caffe:cpu standalone/cpu

gpu_standalone: standalone/gpu/Dockerfile
	$(DOCKER) build -t caffe:gpu standalone/gpu

docker_files: standalone_files

standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile

FROM_GPU = "nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04"
FROM_CPU = "ubuntu:14.04"
GPU_CMAKE_ARGS = -DUSE_CUDNN=1
CPU_CMAKE_ARGS = -DCPU_ONLY=1

# A make macro to select the CPU or GPU base image.
define from_image
$(if $(strip $(findstring gpu,$@)),$(FROM_GPU),$(FROM_CPU))
endef

# A make macro to select the CPU or GPU build args.
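# ($@ expands to the target being built, so findstring selects the GPU
# variant exactly when the target name contains "gpu".)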
define build_args
$(if $(strip $(findstring gpu,$@)),$(GPU_CMAKE_ARGS),$(CPU_CMAKE_ARGS))
endef

# A make macro to construct the CPU or GPU Dockerfile from the template
define create_docker_file
	@echo creating $@
	@echo "FROM "$(from_image) > $@
	@cat $^ | sed 's/$${CMAKE_ARGS}/$(build_args)/' >> $@
endef


standalone/%/Dockerfile: templates/Dockerfile.template
	$(create_docker_file)

--------------------------------------------------------------------------------
/caffe/docker/standalone/cpu/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04
MAINTAINER caffe-maint@googlegroups.com

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        wget \
        libatlas-base-dev \
        libboost-all-dev \
        libgflags-dev \
        libgoogle-glog-dev \
        libhdf5-serial-dev \
        libleveldb-dev \
        liblmdb-dev \
        libopencv-dev \
        libprotobuf-dev \
        libsnappy-dev \
        protobuf-compiler \
        python-dev \
        python-numpy \
        python-pip \
        python-scipy && \
    rm -rf /var/lib/apt/lists/*

ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \
    for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
    mkdir build && cd build && \
    cmake -DCPU_ONLY=1 .. && \
    make -j"$(nproc)"

ENV PYCAFFE_ROOT $CAFFE_ROOT/python
ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH
ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH
RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig

WORKDIR /workspace
--------------------------------------------------------------------------------
/caffe/docker/standalone/gpu/Dockerfile:
--------------------------------------------------------------------------------
FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
MAINTAINER caffe-maint@googlegroups.com

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        wget \
        libatlas-base-dev \
        libboost-all-dev \
        libgflags-dev \
        libgoogle-glog-dev \
        libhdf5-serial-dev \
        libleveldb-dev \
        liblmdb-dev \
        libopencv-dev \
        libprotobuf-dev \
        libsnappy-dev \
        protobuf-compiler \
        python-dev \
        python-numpy \
        python-pip \
        python-scipy && \
    rm -rf /var/lib/apt/lists/*

ENV CAFFE_ROOT=/opt/caffe
WORKDIR $CAFFE_ROOT

# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this.
ENV CLONE_TAG=master

RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \
    for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \
    mkdir build && cd build && \
    cmake -DUSE_CUDNN=1 .. && \
&& \ 36 | make -j"$(nproc)" 37 | 38 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python 39 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH 40 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH 41 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig 42 | 43 | WORKDIR /workspace 44 | -------------------------------------------------------------------------------- /caffe/docker/templates/Dockerfile.template: -------------------------------------------------------------------------------- 1 | MAINTAINER caffe-maint@googlegroups.com 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends \ 4 | build-essential \ 5 | cmake \ 6 | git \ 7 | wget \ 8 | libatlas-base-dev \ 9 | libboost-all-dev \ 10 | libgflags-dev \ 11 | libgoogle-glog-dev \ 12 | libhdf5-serial-dev \ 13 | libleveldb-dev \ 14 | liblmdb-dev \ 15 | libopencv-dev \ 16 | libprotobuf-dev \ 17 | libsnappy-dev \ 18 | protobuf-compiler \ 19 | python-dev \ 20 | python-numpy \ 21 | python-pip \ 22 | python-scipy && \ 23 | rm -rf /var/lib/apt/lists/* 24 | 25 | ENV CAFFE_ROOT=/opt/caffe 26 | WORKDIR $CAFFE_ROOT 27 | 28 | # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. 29 | ENV CLONE_TAG=master 30 | 31 | RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ 32 | for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ 33 | mkdir build && cd build && \ 34 | cmake ${CMAKE_ARGS} .. && \ 35 | make -j"$(nproc)" 36 | 37 | ENV PYCAFFE_ROOT $CAFFE_ROOT/python 38 | ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH 39 | ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH 40 | RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig 41 | 42 | WORKDIR /workspace 43 | -------------------------------------------------------------------------------- /caffe/docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /caffe/docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 
--------------------------------------------------------------------------------
/caffe/docs/_config.yml:
--------------------------------------------------------------------------------
defaults:
  -
    scope:
      path: "" # an empty string here means all files in the project
    values:
      layout: "default"

--------------------------------------------------------------------------------
/caffe/docs/images/GitHub-Mark-64px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/images/GitHub-Mark-64px.png
--------------------------------------------------------------------------------
/caffe/docs/images/caffeine-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/images/caffeine-icon.png
--------------------------------------------------------------------------------
/caffe/docs/stylesheets/reset.css:
--------------------------------------------------------------------------------
/* MeyerWeb Reset */

html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, img, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
b, u, i, center,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td,
article, aside, canvas, details, embed,
figure, figcaption, footer, header, hgroup,
menu, nav, output, ruby, section, summary,
time, mark, audio, video {
  margin: 0;
  padding: 0;
  border: 0;
  font: inherit;
  vertical-align: baseline;
}
--------------------------------------------------------------------------------
/caffe/docs/tutorial/convolution.md:
--------------------------------------------------------------------------------
---
title: Convolution
---
# Caffeinated Convolution

The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication.
This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices.

For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo).

As it turns out, this same reduction was independently explored in the context of conv. nets by

> K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006.
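
The reduction itself is easy to sketch in a few lines of NumPy. This is a toy illustration of the im2col idea rather than Caffe's actual implementation (see `src/caffe/util/im2col.cpp`); it assumes stride 1 and no padding:

```python
import numpy as np

def im2col(x, k):
    """Unroll every k-by-k patch of a (C, H, W) array into one column."""
    C, H, W = x.shape
    out_h, out_w = H - k + 1, W - k + 1
    cols = np.empty((C * k * k, out_h * out_w))
    for i in range(out_h):
        for j in range(out_w):
            cols[:, i * out_w + j] = x[:, i:i + k, j:j + k].ravel()
    return cols

# Convolution becomes a single matrix multiply:
# (num_output, C*k*k) x (C*k*k, out_h*out_w) -> (num_output, out_h*out_w)
x = np.random.randn(3, 8, 8)               # input: 3 channels, 8x8
w = np.random.randn(16, 3 * 3 * 3)         # 16 filters, each 3x3x3, as rows
y = w.dot(im2col(x, 3)).reshape(16, 6, 6)  # output: 16 channels, 6x6
```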
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/.gitignore
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/backward.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/backward.jpg
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/forward.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/forward.jpg
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/forward_backward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/forward_backward.png
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/layer.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/layer.jpg
--------------------------------------------------------------------------------
/caffe/docs/tutorial/fig/logreg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/docs/tutorial/fig/logreg.jpg
--------------------------------------------------------------------------------
/caffe/examples/CMakeLists.txt:
--------------------------------------------------------------------------------
file(GLOB_RECURSE examples_srcs "${PROJECT_SOURCE_DIR}/examples/*.cpp")

foreach(source_file ${examples_srcs})
  # get file name
  get_filename_component(name ${source_file} NAME_WE)

  # get folder name
  get_filename_component(path ${source_file} PATH)
  get_filename_component(folder ${path} NAME_WE)

  add_executable(${name} ${source_file})
  target_link_libraries(${name} ${Caffe_LINK})
  caffe_default_properties(${name})

  # set back RUNTIME_OUTPUT_DIRECTORY
  set_target_properties(${name} PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/examples/${folder}")

  caffe_set_solution_folder(${name} examples)

  # install
  install(TARGETS ${name} DESTINATION bin)

  if(UNIX OR APPLE)
    # Funny command to make tutorials work
    # TODO: remove in future as soon as naming is standardized everywhere
    set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX})
    add_custom_command(TARGET ${name} POST_BUILD
                       COMMAND ln -sf "${__outname}" "${__outname}.bin")
  endif()
endforeach()
--------------------------------------------------------------------------------
/caffe/examples/cifar10/create_cifar10.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# This script converts the cifar data into lmdb format.
set -e

EXAMPLE=examples/cifar10
DATA=data/cifar10
DBTYPE=lmdb

echo "Creating $DBTYPE..."

rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE

./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE

echo "Computing image mean..."

./build/tools/compute_image_mean -backend=$DBTYPE \
  $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto

echo "Done."
--------------------------------------------------------------------------------
/caffe/examples/cifar10/mean.binaryproto:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/cifar10/mean.binaryproto
--------------------------------------------------------------------------------
/caffe/examples/cifar10/solver_resnet.prototxt:
--------------------------------------------------------------------------------
# reduce the learning rate after 120 epochs (46875 iters) by a factor of 10,
# then by another factor of 10 after 40 more epochs (at 62500 iters)

# The train/test net protocol buffer definition
train_net: "examples/cifar10/resnet_cifar10_20.prototxt"
test_net: "examples/cifar10/resnet_cifar10_20_deploy.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 390 training iterations (about one epoch).
test_interval: 390
# The base learning rate, momentum and the weight decay of the network.
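# (A note on the schedule below, assuming the 50,000-image CIFAR-10 training
# set and the train batch size of 128 that these numbers imply: one epoch is
# 50000/128 ~= 390 iterations, so stepvalue 46875 = epoch 120,
# stepvalue 62500 = epoch 160, and max_iter 78125 = epoch 200.)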
base_lr: 0.1
momentum: 0.9
weight_decay: 0.0001
# The learning rate policy
lr_policy: "multistep"
gamma: 0.1
#stepsize=0.1
stepvalue: 46875
stepvalue: 62500
# Display every 50 iterations
display: 50
# The maximum number of iterations
max_iter: 78125
# snapshot intermediate results
snapshot: 5000
#snapshot_format: HDF5
snapshot_prefix: "examples/cifar10/snapshot/cifar10"
# solver mode: CPU or GPU
solver_mode: GPU
--------------------------------------------------------------------------------
/caffe/examples/cifar10/train_full.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
set -e

TOOLS=./build/tools

$TOOLS/caffe train --solver=examples/cifar10/cifar10_full_solver.prototxt $@
--------------------------------------------------------------------------------
/caffe/examples/finetune_flickr_style/flickr_style.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/finetune_flickr_style/flickr_style.csv.gz
--------------------------------------------------------------------------------
/caffe/examples/finetune_flickr_style/style_names.txt:
--------------------------------------------------------------------------------
Detailed
Pastel
Melancholy
Noir
HDR
Vintage
Long Exposure
Horror
Sunny
Bright
Hazy
Bokeh
Serene
Texture
Ethereal
Macro
Depth of Field
Geometric Composition
Minimal
Romantic
--------------------------------------------------------------------------------
/caffe/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt:
--------------------------------------------------------------------------------
net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt"
test_iter: 100
test_interval: 1000
base_lr: 0.001
lr_policy: "step"
gamma: 0.1
stepsize: 20000
display: 20
max_iter: 100000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune"
--------------------------------------------------------------------------------
/caffe/examples/hdf5_classification/nonlinear_auto_test.prototxt:
--------------------------------------------------------------------------------
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
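
A net definition like the one above can be loaded and evaluated from pycaffe once weights exist. A minimal sketch, run from $CAFFE_ROOT with the HDF5 data already generated (e.g. by the brewing-logreg.ipynb example); the .caffemodel filename here is hypothetical:

```python
import sys
sys.path.insert(0, 'python')
import caffe

caffe.set_mode_cpu()
net = caffe.Net('examples/hdf5_classification/nonlinear_auto_test.prototxt',
                'examples/hdf5_classification/nonlinear.caffemodel',  # hypothetical
                caffe.TEST)
# average the accuracy blob over 100 forward passes of batch_size 10
acc = sum(float(net.forward()['accuracy']) for _ in range(100)) / 100
print('test accuracy: {:.3f}'.format(acc))
```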
--------------------------------------------------------------------------------
/caffe/examples/hdf5_classification/nonlinear_auto_train.prototxt:
--------------------------------------------------------------------------------
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
--------------------------------------------------------------------------------
/caffe/examples/hdf5_classification/train_val.prototxt:
--------------------------------------------------------------------------------
name: "LogisticRegressionNet"
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  hdf5_data_param {
    source: "examples/hdf5_classification/data/train.txt"
    batch_size: 10
  }
}
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  hdf5_data_param {
    source: "examples/hdf5_classification/data/test.txt"
    batch_size: 10
  }
}
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "data"
  top: "fc1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc1"
  bottom: "label"
  top: "loss"
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc1"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
--------------------------------------------------------------------------------
/caffe/examples/imagenet/create_imagenet.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# Create the imagenet lmdb inputs
# N.B. set the path to the imagenet train + val data dirs
set -e

EXAMPLE=examples/imagenet
DATA=data/ilsvrc12
TOOLS=build/tools

TRAIN_DATA_ROOT=/path/to/imagenet/train/
VAL_DATA_ROOT=/path/to/imagenet/val/

# Set RESIZE=true to resize the images to 256x256. Leave as false if images have
# already been resized using another tool.
RESIZE=false
if $RESIZE; then
  RESIZE_HEIGHT=256
  RESIZE_WIDTH=256
else
  RESIZE_HEIGHT=0
  RESIZE_WIDTH=0
fi

if [ ! -d "$TRAIN_DATA_ROOT" ]; then
-d "$TRAIN_DATA_ROOT" ]; then 25 | echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT" 26 | echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \ 27 | "where the ImageNet training data is stored." 28 | exit 1 29 | fi 30 | 31 | if [ ! -d "$VAL_DATA_ROOT" ]; then 32 | echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT" 33 | echo "Set the VAL_DATA_ROOT variable in create_imagenet.sh to the path" \ 34 | "where the ImageNet validation data is stored." 35 | exit 1 36 | fi 37 | 38 | echo "Creating train lmdb..." 39 | 40 | GLOG_logtostderr=1 $TOOLS/convert_imageset \ 41 | --resize_height=$RESIZE_HEIGHT \ 42 | --resize_width=$RESIZE_WIDTH \ 43 | --shuffle \ 44 | $TRAIN_DATA_ROOT \ 45 | $DATA/train.txt \ 46 | $EXAMPLE/ilsvrc12_train_lmdb 47 | 48 | echo "Creating val lmdb..." 49 | 50 | GLOG_logtostderr=1 $TOOLS/convert_imageset \ 51 | --resize_height=$RESIZE_HEIGHT \ 52 | --resize_width=$RESIZE_WIDTH \ 53 | --shuffle \ 54 | $VAL_DATA_ROOT \ 55 | $DATA/val.txt \ 56 | $EXAMPLE/ilsvrc12_val_lmdb 57 | 58 | echo "Done." 59 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/make_imagenet_mean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Compute the mean image from the imagenet training lmdb 3 | # N.B. this is available in data/ilsvrc12 4 | 5 | EXAMPLE=examples/imagenet 6 | DATA=data/ilsvrc12 7 | TOOLS=build/tools 8 | 9 | $TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \ 10 | $DATA/imagenet_mean.binaryproto 11 | 12 | echo "Done." 13 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/resume_training.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=models/bvlc_reference_caffenet/solver.prototxt \ 6 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 \ 7 | $@ 8 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/train_caffenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=models/bvlc_reference_caffenet/solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/images/cat gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/images/cat gray.jpg -------------------------------------------------------------------------------- /caffe/examples/images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/images/cat.jpg -------------------------------------------------------------------------------- /caffe/examples/images/cat_gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/images/cat_gray.jpg -------------------------------------------------------------------------------- /caffe/examples/images/fish-bike.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/examples/images/fish-bike.jpg -------------------------------------------------------------------------------- /caffe/examples/mnist/create_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into lmdb/leveldb format, 3 | # depending on the value assigned to $BACKEND. 4 | set -e 5 | 6 | EXAMPLE=examples/mnist 7 | DATA=data/mnist 8 | BUILD=build/examples/mnist 9 | 10 | BACKEND="lmdb" 11 | 12 | echo "Creating ${BACKEND}..." 13 | 14 | rm -rf $EXAMPLE/mnist_train_${BACKEND} 15 | rm -rf $EXAMPLE/mnist_test_${BACKEND} 16 | 17 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ 18 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND} 19 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ 20 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND} 21 | 22 | echo "Done." 23 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_adadelta_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 1.0 11 | lr_policy: "fixed" 12 | momentum: 0.95 13 | weight_decay: 0.0005 14 | # Display every 100 iterations 15 | display: 100 16 | # The maximum number of iterations 17 | max_iter: 10000 18 | # snapshot intermediate results 19 | snapshot: 5000 20 | snapshot_prefix: "examples/mnist/lenet_adadelta" 21 | # solver mode: CPU or GPU 22 | solver_mode: GPU 23 | type: "AdaDelta" 24 | delta: 1e-6 25 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_auto_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | train_net: "mnist/lenet_auto_train.prototxt" 3 | test_net: "mnist/lenet_auto_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 500 10 | # The base learning rate, momentum and the weight decay of the network. 
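# With the "inv" policy used below, the effective rate at iteration t is
# base_lr * (1 + gamma * t) ^ (-power), so it decays smoothly from base_lr.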
11 | base_lr: 0.01 12 | momentum: 0.9 13 | weight_decay: 0.0005 14 | # The learning rate policy 15 | lr_policy: "inv" 16 | gamma: 0.0001 17 | power: 0.75 18 | # Display every 100 iterations 19 | display: 100 20 | # The maximum number of iterations 21 | max_iter: 10000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "mnist/lenet" 25 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_multistep_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "multistep" 15 | gamma: 0.9 16 | stepvalue: 5000 17 | stepvalue: 7000 18 | stepvalue: 8000 19 | stepvalue: 9000 20 | stepvalue: 9500 21 | # Display every 100 iterations 22 | display: 100 23 | # The maximum number of iterations 24 | max_iter: 10000 25 | # snapshot intermediate results 26 | snapshot: 5000 27 | snapshot_prefix: "examples/mnist/lenet_multistep" 28 | # solver mode: CPU or GPU 29 | solver_mode: GPU 30 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_solver_adam.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | # this follows "ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION" 3 | net: "examples/mnist/lenet_train_test.prototxt" 4 | # test_iter specifies how many forward passes the test should carry out. 5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 6 | # covering the full 10,000 testing images. 7 | test_iter: 100 8 | # Carry out testing every 500 training iterations. 
9 | test_interval: 500 10 | # All parameters are from the cited paper above 11 | base_lr: 0.001 12 | momentum: 0.9 13 | momentum2: 0.999 14 | # since Adam dynamically changes the learning rate, we set the base learning 15 | # rate to a fixed value 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | type: "Adam" 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_solver_rmsprop.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.0 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet_rmsprop" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | type: "RMSProp" 27 | rms_decay: 0.98 28 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder" 17 | momentum: 0.9 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver_adadelta.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 1.0 9 | lr_policy: "fixed" 10 | momentum: 0.95 11 | delta: 1e-8 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adadelta_train" 17 | # solver mode: CPU or GPU 18 | solver_mode: GPU 19 | type: "AdaDelta" 20 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { 
stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "fixed" 10 | display: 100 11 | max_iter: 65000 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train" 15 | # solver mode: CPU or GPU 16 | solver_mode: GPU 17 | type: "AdaGrad" 18 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train" 17 | momentum: 0.95 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | type: "Nesterov" 21 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@ 5 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet_adam.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@ 5 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet_consolidated.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/lenet_consolidated_solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet_rmsprop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/lenet_solver_rmsprop.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder_adadelta.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder_adagrad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe 
train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder_nesterov.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./build/tools/caffe train \ 5 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt $@ 6 | -------------------------------------------------------------------------------- /caffe/examples/net_surgery/conv.prototxt: -------------------------------------------------------------------------------- 1 | # Simple single-layer network to showcase editing model parameters. 2 | name: "convolution" 3 | layer { 4 | name: "data" 5 | type: "Input" 6 | top: "data" 7 | input_param { shape: { dim: 1 dim: 1 dim: 100 dim: 100 } } 8 | } 9 | layer { 10 | name: "conv" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv" 14 | convolution_param { 15 | num_output: 3 16 | kernel_size: 5 17 | stride: 1 18 | weight_filler { 19 | type: "gaussian" 20 | std: 0.01 21 | } 22 | bias_filler { 23 | type: "constant" 24 | value: 0 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /caffe/examples/pycaffe/layers/pyloss.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | 4 | 5 | class EuclideanLossLayer(caffe.Layer): 6 | """ 7 | Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer 8 | to demonstrate the class interface for developing layers in Python. 9 | """ 10 | 11 | def setup(self, bottom, top): 12 | # check input pair 13 | if len(bottom) != 2: 14 | raise Exception("Need two inputs to compute distance.") 15 | 16 | def reshape(self, bottom, top): 17 | # check input dimensions match 18 | if bottom[0].count != bottom[1].count: 19 | raise Exception("Inputs must have the same dimension.") 20 | # difference is shape of inputs 21 | self.diff = np.zeros_like(bottom[0].data, dtype=np.float32) 22 | # loss output is scalar 23 | top[0].reshape(1) 24 | 25 | def forward(self, bottom, top): 26 | self.diff[...] = bottom[0].data - bottom[1].data 27 | top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2. 28 | 29 | def backward(self, top, propagate_down, bottom): 30 | for i in range(2): 31 | if not propagate_down[i]: 32 | continue 33 | if i == 0: 34 | sign = 1 35 | else: 36 | sign = -1 37 | bottom[i].diff[...] = sign * self.diff / bottom[i].num 38 | -------------------------------------------------------------------------------- /caffe/examples/pycaffe/linreg.prototxt: -------------------------------------------------------------------------------- 1 | name: 'LinearRegressionExample' 2 | # define a simple network for linear regression on dummy data 3 | # that computes the loss by a PythonLayer. 
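# The Python loss layer at the end of this net is resolved at run time:
# python_param.module names an importable module on $PYTHONPATH (pyloss.py
# above) and python_param.layer names the class inside it (EuclideanLossLayer).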
4 | layer { 5 | type: 'DummyData' 6 | name: 'x' 7 | top: 'x' 8 | dummy_data_param { 9 | shape: { dim: 10 dim: 3 dim: 2 } 10 | data_filler: { type: 'gaussian' } 11 | } 12 | } 13 | layer { 14 | type: 'DummyData' 15 | name: 'y' 16 | top: 'y' 17 | dummy_data_param { 18 | shape: { dim: 10 dim: 3 dim: 2 } 19 | data_filler: { type: 'gaussian' } 20 | } 21 | } 22 | # include InnerProduct layers for parameters 23 | # so the net will need backward 24 | layer { 25 | type: 'InnerProduct' 26 | name: 'ipx' 27 | top: 'ipx' 28 | bottom: 'x' 29 | inner_product_param { 30 | num_output: 10 31 | weight_filler { type: 'xavier' } 32 | } 33 | } 34 | layer { 35 | type: 'InnerProduct' 36 | name: 'ipy' 37 | top: 'ipy' 38 | bottom: 'y' 39 | inner_product_param { 40 | num_output: 10 41 | weight_filler { type: 'xavier' } 42 | } 43 | } 44 | layer { 45 | type: 'Python' 46 | name: 'loss' 47 | top: 'loss' 48 | bottom: 'ipx' 49 | bottom: 'ipy' 50 | python_param { 51 | # the module name -- usually the filename -- that needs to be in $PYTHONPATH 52 | module: 'pyloss' 53 | # the layer name -- the class name in the module 54 | layer: 'EuclideanLossLayer' 55 | } 56 | # set loss weight so Caffe knows this is a loss layer. 57 | # since PythonLayer inherits directly from Layer, this isn't automatically 58 | # known to Caffe 59 | loss_weight: 1 60 | } 61 | -------------------------------------------------------------------------------- /caffe/examples/siamese/create_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into leveldb format. 3 | set -e 4 | 5 | EXAMPLES=./build/examples/siamese 6 | DATA=./data/mnist 7 | 8 | echo "Creating leveldb..." 9 | 10 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb 11 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb 12 | 13 | $EXAMPLES/convert_mnist_siamese_data.bin \ 14 | $DATA/train-images-idx3-ubyte \ 15 | $DATA/train-labels-idx1-ubyte \ 16 | ./examples/siamese/mnist_siamese_train_leveldb 17 | $EXAMPLES/convert_mnist_siamese_data.bin \ 18 | $DATA/t10k-images-idx3-ubyte \ 19 | $DATA/t10k-labels-idx1-ubyte \ 20 | ./examples/siamese/mnist_siamese_test_leveldb 21 | 22 | echo "Done." 23 | -------------------------------------------------------------------------------- /caffe/examples/siamese/mnist_siamese_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/siamese/mnist_siamese_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
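# Note that weight_decay is 0.0000 below, which disables L2 regularization
# for the siamese net.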
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0000 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 50000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/siamese/mnist_siamese" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/siamese/train_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -e 3 | 4 | TOOLS=./build/tools 5 | 6 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt $@ 7 | -------------------------------------------------------------------------------- /caffe/examples/web_demo/exifutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script handles the skimage exif problem. 3 | """ 4 | 5 | from PIL import Image 6 | import numpy as np 7 | 8 | ORIENTATIONS = { # used in apply_orientation 9 | 2: (Image.FLIP_LEFT_RIGHT,), 10 | 3: (Image.ROTATE_180,), 11 | 4: (Image.FLIP_TOP_BOTTOM,), 12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90), 13 | 6: (Image.ROTATE_270,), 14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270), 15 | 8: (Image.ROTATE_90,) 16 | } 17 | 18 | 19 | def open_oriented_im(im_path): 20 | im = Image.open(im_path) 21 | if hasattr(im, '_getexif'): 22 | exif = im._getexif() 23 | if exif is not None and 274 in exif: 24 | orientation = exif[274] 25 | im = apply_orientation(im, orientation) 26 | img = np.asarray(im).astype(np.float32) / 255. 27 | if img.ndim == 2: 28 | img = img[:, :, np.newaxis] 29 | img = np.tile(img, (1, 1, 3)) 30 | elif img.shape[2] == 4: 31 | img = img[:, :, :3] 32 | return img 33 | 34 | 35 | def apply_orientation(im, orientation): 36 | if orientation in ORIENTATIONS: 37 | for method in ORIENTATIONS[orientation]: 38 | im = im.transpose(method) 39 | return im 40 | -------------------------------------------------------------------------------- /caffe/examples/web_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | werkzeug 2 | flask 3 | tornado 4 | numpy 5 | pandas 6 | pillow 7 | pyyaml 8 | -------------------------------------------------------------------------------- /caffe/include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 
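// A minimal usage sketch (the file names here are illustrative):
//   #include <caffe/caffe.hpp>
//   caffe::Net<float> net("deploy.prototxt", caffe::TEST);
//   net.CopyTrainedLayersFrom("weights.caffemodel");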
3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/parallel.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | #include "caffe/solver.hpp" 16 | #include "caffe/solver_factory.hpp" 17 | #include "caffe/util/benchmark.hpp" 18 | #include "caffe/util/io.hpp" 19 | #include "caffe/util/upgrade_proto.hpp" 20 | 21 | #endif  // CAFFE_CAFFE_HPP_ 22 |
-------------------------------------------------------------------------------- /caffe/include/caffe/common.cuh: -------------------------------------------------------------------------------- 1 | // Copyright 2014 George Papandreou 2 | 3 | #ifndef CAFFE_COMMON_CUH_ 4 | #define CAFFE_COMMON_CUH_ 5 | 6 | #include <cuda.h> 7 | 8 | #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 9 | #else 10 | // CUDA: atomicAdd is not defined for doubles 11 | static __inline__ __device__ double atomicAdd(double *address, double val) { 12 | unsigned long long int* address_as_ull = (unsigned long long int*)address; 13 | unsigned long long int old = *address_as_ull, assumed; 14 | if (val==0.0) 15 | return __longlong_as_double(old); 16 | do { 17 | assumed = old; 18 | old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); 19 | } while (assumed != old); 20 | return __longlong_as_double(old); 21 | } 22 | #endif 23 | #endif 24 |
-------------------------------------------------------------------------------- /caffe/include/caffe/internal_thread.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_INTERNAL_THREAD_HPP_ 2 | #define CAFFE_INTERNAL_THREAD_HPP_ 3 | 4 | #include "caffe/common.hpp" 5 | 6 | /** 7 | Forward declare boost::thread instead of including boost/thread.hpp 8 | to avoid boost/NVCC issues (#1009, #1010) on OSX. 9 | */ 10 | namespace boost { class thread; } 11 | 12 | namespace caffe { 13 | 14 | /** 15 | * Virtual class encapsulating boost::thread for use in a base class. 16 | * The child class will acquire the ability to run a single thread, 17 | * by reimplementing the virtual function InternalThreadEntry. 18 | */ 19 | class InternalThread { 20 | public: 21 | InternalThread() : thread_() {} 22 | virtual ~InternalThread(); 23 | 24 | /** 25 | * Caffe's thread local state will be initialized using the current 26 | * thread values, e.g. device id, solver index etc. The random seed 27 | * is initialized using caffe_rng_rand. 28 | */ 29 | void StartInternalThread(); 30 | 31 | /** Will not return until the internal thread has exited. */ 32 | void StopInternalThread(); 33 | 34 | bool is_started() const; 35 | 36 | protected: 37 | /* Implement this method in your subclass 38 | with the code you want your thread to run. */ 39 | virtual void InternalThreadEntry() {} 40 | 41 | /* Should be tested when running loops to exit when requested.
*/ 42 | bool must_stop(); 43 | 44 | private: 45 | void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count, 46 | bool root_solver); 47 | 48 | shared_ptr<boost::thread> thread_; 49 | }; 50 | 51 | } // namespace caffe 52 | 53 | #endif  // CAFFE_INTERNAL_THREAD_HPP_ 54 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/.softmax_loss_layer.hpp.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/include/caffe/layers/.softmax_loss_layer.hpp.swp
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_lcn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LCN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LCN_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | #include "caffe/layers/power_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | template <typename Dtype> 17 | class CuDNNLCNLayer : public LRNLayer<Dtype> { 18 | public: 19 | explicit CuDNNLCNLayer(const LayerParameter& param) 20 | : LRNLayer<Dtype>(param), handles_setup_(false), tempDataSize(0), 21 | tempData1(NULL), tempData2(NULL) {} 22 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 23 | const vector<Blob<Dtype>*>& top); 24 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual ~CuDNNLCNLayer(); 27 | 28 | protected: 29 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 30 | const vector<Blob<Dtype>*>& top); 31 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 32 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 33 | 34 | bool handles_setup_; 35 | cudnnHandle_t handle_; 36 | cudnnLRNDescriptor_t norm_desc_; 37 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 38 | 39 | int size_, pre_pad_; 40 | Dtype alpha_, beta_, k_; 41 | 42 | size_t tempDataSize; 43 | void *tempData1, *tempData2; 44 | }; 45 | #endif 46 | 47 | } // namespace caffe 48 | 49 | #endif  // CAFFE_CUDNN_LCN_LAYER_HPP_ 50 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_lrn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LRN_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | template <typename Dtype> 16 | class CuDNNLRNLayer : public LRNLayer<Dtype> { 17 | public: 18 | explicit CuDNNLRNLayer(const LayerParameter& param) 19 | : LRNLayer<Dtype>(param), handles_setup_(false) {} 20 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 21 | const vector<Blob<Dtype>*>& top); 22 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 23 | const vector<Blob<Dtype>*>& top); 24 | virtual ~CuDNNLRNLayer(); 25 | 26 | protected: 27 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 28 | const vector<Blob<Dtype>*>& top); 29 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 30 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 31 | 32 | bool handles_setup_; 33 | cudnnHandle_t handle_; 34 | cudnnLRNDescriptor_t norm_desc_; 35 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 36 | 37 | int size_; 38 | Dtype alpha_, beta_, k_; 39 | }; 40 | #endif 41 | 42 | } // namespace
caffe 43 | 44 | #endif  // CAFFE_CUDNN_LRN_LAYER_HPP_ 45 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_pooling_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_ 2 | #define CAFFE_CUDNN_POOLING_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/pooling_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | /* 16 | * @brief cuDNN implementation of PoolingLayer. 17 | *        Fallback to PoolingLayer for CPU mode. 18 | */ 19 | template <typename Dtype> 20 | class CuDNNPoolingLayer : public PoolingLayer<Dtype> { 21 | public: 22 | explicit CuDNNPoolingLayer(const LayerParameter& param) 23 | : PoolingLayer<Dtype>(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual ~CuDNNPoolingLayer(); 29 | // Currently, cuDNN does not support the extra top blob. 30 | virtual inline int MinTopBlobs() const { return -1; } 31 | virtual inline int ExactNumTopBlobs() const { return 1; } 32 | 33 | protected: 34 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 35 | const vector<Blob<Dtype>*>& top); 36 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 37 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 38 | 39 | bool handles_setup_; 40 | cudnnHandle_t handle_; 41 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 42 | cudnnPoolingDescriptor_t pooling_desc_; 43 | cudnnPoolingMode_t mode_; 44 | }; 45 | #endif 46 | 47 | } // namespace caffe 48 | 49 | #endif  // CAFFE_CUDNN_POOLING_LAYER_HPP_ 50 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_relu_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ 2 | #define CAFFE_CUDNN_RELU_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/relu_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of ReLULayer.
18 | */ 19 | template <typename Dtype> 20 | class CuDNNReLULayer : public ReLULayer<Dtype> { 21 | public: 22 | explicit CuDNNReLULayer(const LayerParameter& param) 23 | : ReLULayer<Dtype>(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual ~CuDNNReLULayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 32 | const vector<Blob<Dtype>*>& top); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | cudnnActivationDescriptor_t activ_desc_; 41 | }; 42 | #endif 43 | 44 | } // namespace caffe 45 | 46 | #endif  // CAFFE_CUDNN_RELU_LAYER_HPP_ 47 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_sigmoid_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/sigmoid_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of SigmoidLayer. 18 | */ 19 | template <typename Dtype> 20 | class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> { 21 | public: 22 | explicit CuDNNSigmoidLayer(const LayerParameter& param) 23 | : SigmoidLayer<Dtype>(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual ~CuDNNSigmoidLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 32 | const vector<Blob<Dtype>*>& top); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | cudnnActivationDescriptor_t activ_desc_; 41 | }; 42 | #endif 43 | 44 | } // namespace caffe 45 | 46 | #endif  // CAFFE_CUDNN_SIGMOID_LAYER_HPP_ 47 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_softmax_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/softmax_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | /** 16 | * @brief cuDNN implementation of SoftmaxLayer. 17 | *        Fallback to SoftmaxLayer for CPU mode.
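 * (The cuDNN path is compiled in only when USE_CUDNN is defined, and it is
 * used only in GPU mode.)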
18 | */ 19 | template <typename Dtype> 20 | class CuDNNSoftmaxLayer : public SoftmaxLayer<Dtype> { 21 | public: 22 | explicit CuDNNSoftmaxLayer(const LayerParameter& param) 23 | : SoftmaxLayer<Dtype>(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual ~CuDNNSoftmaxLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 32 | const vector<Blob<Dtype>*>& top); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif  // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 46 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/cudnn_tanh_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ 2 | #define CAFFE_CUDNN_TANH_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/tanh_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of TanHLayer. 18 | */ 19 | template <typename Dtype> 20 | class CuDNNTanHLayer : public TanHLayer<Dtype> { 21 | public: 22 | explicit CuDNNTanHLayer(const LayerParameter& param) 23 | : TanHLayer<Dtype>(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual ~CuDNNTanHLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 32 | const vector<Blob<Dtype>*>& top); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | cudnnActivationDescriptor_t activ_desc_; 41 | }; 42 | #endif 43 | 44 | } // namespace caffe 45 | 46 | #endif  // CAFFE_CUDNN_TANH_LAYER_HPP_ 47 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_LAYER_HPP_ 2 | #define CAFFE_DATA_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/data_reader.hpp" 8 | #include "caffe/data_transformer.hpp" 9 | #include "caffe/internal_thread.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layers/base_data_layer.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/util/db.hpp" 14 | 15 | namespace caffe { 16 | 17 | template <typename Dtype> 18 | class DataLayer : public BasePrefetchingDataLayer<Dtype> { 19 | public: 20 | explicit DataLayer(const LayerParameter& param); 21 | virtual ~DataLayer(); 22 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 23 | const vector<Blob<Dtype>*>& top); 24 | // DataLayer uses a DataReader, instead of layer sharing, for parallelism 25 | virtual inline bool ShareInParallel() const { return false; } 26 | virtual inline const char* type() const { return "Data"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 0; } 28 | virtual inline int MinTopBlobs() const {
return 1; } 29 | virtual inline int MaxTopBlobs() const { return 2; } 30 | 31 | protected: 32 | virtual void load_batch(Batch<Dtype>* batch); 33 | 34 | DataReader reader_; 35 | }; 36 | 37 | } // namespace caffe 38 | 39 | #endif  // CAFFE_DATA_LAYER_HPP_ 40 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/image_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_IMAGE_DATA_LAYER_HPP_ 2 | #define CAFFE_IMAGE_DATA_LAYER_HPP_ 3 | 4 | #include <string> 5 | #include <utility> 6 | #include <vector> 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/data_transformer.hpp" 10 | #include "caffe/internal_thread.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/layers/base_data_layer.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | 15 | namespace caffe { 16 | 17 | /** 18 | * @brief Provides data to the Net from image files. 19 | * 20 | * TODO(dox): thorough documentation for Forward and proto params. 21 | */ 22 | template <typename Dtype> 23 | class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> { 24 | public: 25 | explicit ImageDataLayer(const LayerParameter& param) 26 | : BasePrefetchingDataLayer<Dtype>(param) {} 27 | virtual ~ImageDataLayer(); 28 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 29 | const vector<Blob<Dtype>*>& top); 30 | 31 | virtual inline const char* type() const { return "ImageData"; } 32 | virtual inline int ExactNumBottomBlobs() const { return 0; } 33 | virtual inline int ExactNumTopBlobs() const { return 2; } 34 | 35 | protected: 36 | shared_ptr<Caffe::RNG> prefetch_rng_; 37 | virtual void ShuffleImages(); 38 | virtual void load_batch(Batch<Dtype>* batch); 39 | 40 | vector<std::pair<std::string, int> > lines_; 41 | int lines_id_; 42 | }; 43 | 44 | 45 | } // namespace caffe 46 | 47 | #endif  // CAFFE_IMAGE_DATA_LAYER_HPP_ 48 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/image_labelmap_data_layer.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef CAFFE_IMAGE_LABELMAP_DATA_LAYER_HPP_ 3 | #define CAFFE_IMAGE_LABELMAP_DATA_LAYER_HPP_ 4 | 5 | #include <string> 6 | #include <utility> 7 | #include <vector> 8 | 9 | #include "caffe/blob.hpp" 10 | #include "caffe/data_transformer.hpp" 11 | #include "caffe/internal_thread.hpp" 12 | #include "caffe/layer.hpp" 13 | #include "caffe/layers/base_data_layer.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | 16 | namespace caffe { 17 | 18 | 19 | template <typename Dtype> 20 | class ImageLabelmapDataLayer : public BasePrefetchingLabelmapDataLayer<Dtype> { 21 | public: 22 | explicit ImageLabelmapDataLayer(const LayerParameter& param) 23 | : BasePrefetchingLabelmapDataLayer<Dtype>(param) {} 24 | virtual ~ImageLabelmapDataLayer(); 25 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 26 | const vector<Blob<Dtype>*>& top); 27 | 28 | virtual inline const char* type() const { return "ImageLabelmapData"; } 29 | virtual inline int ExactNumBottomBlobs() const { return 0; } 30 | virtual inline int ExactNumTopBlobs() const { return -1; } // could be three if considering label 31 | 32 | protected: 33 | shared_ptr<Caffe::RNG> prefetch_rng_; 34 | virtual void ShuffleImages(); 35 | virtual void load_batch(LabelmapBatch<Dtype>* batch); 36 | 37 | vector<std::pair<std::string, std::string> > lines_; 38 | int lines_id_; 39 | bool normalize_; 40 | }; 41 | 42 | } 43 | 44 | #endif  // CAFFE_IMAGE_LABELMAP_DATA_LAYER_HPP_ 45 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/image_seg_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef
CAFFE_IMAGE_SEG_DATA_LAYER_HPP_ 2 | #define CAFFE_IMAGE_SEG_DATA_LAYER_HPP_ 3 | 4 | #include <string> 5 | #include <utility> 6 | #include <vector> 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/data_transformer.hpp" 10 | #include "caffe/internal_thread.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/layers/base_data_layer.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | 15 | 16 | namespace caffe { 17 | 18 | template <typename Dtype> 19 | class ImageSegDataLayer : public ImageDimPrefetchingDataLayer<Dtype> { 20 | public: 21 | explicit ImageSegDataLayer(const LayerParameter& param) 22 | : ImageDimPrefetchingDataLayer<Dtype>(param) {} 23 | virtual ~ImageSegDataLayer(); 24 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 25 | const vector<Blob<Dtype>*>& top); 26 | 27 | virtual inline const char* type() const { return "ImageSegData"; } 28 | virtual inline int ExactNumBottomBlobs() const { return 0; } 29 | virtual inline int ExactNumTopBlobs() const { return 2; } 30 | virtual inline bool AutoTopBlobs() const { return true; } 31 | 32 | protected: 33 | virtual void ShuffleImages(); 34 | virtual void load_batch(Batch<Dtype>* batch); 35 | 36 | Blob<Dtype> transformed_label_; 37 | shared_ptr<Caffe::RNG> prefetch_rng_; 38 | std::vector<std::pair<std::string, std::string> > lines_; 39 | int lines_id_; 40 | }; 41 | 42 | } // namespace caffe 43 | 44 | #endif  // CAFFE_IMAGE_SEG_DATA_LAYER_HPP_ 45 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/image_superpixelmap_data_layer.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef CAFFE_IMAGE_SUPERPIXELMAP_DATA_LAYER_HPP_ 3 | #define CAFFE_IMAGE_SUPERPIXELMAP_DATA_LAYER_HPP_ 4 | 5 | #include <string> 6 | #include <utility> 7 | #include <vector> 8 | 9 | #include "caffe/blob.hpp" 10 | #include "caffe/data_transformer.hpp" 11 | #include "caffe/internal_thread.hpp" 12 | #include "caffe/layer.hpp" 13 | #include "caffe/layers/base_data_layer.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | 16 | namespace caffe { 17 | struct data_pair 18 | { 19 | string source_filename; 20 | string groundtruth_filename; 21 | string superpixel_filename; 22 | }; 23 | 24 | template <typename Dtype> 25 | class ImageSuperpixelmapDataLayer : public BasePrefetchingSuperpixelmapDataLayer<Dtype> { 26 | public: 27 | explicit ImageSuperpixelmapDataLayer(const LayerParameter& param) 28 | : BasePrefetchingSuperpixelmapDataLayer<Dtype>(param) {} 29 | virtual ~ImageSuperpixelmapDataLayer(); 30 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 31 | const vector<Blob<Dtype>*>& top); 32 | 33 | virtual inline const char* type() const { return "ImageSuperpixelmapData"; } 34 | virtual inline int ExactNumBottomBlobs() const { return 0; } 35 | virtual inline int ExactNumTopBlobs() const { return -1; } // could be three if considering label 36 | 37 | protected: 38 | shared_ptr<Caffe::RNG> prefetch_rng_; 39 | virtual void ShuffleImages(); 40 | virtual void load_batch(SuperpixelmapBatch<Dtype>* batch); 41 | vector<data_pair> lines_; 42 | int lines_id_; 43 | bool normalize_; 44 | }; 45 | 46 | } 47 | 48 | #endif  // CAFFE_IMAGE_SUPERPIXELMAP_DATA_LAYER_HPP_ 49 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/input_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_INPUT_LAYER_HPP_ 2 | #define CAFFE_INPUT_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Provides data to the Net by assigning tops directly.
14 | * 15 | * This data layer is a container that merely holds the data assigned to it; 16 | * forward, backward, and reshape are all no-ops. 17 | */ 18 | template <typename Dtype> 19 | class InputLayer : public Layer<Dtype> { 20 | public: 21 | explicit InputLayer(const LayerParameter& param) 22 | : Layer<Dtype>(param) {} 23 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 24 | const vector<Blob<Dtype>*>& top); 25 | // Data layers should be shared by multiple solvers in parallel 26 | virtual inline bool ShareInParallel() const { return true; } 27 | // Data layers have no bottoms, so reshaping is trivial. 28 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 29 | const vector<Blob<Dtype>*>& top) {} 30 | 31 | virtual inline const char* type() const { return "Input"; } 32 | virtual inline int ExactNumBottomBlobs() const { return 0; } 33 | virtual inline int MinTopBlobs() const { return 1; } 34 | 35 | protected: 36 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 37 | const vector<Blob<Dtype>*>& top) {} 38 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 39 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {} 40 | }; 41 | 42 | } // namespace caffe 43 | 44 | #endif  // CAFFE_INPUT_LAYER_HPP_ 45 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/l1_loss_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_L1_LOSS_LAYER_HPP_ 2 | #define CAFFE_L1_LOSS_LAYER_HPP_ 3 | 4 | #include <vector> 5 | #include "caffe/blob.hpp" 6 | #include "caffe/layer.hpp" 7 | #include "caffe/proto/caffe.pb.h" 8 | 9 | #include "caffe/layers/loss_layer.hpp" 10 | 11 | namespace caffe { 12 | template <typename Dtype> 13 | class L1LossLayer : public LossLayer<Dtype> { 14 | public: 15 | explicit L1LossLayer(const LayerParameter& param) 16 | : LossLayer<Dtype>(param), diff_() {} 17 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 18 | const vector<Blob<Dtype>*>& top); 19 | virtual inline const char* type() const { return "L1Loss"; } 20 | 21 | virtual inline bool AllowForceBackward(const int bottom_index) const { 22 | return true; 23 | } 24 | 25 | protected: 26 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top); 28 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 29 | const vector<Blob<Dtype>*>& top); 30 | 31 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 32 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | Blob<Dtype> diff_; 37 | Blob<Dtype> errors_; 38 | 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif  // CAFFE_L1_LOSS_LAYER_HPP_ 44 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/mvn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_MVN_LAYER_HPP_ 2 | #define CAFFE_MVN_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Normalizes the input to have 0-mean and/or unit (1) variance. 14 | * 15 | * TODO(dox): thorough documentation for Forward, Backward, and proto params.
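 * (By default each channel of each example is normalized separately;
 * mvn_param can disable variance normalization or normalize across channels.)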
16 | */ 17 | template <typename Dtype> 18 | class MVNLayer : public Layer<Dtype> { 19 | public: 20 | explicit MVNLayer(const LayerParameter& param) 21 | : Layer<Dtype>(param) {} 22 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 23 | const vector<Blob<Dtype>*>& top); 24 | 25 | virtual inline const char* type() const { return "MVN"; } 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | 29 | protected: 30 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 31 | const vector<Blob<Dtype>*>& top); 32 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 33 | const vector<Blob<Dtype>*>& top); 34 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 35 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 36 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 37 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 38 | 39 | Blob<Dtype> mean_, variance_, temp_; 40 | 41 | /// sum_multiplier is used to carry out sum using BLAS 42 | Blob<Dtype> sum_multiplier_; 43 | Dtype eps_; 44 | }; 45 | 46 | } // namespace caffe 47 | 48 | #endif  // CAFFE_MVN_LAYER_HPP_ 49 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/neuron_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_NEURON_LAYER_HPP_ 2 | #define CAFFE_NEURON_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief An interface for layers that take one blob as input (@f$ x @f$) 14 | * and produce one equally-sized blob as output (@f$ y @f$), where 15 | * each element of the output depends only on the corresponding input 16 | * element. 17 | */ 18 | template <typename Dtype> 19 | class NeuronLayer : public Layer<Dtype> { 20 | public: 21 | explicit NeuronLayer(const LayerParameter& param) 22 | : Layer<Dtype>(param) {} 23 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 24 | const vector<Blob<Dtype>*>& top); 25 | 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | }; 29 | 30 | } // namespace caffe 31 | 32 | #endif  // CAFFE_NEURON_LAYER_HPP_ 33 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/parameter_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_PARAMETER_LAYER_HPP_ 2 | #define CAFFE_PARAMETER_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template <typename Dtype> 11 | class ParameterLayer : public Layer<Dtype> { 12 | public: 13 | explicit ParameterLayer(const LayerParameter& param) 14 | : Layer<Dtype>(param) {} 15 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 16 | const vector<Blob<Dtype>*>& top) { 17 | if (this->blobs_.size() > 0) { 18 | LOG(INFO) << "Skipping parameter initialization"; 19 | } else { 20 | this->blobs_.resize(1); 21 | this->blobs_[0].reset(new Blob<Dtype>()); 22 | this->blobs_[0]->Reshape(this->layer_param_.parameter_param().shape()); 23 | } 24 | top[0]->Reshape(this->layer_param_.parameter_param().shape()); 25 | } 26 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 27 | const vector<Blob<Dtype>*>& top) { } 28 | virtual inline const char* type() const { return "Parameter"; } 29 | virtual inline int ExactNumBottomBlobs() const { return 0; } 30 | virtual inline int ExactNumTopBlobs() const { return 1; } 31 | 32 | protected: 33 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 34 | const vector<Blob<Dtype>*>& top) { 35 | top[0]->ShareData(*(this->blobs_[0]));
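// Sharing both data and diff makes top[0] an alias of the parameter blob,
// so gradients written to the top land directly in the parameter's diff.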
36 | top[0]->ShareDiff(*(this->blobs_[0])); 37 | } 38 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 39 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) 40 | { } 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif 46 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/rnn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNN_LAYER_HPP_ 2 | #define CAFFE_RNN_LAYER_HPP_ 3 | 4 | #include <string> 5 | #include <utility> 6 | #include <vector> 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/common.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layers/recurrent_layer.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | 15 | namespace caffe { 16 | 17 | template <typename Dtype> class RecurrentLayer; 18 | 19 | /** 20 | * @brief Processes time-varying inputs using a simple recurrent neural network 21 | * (RNN). Implemented as a network unrolling the RNN computation in time. 22 | * 23 | * Given time-varying inputs @f$ x_t @f$, computes hidden state @f$ 24 | * h_t := \tanh[ W_{hh} h_{t-1} + W_{xh} x_t + b_h ] 25 | * @f$, and outputs @f$ 26 | * o_t := \tanh[ W_{ho} h_t + b_o ] 27 | * @f$. 28 | */ 29 | template <typename Dtype> 30 | class RNNLayer : public RecurrentLayer<Dtype> { 31 | public: 32 | explicit RNNLayer(const LayerParameter& param) 33 | : RecurrentLayer<Dtype>(param) {} 34 | 35 | virtual inline const char* type() const { return "RNN"; } 36 | 37 | protected: 38 | virtual void FillUnrolledNet(NetParameter* net_param) const; 39 | virtual void RecurrentInputBlobNames(vector<string>* names) const; 40 | virtual void RecurrentOutputBlobNames(vector<string>* names) const; 41 | virtual void RecurrentInputShapes(vector<BlobShape>* shapes) const; 42 | virtual void OutputBlobNames(vector<string>* names) const; 43 | }; 44 | 45 | } // namespace caffe 46 | 47 | #endif  // CAFFE_RNN_LAYER_HPP_ 48 |
-------------------------------------------------------------------------------- /caffe/include/caffe/layers/silence_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_SILENCE_LAYER_HPP_ 2 | #define CAFFE_SILENCE_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Ignores bottom blobs while producing no top blobs. (This is useful 14 | * to suppress outputs during testing.) 15 | */ 16 | template <typename Dtype> 17 | class SilenceLayer : public Layer<Dtype> { 18 | public: 19 | explicit SilenceLayer(const LayerParameter& param) 20 | : Layer<Dtype>(param) {} 21 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 22 | const vector<Blob<Dtype>*>& top) {} 23 | 24 | virtual inline const char* type() const { return "Silence"; } 25 | virtual inline int MinBottomBlobs() const { return 1; } 26 | virtual inline int ExactNumTopBlobs() const { return 0; } 27 | 28 | protected: 29 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 30 | const vector<Blob<Dtype>*>& top) {} 31 | // We can't define Forward_gpu here, since STUB_GPU will provide 32 | // its own definition for CPU_ONLY mode.
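// (In CPU_ONLY builds, STUB_GPU expands these into stubs that abort with a
// "Cannot use GPU in CPU-only Caffe" error.)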
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/silence_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_SILENCE_LAYER_HPP_
#define CAFFE_SILENCE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Ignores bottom blobs while producing no top blobs. (This is useful
 *        to suppress outputs during testing.)
 */
template <typename Dtype>
class SilenceLayer : public Layer<Dtype> {
 public:
  explicit SilenceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "Silence"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 0; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  // We can't define Forward_gpu here, since STUB_GPU will provide
  // its own definition for CPU_ONLY mode.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};

}  // namespace caffe

#endif  // CAFFE_SILENCE_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/similarity_loss_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_SIMILARITY_LOSS_LAYER_HPP_
#define CAFFE_SIMILARITY_LOSS_LAYER_HPP_

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/util/rng.hpp"

namespace caffe {

template <typename Dtype>
class SimilarityLossLayer : public LossLayer<Dtype> {
 public:
  explicit SimilarityLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "SimilarityLoss"; }
  virtual inline int ExactNumBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  Blob<Dtype> transpose_bottom;
  int numpixel;
  int height_;
  int width_;
  int num_per_region_;
  int numfeat_;
  int counter_pos_, counter_neg_;
  vector<vector<pair<int, int> > > all_labels;
  shared_ptr<Caffe::RNG> rng_;
};

}  // namespace caffe
#endif  // CAFFE_SIMILARITY_LOSS_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/similarity_loss_layer.hpp~:
--------------------------------------------------------------------------------
#ifndef CAFFE_SIMILARITY_LOSS_LAYER_HPP_
#define CAFFE_SIMILARITY_LOSS_LAYER_HPP_

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/util/rng.hpp"

namespace caffe {

template <typename Dtype>
class SimilarityLossLayer : public LossLayer<Dtype> {
 public:
  explicit SimilarityLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "SimilarityLoss"; }
  virtual inline int ExactNumBottomBlobs() const { return 3; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  Blob<Dtype> transpose_bottom;
  int numpixel;
  int height_;
  int width_;
  int num_per_region_;
  int numfeat_;
  int counter_pos_, counter_neg_;
  vector<vector<pair<int, int> > > all_labels;
  shared_ptr<Caffe::RNG> rng_;
};

}  // namespace caffe
#endif  // CAFFE_SIMILARITY_LOSS_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/split_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_SPLIT_LAYER_HPP_
#define CAFFE_SPLIT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Creates a "split" path in the network by copying the bottom Blob
 *        into multiple top Blob%s to be used by multiple consuming layers.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class SplitLayer : public Layer<Dtype> {
 public:
  explicit SplitLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Split"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int count_;
};

}  // namespace caffe

#endif  // CAFFE_SPLIT_LAYER_HPP_
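Why Split exists: when one blob feeds several layers, each consumer produces its own gradient, and those gradients must be summed. A minimal NumPy sketch of that contract (names illustrative):

import numpy as np

def split_forward(bottom, num_tops):
    # Forward just exposes the same data to every consumer.
    return [bottom for _ in range(num_tops)]

def split_backward(top_diffs):
    # Backward accumulates the gradients from all consumers; this is
    # why shared bottoms are rewired through a SplitLayer.
    return np.sum(top_diffs, axis=0)

x = np.random.randn(2, 3)
tops = split_forward(x, 3)
grad = split_backward([np.ones_like(t) for t in tops])  # == 3 everywhere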
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/tile_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_TILE_LAYER_HPP_
#define CAFFE_TILE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Copy a Blob along specified dimensions.
 */
template <typename Dtype>
class TileLayer : public Layer<Dtype> {
 public:
  explicit TileLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Tile"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  unsigned int axis_, tiles_, outer_dim_, inner_dim_;
};

}  // namespace caffe

#endif  // CAFFE_TILE_LAYER_HPP_
--------------------------------------------------------------------------------
/caffe/include/caffe/layers/weak_seg_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_IMAGE_SEG_DATA_LAYER_HPP_
#define CAFFE_IMAGE_SEG_DATA_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"


namespace caffe {

template <typename Dtype>
class WeakSegDataLayer : public WeakSegPrefetchingDataLayer<Dtype> {
 public:
  explicit WeakSegDataLayer(const LayerParameter& param)
      : WeakSegPrefetchingDataLayer<Dtype>(param) {}
  virtual ~WeakSegDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "WeakSegData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }
  virtual inline bool AutoTopBlobs() const { return true; }

 protected:
  virtual void ShuffleImages();
  virtual void load_batch(WeakSegBatch<Dtype>* batch);

  Blob<Dtype> transformed_label_;
  shared_ptr<Caffe::RNG> prefetch_rng_;
  vector<std::pair<std::string, std::string> > lines_;
  int lines_id_;
};

}  // namespace caffe

#endif  // CAFFE_IMAGE_SEG_DATA_LAYER_HPP_
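TileLayer above copies a blob several times along one axis; each input element therefore receives the summed gradient of all of its copies. A NumPy sketch of both directions (names illustrative):

import numpy as np

def tile_forward(x, axis, tiles):
    return np.concatenate([x] * tiles, axis=axis)

def tile_backward(top_diff, axis, tiles):
    # Each input element was copied `tiles` times, so its gradient is
    # the sum over all copies.
    return np.sum(np.split(top_diff, tiles, axis=axis), axis=0)

x = np.arange(6.0).reshape(2, 3)
y = tile_forward(x, axis=0, tiles=3)        # shape (6, 3)
gx = tile_backward(np.ones_like(y), 0, 3)   # == 3 everywhere, shape (2, 3)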
--------------------------------------------------------------------------------
/caffe/include/caffe/util/benchmark.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_BENCHMARK_H_
#define CAFFE_UTIL_BENCHMARK_H_

#include <boost/date_time/posix_time/posix_time.hpp>

#include "caffe/util/device_alternate.hpp"

namespace caffe {

class Timer {
 public:
  Timer();
  virtual ~Timer();
  virtual void Start();
  virtual void Stop();
  virtual float MilliSeconds();
  virtual float MicroSeconds();
  virtual float Seconds();

  inline bool initted() { return initted_; }
  inline bool running() { return running_; }
  inline bool has_run_at_least_once() { return has_run_at_least_once_; }

 protected:
  void Init();

  bool initted_;
  bool running_;
  bool has_run_at_least_once_;
#ifndef CPU_ONLY
  cudaEvent_t start_gpu_;
  cudaEvent_t stop_gpu_;
#endif
  boost::posix_time::ptime start_cpu_;
  boost::posix_time::ptime stop_cpu_;
  float elapsed_milliseconds_;
  float elapsed_microseconds_;
};

class CPUTimer : public Timer {
 public:
  explicit CPUTimer();
  virtual ~CPUTimer() {}
  virtual void Start();
  virtual void Stop();
  virtual float MilliSeconds();
  virtual float MicroSeconds();
};

}  // namespace caffe

#endif  // CAFFE_UTIL_BENCHMARK_H_
--------------------------------------------------------------------------------
/caffe/include/caffe/util/blocking_queue.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_
#define CAFFE_UTIL_BLOCKING_QUEUE_HPP_

#include <queue>
#include <string>

namespace caffe {

template<typename T>
class BlockingQueue {
 public:
  explicit BlockingQueue();

  void push(const T& t);

  bool try_pop(T* t);

  // This logs a message if the thread needs to be blocked;
  // useful for detecting e.g. when data feeding is too slow.
  T pop(const string& log_on_wait = "");

  bool try_peek(T* t);

  // Return element without removing it
  T peek();

  size_t size() const;

 protected:
  /**
   Move synchronization fields out instead of including boost/thread.hpp
   to avoid boost/NVCC issues (#1009, #1010) on OSX. Also fails on
   Linux CUDA 7.0.18.
   */
  class sync;

  std::queue<T> queue_;
  shared_ptr<sync> sync_;

  DISABLE_COPY_AND_ASSIGN(BlockingQueue);
};

}  // namespace caffe

#endif
--------------------------------------------------------------------------------
/caffe/include/caffe/util/db.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_DB_HPP
#define CAFFE_UTIL_DB_HPP

#include <string>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe { namespace db {

enum Mode { READ, WRITE, NEW };

class Cursor {
 public:
  Cursor() { }
  virtual ~Cursor() { }
  virtual void SeekToFirst() = 0;
  virtual void Next() = 0;
  virtual string key() = 0;
  virtual string value() = 0;
  virtual bool valid() = 0;

  DISABLE_COPY_AND_ASSIGN(Cursor);
};

class Transaction {
 public:
  Transaction() { }
  virtual ~Transaction() { }
  virtual void Put(const string& key, const string& value) = 0;
  virtual void Commit() = 0;

  DISABLE_COPY_AND_ASSIGN(Transaction);
};

class DB {
 public:
  DB() { }
  virtual ~DB() { }
  virtual void Open(const string& source, Mode mode) = 0;
  virtual void Close() = 0;
  virtual Cursor* NewCursor() = 0;
  virtual Transaction* NewTransaction() = 0;

  DISABLE_COPY_AND_ASSIGN(DB);
};

DB* GetDB(DataParameter::DB backend);
DB* GetDB(const string& backend);

}  // namespace db
}  // namespace caffe

#endif  // CAFFE_UTIL_DB_HPP
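The Cursor/Transaction abstraction above wraps LMDB/LevelDB stores of serialized Datum protos. A sketch of walking such a database from Python, assuming the lmdb package and pycaffe's compiled proto are available; the database path is hypothetical:

import lmdb
from caffe.proto import caffe_pb2

# Iterate every record, mirroring Cursor::SeekToFirst()/Next()/key()/value().
env = lmdb.open('examples/mnist/mnist_train_lmdb', readonly=True)
with env.begin() as txn:
    for key, value in txn.cursor():
        datum = caffe_pb2.Datum()
        datum.ParseFromString(value)
        print(key, datum.channels, datum.height, datum.width, datum.label)
        break  # just inspect the first record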
--------------------------------------------------------------------------------
/caffe/include/caffe/util/format.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_FORMAT_H_
#define CAFFE_UTIL_FORMAT_H_

#include <iomanip>  // NOLINT(readability/streams)
#include <sstream>  // NOLINT(readability/streams)
#include <string>

namespace caffe {

inline std::string format_int(int n, int numberOfLeadingZeros = 0) {
  std::ostringstream s;
  s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n;
  return s.str();
}

}

#endif  // CAFFE_UTIL_FORMAT_H_
--------------------------------------------------------------------------------
/caffe/include/caffe/util/gpu_util.cuh:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_GPU_UTIL_H_
#define CAFFE_UTIL_GPU_UTIL_H_

namespace caffe {

template <typename Dtype>
inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address);

template <>
inline __device__
float caffe_gpu_atomic_add(const float val, float* address) {
  return atomicAdd(address, val);
}

// double atomicAdd implementation taken from:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG
template <>
inline __device__
double caffe_gpu_atomic_add(const double val, double* address) {
  unsigned long long int* address_as_ull =  // NOLINT(runtime/int)
      // NOLINT_NEXT_LINE(runtime/int)
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull;  // NOLINT(runtime/int)
  unsigned long long int assumed;  // NOLINT(runtime/int)
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}

}  // namespace caffe

#endif  // CAFFE_UTIL_GPU_UTIL_H_
--------------------------------------------------------------------------------
/caffe/include/caffe/util/hdf5.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_UTIL_HDF5_H_
#define CAFFE_UTIL_HDF5_H_

#include <string>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/blob.hpp"

namespace caffe {

template <typename Dtype>
void hdf5_load_nd_dataset_helper(
    hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
    Blob<Dtype>* blob);

template <typename Dtype>
void hdf5_load_nd_dataset(
    hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
    Blob<Dtype>* blob);

template <typename Dtype>
void hdf5_save_nd_dataset(
    const hid_t file_id, const string& dataset_name, const Blob<Dtype>& blob,
    bool write_diff = false);

int hdf5_load_int(hid_t loc_id, const string& dataset_name);
void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i);
string hdf5_load_string(hid_t loc_id, const string& dataset_name);
void hdf5_save_string(hid_t loc_id, const string& dataset_name,
                      const string& s);

int hdf5_get_num_links(hid_t loc_id);
string hdf5_get_name_by_idx(hid_t loc_id, int idx);

}  // namespace caffe

#endif  // CAFFE_UTIL_HDF5_H_
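The hdf5_load_nd_dataset() helpers above are what HDF5 data layers use to read named datasets from .h5 files. A sketch of preparing such a file with h5py; file names are hypothetical, and 'data'/'label' are the conventional dataset names:

import h5py
import numpy as np

X = np.random.randn(100, 3, 32, 32).astype(np.float32)
y = np.random.randint(0, 10, size=(100,)).astype(np.float32)
with h5py.File('train.h5', 'w') as f:
    f.create_dataset('data', data=X)     # read back by hdf5_load_nd_dataset
    f.create_dataset('label', data=y)
with open('train_h5_list.txt', 'w') as f:
    f.write('train.h5\n')  # HDF5 data layers take a list file of .h5 paths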
--------------------------------------------------------------------------------
/caffe/include/caffe/util/insert_splits.hpp:
--------------------------------------------------------------------------------
#ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_
#define _CAFFE_UTIL_INSERT_SPLITS_HPP_

#include <string>

#include "caffe/proto/caffe.pb.h"

namespace caffe {

// Copy NetParameter with SplitLayers added to replace any shared bottom
// blobs with unique bottom blobs provided by the SplitLayer.
void InsertSplits(const NetParameter& param, NetParameter* param_split);

void ConfigureSplitLayer(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_count, const float loss_weight,
    LayerParameter* split_layer_param);

string SplitLayerName(const string& layer_name, const string& blob_name,
    const int blob_idx);

string SplitBlobName(const string& layer_name, const string& blob_name,
    const int blob_idx, const int split_idx);

}  // namespace caffe

#endif  // CAFFE_UTIL_INSERT_SPLITS_HPP_
--------------------------------------------------------------------------------
/caffe/include/caffe/util/rng.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_RNG_CPP_HPP_
#define CAFFE_RNG_CPP_HPP_

#include <algorithm>
#include <iterator>

#include "boost/random/mersenne_twister.hpp"
#include "boost/random/uniform_int.hpp"

#include "caffe/common.hpp"

namespace caffe {

typedef boost::mt19937 rng_t;

inline rng_t* caffe_rng() {
  return static_cast<caffe::rng_t*>(Caffe::rng_stream().generator());
}

// Fisher–Yates algorithm
template <class RandomAccessIterator, class RandomGenerator>
inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
                    RandomGenerator* gen) {
  typedef typename std::iterator_traits<RandomAccessIterator>::difference_type
      difference_type;
  typedef typename boost::uniform_int<difference_type> dist_type;

  difference_type length = std::distance(begin, end);
  if (length <= 0) return;

  for (difference_type i = length - 1; i > 0; --i) {
    dist_type dist(0, i);
    std::iter_swap(begin + i, begin + dist(*gen));
  }
}

template <class RandomAccessIterator>
inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) {
  shuffle(begin, end, caffe_rng());
}
}  // namespace caffe

#endif  // CAFFE_RNG_HPP_
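A Python sketch of the same Fisher–Yates scheme that caffe::shuffle implements above: walk from the back, swapping element i with a uniform pick from [0, i] (names illustrative):

import random

def fisher_yates_shuffle(seq, rng=random):
    for i in range(len(seq) - 1, 0, -1):
        j = rng.randint(0, i)  # inclusive bounds, like boost::uniform_int
        seq[i], seq[j] = seq[j], seq[i]

items = list(range(10))
fisher_yates_shuffle(items)
print(items)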
--------------------------------------------------------------------------------
/caffe/include/caffe/util/signal_handler.h:
--------------------------------------------------------------------------------
#ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_
#define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_

#include "caffe/proto/caffe.pb.h"
#include "caffe/solver.hpp"

namespace caffe {

class SignalHandler {
 public:
  // Constructor. Specify what action to take when a signal is received.
  SignalHandler(SolverAction::Enum SIGINT_action,
                SolverAction::Enum SIGHUP_action);
  ~SignalHandler();
  ActionCallback GetActionFunction();
 private:
  SolverAction::Enum CheckForSignals() const;
  SolverAction::Enum SIGINT_action_;
  SolverAction::Enum SIGHUP_action_;
};

}  // namespace caffe

#endif  // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_
--------------------------------------------------------------------------------
/caffe/lib/add_weight.py:
--------------------------------------------------------------------------------
import caffe
import numpy as np
import cv2

class addWeight(caffe.Layer):
    """
    bottom[0]: feature map
    bottom[1]: weight map
    """
    def setup(self, bottom, top):
        if len(bottom) != 2:
            raise Exception("The number of bottom blobs must be two")

        if len(top) != 1:
            raise Exception("The number of top blobs must be one")

    def reshape(self, bottom, top):
        top[0].reshape(*bottom[0].shape)

    def forward(self, bottom, top):
        self.bottom_1 = bottom[1].data
        self.bottom_0 = bottom[0].data

        top[0].data[...] = bottom[1].data * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        tmp = np.ones(bottom[0].data.shape)
        bottom[0].diff[...] = tmp * self.bottom_1 * top[0].diff
        bottom[1].diff[...] = (top[0].diff * self.bottom_0).sum(axis=1)[np.newaxis, ...]
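The backward rule above follows from y = w * x with w broadcast across channels: dL/dx = w * dL/dy and dL/dw = sum over channels of (x * dL/dy). A NumPy sanity check of that rule against a numeric difference (shapes and names illustrative):

import numpy as np

x = np.random.randn(1, 4, 5, 5)    # feature map (N, C, H, W)
w = np.random.randn(1, 1, 5, 5)    # weight map, broadcast across channels
dy = np.random.randn(1, 4, 5, 5)   # upstream gradient

dx = w * dy                                     # matches bottom[0].diff
dw = (x * dy).sum(axis=1)[np.newaxis, ...]      # matches bottom[1].diff

eps = 1e-6                                      # numeric check on one entry
w2 = w.copy(); w2[0, 0, 2, 2] += eps
num = (((w2 * x) - (w * x)) * dy).sum() / eps
print(np.allclose(num, dw[0, 0, 2, 2]))         # True (up to float error)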
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/+test/test_io.m:
--------------------------------------------------------------------------------
classdef test_io < matlab.unittest.TestCase
  methods (Test)
    function test_read_write_mean(self)
      % randomly generate mean data
      width = 200;
      height = 300;
      channels = 3;
      mean_data_write = 255 * rand(width, height, channels, 'single');
      % write mean data to binary proto
      mean_proto_file = tempname();
      caffe.io.write_mean(mean_data_write, mean_proto_file);
      % read mean data from saved binary proto and test whether they are equal
      mean_data_read = caffe.io.read_mean(mean_proto_file);
      self.verifyEqual(mean_data_write, mean_data_read)
      delete(mean_proto_file);
    end
  end
end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/+test/test_solver.m:
--------------------------------------------------------------------------------
classdef test_solver < matlab.unittest.TestCase

  properties
    num_output
    solver
  end

  methods
    function self = test_solver()
      self.num_output = 13;
      model_file = caffe.test.test_net.simple_net_file(self.num_output);
      solver_file = tempname();

      fid = fopen(solver_file, 'w');
      fprintf(fid, [ ...
        'net: "' model_file '"\n' ...
        'test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n' ...
        'weight_decay: 0.0005 lr_policy: "inv" gamma: 0.0001 power: 0.75\n' ...
        'display: 100 max_iter: 100 snapshot_after_train: false\n' ]);
      fclose(fid);

      self.solver = caffe.Solver(solver_file);
      % also make sure get_solver runs
      caffe.get_solver(solver_file);
      caffe.set_mode_cpu();
      % fill in valid labels
      self.solver.net.blobs('label').set_data(randi( ...
        self.num_output - 1, self.solver.net.blobs('label').shape));
      self.solver.test_nets(1).blobs('label').set_data(randi( ...
        self.num_output - 1, self.solver.test_nets(1).blobs('label').shape));

      delete(solver_file);
      delete(model_file);
    end
  end
  methods (Test)
    function test_solve(self)
      self.verifyEqual(self.solver.iter(), 0)
      self.solver.step(30);
      self.verifyEqual(self.solver.iter(), 30)
      self.solver.solve()
      self.verifyEqual(self.solver.iter(), 100)
    end
  end
end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/Layer.m:
--------------------------------------------------------------------------------
classdef Layer < handle
  % Wrapper class of caffe::Layer in matlab

  properties (Access = private)
    hLayer_self
    attributes
    % attributes fields:
    %     hBlob_blobs
  end
  properties (SetAccess = private)
    params
  end

  methods
    function self = Layer(hLayer_layer)
      CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle');

      % setup self handle and attributes
      self.hLayer_self = hLayer_layer;
      self.attributes = caffe_('layer_get_attr', self.hLayer_self);

      % setup weights
      self.params = caffe.Blob.empty();
      for n = 1:length(self.attributes.hBlob_blobs)
        self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n));
      end
    end
    function layer_type = type(self)
      layer_type = caffe_('layer_get_type', self.hLayer_self);
    end
  end
end
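The pycaffe interface exposes the same solver workflow the MATLAB test above exercises. A minimal Python counterpart (solver file name hypothetical):

import caffe

caffe.set_mode_cpu()
solver = caffe.get_solver('solver.prototxt')
assert solver.iter == 0
solver.step(30)        # run 30 solver iterations
assert solver.iter == 30
solver.solve()         # run the remaining iterations up to max_iter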
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/get_net.m:
--------------------------------------------------------------------------------
function net = get_net(varargin)
% net = get_net(model_file, phase_name) or
% net = get_net(model_file, weights_file, phase_name)
%   Construct a net from model_file, and load weights from weights_file
%   phase_name can only be 'train' or 'test'

CHECK(nargin == 2 || nargin == 3, ['usage: ' ...
  'net = get_net(model_file, phase_name) or ' ...
  'net = get_net(model_file, weights_file, phase_name)']);
if nargin == 3
  model_file = varargin{1};
  weights_file = varargin{2};
  phase_name = varargin{3};
elseif nargin == 2
  model_file = varargin{1};
  phase_name = varargin{2};
end

CHECK(ischar(model_file), 'model_file must be a string');
CHECK(ischar(phase_name), 'phase_name must be a string');
CHECK_FILE_EXIST(model_file);
CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ...
  sprintf('phase_name can only be %strain%s or %stest%s', ...
  char(39), char(39), char(39), char(39)));

% construct caffe net from model_file
hNet = caffe_('get_net', model_file, phase_name);
net = caffe.Net(hNet);

% load weights from weights_file
if nargin == 3
  CHECK(ischar(weights_file), 'weights_file must be a string');
  CHECK_FILE_EXIST(weights_file);
  net.copy_from(weights_file);
end

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/get_solver.m:
--------------------------------------------------------------------------------
function solver = get_solver(solver_file)
% solver = get_solver(solver_file)
%   Construct a Solver object from solver_file

CHECK(ischar(solver_file), 'solver_file must be a string');
CHECK_FILE_EXIST(solver_file);
pSolver = caffe_('get_solver', solver_file);
solver = caffe.Solver(pSolver);

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/private/CHECK.m:
--------------------------------------------------------------------------------
function CHECK(expr, error_msg)

if ~expr
  error(error_msg);
end

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/private/CHECK_FILE_EXIST.m:
--------------------------------------------------------------------------------
function CHECK_FILE_EXIST(filename)

if exist(filename, 'file') == 0
  error('%s does not exist', filename);
end

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/private/caffe_.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/matlab/+caffe/private/caffe_.mexa64
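The pycaffe equivalent of get_net.m above, for readers working in Python rather than MATLAB (file names hypothetical):

import caffe

caffe.set_mode_cpu()
# model only, test phase
net = caffe.Net('deploy.prototxt', caffe.TEST)
# model plus pretrained weights
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
print(net.blobs.keys())   # activation blobs
print(net.params.keys())  # learnable parameters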
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/private/is_valid_handle.m:
--------------------------------------------------------------------------------
function valid = is_valid_handle(hObj)
% valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key')
%   Check if a handle is valid (has the right data type and init_key matches)
%   Use is_valid_handle('get_new_init_key') to get new init_key from C++;

% a handle is a struct array with the following fields
%   (uint64) ptr      : the pointer to the C++ object
%   (double) init_key : caffe initialization key

persistent init_key;
if isempty(init_key)
  init_key = caffe_('get_init_key');
end

% is_valid_handle('get_new_init_key') to get new init_key from C++;
if ischar(hObj) && strcmp(hObj, 'get_new_init_key')
  init_key = caffe_('get_init_key');
  return
else
  % check whether data types are correct and init_key matches
  valid = isstruct(hObj) ...
    && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ...
    && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ...
    && hObj.init_key == init_key;
end

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/reset_all.m:
--------------------------------------------------------------------------------
function reset_all()
% reset_all()
%   clear all solvers and stand-alone nets and reset Caffe to initial status

caffe_('reset');
is_valid_handle('get_new_init_key');

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/run_tests.m:
--------------------------------------------------------------------------------
function results = run_tests()
% results = run_tests()
%   run all tests in this caffe matlab wrapper package

% use CPU for testing
caffe.set_mode_cpu();

% reset caffe before testing
caffe.reset_all();

% put all test cases here
results = [...
  run(caffe.test.test_net) ...
  run(caffe.test.test_solver) ...
  run(caffe.test.test_io) ];

% reset caffe after testing
caffe.reset_all();

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/set_device.m:
--------------------------------------------------------------------------------
function set_device(device_id)
% set_device(device_id)
%   set Caffe's GPU device ID

CHECK(isscalar(device_id) && device_id >= 0, ...
  'device_id must be non-negative integer');
device_id = double(device_id);

caffe_('set_device', device_id);

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/set_mode_cpu.m:
--------------------------------------------------------------------------------
function set_mode_cpu()
% set_mode_cpu()
%   set Caffe to CPU mode

caffe_('set_mode_cpu');

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/set_mode_gpu.m:
--------------------------------------------------------------------------------
function set_mode_gpu()
% set_mode_gpu()
%   set Caffe to GPU mode

caffe_('set_mode_gpu');

end
--------------------------------------------------------------------------------
/caffe/matlab/+caffe/version.m:
--------------------------------------------------------------------------------
function version_str = version()
% version()
%   show Caffe's version.

version_str = caffe_('version');

end
--------------------------------------------------------------------------------
/caffe/matlab/hdf5creation/.gitignore:
--------------------------------------------------------------------------------
*.h5
list.txt
--------------------------------------------------------------------------------
/caffe/python/CMakeLists.txt:
--------------------------------------------------------------------------------
if(NOT HAVE_PYTHON)
  message(STATUS "Python interface is disabled or not all required dependencies found. Building without it...")
  return()
endif()

include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp)

add_library(pycaffe SHARED ${python_srcs})
target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES})
set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe")
caffe_default_properties(pycaffe)

if(UNIX OR APPLE)
  set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so")
  add_custom_command(TARGET pycaffe POST_BUILD
                     COMMAND ln -sf $<TARGET_LINKER_FILE:pycaffe> "${__linkname}"
                     COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/python/caffe/proto
                     COMMAND touch ${PROJECT_SOURCE_DIR}/python/caffe/proto/__init__.py
                     COMMAND cp ${proto_gen_folder}/*.py ${PROJECT_SOURCE_DIR}/python/caffe/proto/
                     COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/_caffe${Caffe_POSTFIX}.so")
endif()

# ---[ Install
# scripts
file(GLOB python_files *.py requirements.txt)
install(FILES ${python_files} DESTINATION python)

# module
install(DIRECTORY caffe
    DESTINATION python
    FILES_MATCHING
    PATTERN "*.py"
    PATTERN "ilsvrc_2012_mean.npy"
    PATTERN "test" EXCLUDE
    )

# _caffe.so
install(TARGETS pycaffe DESTINATION python/caffe)
--------------------------------------------------------------------------------
/caffe/python/caffe/__init__.py:
--------------------------------------------------------------------------------
from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver
from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed
from ._caffe import __version__
from .proto.caffe_pb2 import TRAIN, TEST
from .classifier import Classifier
from .detector import Detector
from . import io
from .net_spec import layers, params, NetSpec, to_proto
--------------------------------------------------------------------------------
/caffe/python/caffe/_caffe.so:
--------------------------------------------------------------------------------
/media/ubuntu/disk/Project/ContrastPrior/caffe/build/lib/_caffe.so
--------------------------------------------------------------------------------
/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy
--------------------------------------------------------------------------------
/caffe/python/caffe/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/python/caffe/proto/__init__.py
--------------------------------------------------------------------------------
/caffe/python/caffe/test/test_layer_type_list.py:
--------------------------------------------------------------------------------
import unittest

import caffe

class TestLayerTypeList(unittest.TestCase):

    def test_standard_types(self):
        #removing 'Data' from list
        for type_name in ['Data', 'Convolution', 'InnerProduct']:
            self.assertIn(type_name, caffe.layer_type_list(),
                          '%s not in layer_type_list()' % type_name)
--------------------------------------------------------------------------------
/caffe/python/requirements.txt:
--------------------------------------------------------------------------------
Cython>=0.19.2
numpy>=1.7.1
scipy>=0.13.2
scikit-image>=0.9.3
matplotlib>=1.3.1
ipython>=3.0.0
h5py>=2.2.0
leveldb>=0.191
networkx>=1.8.1
nose>=1.3.0
pandas>=0.12.0
python-dateutil>=1.4,<2
protobuf>=2.5.0
python-gflags>=2.0
pyyaml>=3.10
Pillow>=2.3.0
six>=1.1.0
--------------------------------------------------------------------------------
/caffe/scripts/build_docs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Build documentation for display in web browser.

PORT=${1:-4000}

echo "usage: build_docs.sh [port]"

# Find the docs dir, no matter where the script is called
ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )"
cd $ROOT_DIR

# Gather docs.
scripts/gather_examples.sh

# Generate developer docs.
make docs

# Display docs using web server.
cd docs
jekyll serve -w -s . -d _site --port=$PORT
10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /caffe/scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh " 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip 22 | unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR 23 | rm $MODEL_DIR/gist.zip 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /caffe/scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /caffe/scripts/travis/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # build the project 3 | 4 | BASEDIR=$(dirname $0) 5 | source $BASEDIR/defaults.sh 6 | 7 | if ! 
--------------------------------------------------------------------------------
/caffe/scripts/travis/build.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# build the project

BASEDIR=$(dirname $0)
source $BASEDIR/defaults.sh

if ! $WITH_CMAKE ; then
  make --jobs $NUM_THREADS all test pycaffe warn
else
  cd build
  make --jobs $NUM_THREADS all test.testbin
fi
make lint
--------------------------------------------------------------------------------
/caffe/scripts/travis/configure-cmake.sh:
--------------------------------------------------------------------------------
# CMake configuration

mkdir -p build
cd build

ARGS="-DCMAKE_BUILD_TYPE=Release -DBLAS=Open"

if $WITH_PYTHON3 ; then
  ARGS="$ARGS -Dpython_version=3"
fi

if $WITH_IO ; then
  ARGS="$ARGS -DUSE_OPENCV=On -DUSE_LMDB=On -DUSE_LEVELDB=On"
else
  ARGS="$ARGS -DUSE_OPENCV=Off -DUSE_LMDB=Off -DUSE_LEVELDB=Off"
fi

if $WITH_CUDA ; then
  # Only build SM50
  ARGS="$ARGS -DCPU_ONLY=Off -DCUDA_ARCH_NAME=Manual -DCUDA_ARCH_BIN=\"50\" -DCUDA_ARCH_PTX=\"\""
else
  ARGS="$ARGS -DCPU_ONLY=On"
fi

if $WITH_CUDNN ; then
  ARGS="$ARGS -DUSE_CUDNN=On"
else
  ARGS="$ARGS -DUSE_CUDNN=Off"
fi

cmake .. $ARGS
--------------------------------------------------------------------------------
/caffe/scripts/travis/configure-make.sh:
--------------------------------------------------------------------------------
# raw Makefile configuration

LINE () {
  echo "$@" >> Makefile.config
}

cp Makefile.config.example Makefile.config

LINE "BLAS := open"
LINE "WITH_PYTHON_LAYER := 1"

if $WITH_PYTHON3 ; then
  # TODO(lukeyeager) this path is currently disabled because of test errors like:
  #   ImportError: dynamic module does not define init function (PyInit__caffe)
  LINE "PYTHON_LIBRARIES := python3.4m boost_python-py34"
  LINE "PYTHON_INCLUDE := /usr/include/python3.4 /usr/lib/python3/dist-packages/numpy/core/include"
  LINE "INCLUDE_DIRS := \$(INCLUDE_DIRS) \$(PYTHON_INCLUDE)"
fi

if ! $WITH_IO ; then
  LINE "USE_OPENCV := 0"
  LINE "USE_LEVELDB := 0"
  LINE "USE_LMDB := 0"
fi

if $WITH_CUDA ; then
  # Only build SM50
  LINE "CUDA_ARCH := -gencode arch=compute_50,code=sm_50"
else
  LINE "CPU_ONLY := 1"
fi

if $WITH_CUDNN ; then
  LINE "USE_CUDNN := 1"
fi
--------------------------------------------------------------------------------
/caffe/scripts/travis/configure.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# configure the project

BASEDIR=$(dirname $0)
source $BASEDIR/defaults.sh

if ! $WITH_CMAKE ; then
  source $BASEDIR/configure-make.sh
else
  source $BASEDIR/configure-cmake.sh
fi
--------------------------------------------------------------------------------
/caffe/scripts/travis/defaults.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# set default environment variables

set -e

WITH_CMAKE=${WITH_CMAKE:-false}
WITH_PYTHON3=${WITH_PYTHON3:-false}
WITH_IO=${WITH_IO:-true}
WITH_CUDA=${WITH_CUDA:-false}
WITH_CUDNN=${WITH_CUDNN:-false}
--------------------------------------------------------------------------------
/caffe/scripts/travis/install-python-deps.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# install extra Python dependencies
# (must come after setup-venv)

BASEDIR=$(dirname $0)
source $BASEDIR/defaults.sh

if ! $WITH_PYTHON3 ; then
  # Python2
  :
else
  # Python3
  pip install --pre protobuf==3.0.0b3
fi
--------------------------------------------------------------------------------
/caffe/scripts/travis/setup-venv.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# setup a Python virtualenv
# (must come after install-deps)

BASEDIR=$(dirname $0)
source $BASEDIR/defaults.sh

VENV_DIR=${1:-~/venv}

# setup our own virtualenv
if $WITH_PYTHON3; then
  PYTHON_EXE='/usr/bin/python3'
else
  PYTHON_EXE='/usr/bin/python2'
fi

# use --system-site-packages so that Python will use deb packages
virtualenv $VENV_DIR -p $PYTHON_EXE --system-site-packages
--------------------------------------------------------------------------------
/caffe/scripts/travis/test.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# test the project

BASEDIR=$(dirname $0)
source $BASEDIR/defaults.sh

if $WITH_CUDA ; then
  echo "Skipping tests for CUDA build"
  exit 0
fi

if ! $WITH_CMAKE ; then
  make runtest
  make pytest
else
  cd build
  make runtest
  make pytest
fi
--------------------------------------------------------------------------------
/caffe/scripts/upload_model_to_gist.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Check for valid directory
DIRNAME=$1
if [ ! -f $DIRNAME/readme.md ]; then
  echo "usage: upload_model_to_gist.sh <dirname>"
  echo "  <dirname>/readme.md must exist"
fi
cd $DIRNAME
FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo`

# Check for gist tool.
gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; }

NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md`
if [ -z "$NAME" ]; then
  echo "  <dirname>/readme.md must contain name field in the front-matter."
fi

GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md`
if [ -z "$GIST" ]; then
  echo "Uploading new Gist"
  gist -p -d "$NAME" $FILES
else
  echo "Updating existing Gist, id $GIST"
  gist -u $GIST -d "$NAME" $FILES
fi

RESULT=$?
if [ $RESULT -eq 0 ]; then
  echo "You've uploaded your model!"
  echo "Don't forget to add the gist_id field to your <dirname>/readme.md now!"
  echo "Run the command again after you do that, to make sure the Gist id propagates."
  echo ""
  echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo"
else
  echo "Something went wrong!"
fi
--------------------------------------------------------------------------------
/caffe/solver.prototxt:
--------------------------------------------------------------------------------
train_net: "./train_joint10_2fuse_residual.prototxt"
# lr for fine-tuning should be lower than when starting from scratch
#debug_info: true
base_lr: 1e-7
lr_policy: "step"
#power: 0.9
gamma: 0.1
iter_size: 10
# stepsize should also be lower, as we're closer to being done
stepsize: 7000
average_loss: 20
display: 10
max_iter: 10000
momentum: 0.90
weight_decay: 0.0005
snapshot: 5000
snapshot_prefix: "/opt/snapshot/depth_saliency/rfs/multiply/model_2fuse_residual_again_2/"
# uncomment the following to default to CPU mode solving
# solver_mode: CPU
--------------------------------------------------------------------------------
/caffe/src/caffe/CMakeLists.txt:
--------------------------------------------------------------------------------
# generate protobuf sources
file(GLOB proto_files proto/*.proto)
caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files})

# include python files either to force generation
add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python})
set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend!
caffe_default_properties(proto)

# --[ Caffe library

# creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists
caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR})

if(HAVE_CUDA)
  caffe_cuda_compile(cuda_objs ${cuda})
  list(APPEND srcs ${cuda_objs} ${cuda})
endif()

add_library(caffe ${srcs})
target_link_libraries(caffe proto ${Caffe_LINKER_LIBS})
caffe_default_properties(caffe)
set_target_properties(caffe PROPERTIES
    VERSION   ${CAFFE_TARGET_VERSION}
    SOVERSION ${CAFFE_TARGET_SOVERSION}
    )

# ---[ Tests
add_subdirectory(test)

# ---[ Install
install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include)
install(FILES ${proto_hdrs} DESTINATION include/caffe/proto)
install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib)

file(WRITE ${PROJECT_BINARY_DIR}/__init__.py)
list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py)
install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto)
--------------------------------------------------------------------------------
/caffe/src/caffe/layer.cpp:
--------------------------------------------------------------------------------
#include <boost/thread.hpp>
#include "caffe/layer.hpp"

namespace caffe {

template <typename Dtype>
void Layer<Dtype>::InitMutex() {
  forward_mutex_.reset(new boost::mutex());
}

template <typename Dtype>
void Layer<Dtype>::Lock() {
  if (IsShared()) {
    forward_mutex_->lock();
  }
}

template <typename Dtype>
void Layer<Dtype>::Unlock() {
  if (IsShared()) {
    forward_mutex_->unlock();
  }
}

INSTANTIATE_CLASS(Layer);

}  // namespace caffe
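The solver.prototxt above combines a "step" learning-rate policy with gradient accumulation: the rate drops by gamma every stepsize iterations, and iter_size: 10 accumulates ten forward/backward passes per update, so the effective batch size is the net's batch size times ten. A sketch of the implied schedule:

# Effective LR under lr_policy "step": lr(it) = base_lr * gamma^(it // stepsize)
base_lr, gamma, stepsize = 1e-7, 0.1, 7000

def lr_at(it):
    return base_lr * gamma ** (it // stepsize)

for it in (0, 6999, 7000, 10000):
    print(it, lr_at(it))   # 1e-7 until iteration 6999, 1e-8 afterwards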
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/.scale_layer.cpp.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/src/caffe/layers/.scale_layer.cpp.swp
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/.~lock.base_conv_layer.cpp#:
--------------------------------------------------------------------------------
,star,star,05.04.2018 13:51,file:///home/star/.config/libreoffice/4;
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/absval_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/absval_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void AbsValLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
    "allow in-place computation.";
}

template <typename Dtype>
void AbsValLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_abs(count, bottom[0]->cpu_data(), top_data);
}

template <typename Dtype>
void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->cpu_diff();
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_cpu_sign(count, bottom_data, bottom_diff);
    caffe_mul(count, bottom_diff, top_diff, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(AbsValLayer);
#endif

INSTANTIATE_CLASS(AbsValLayer);
REGISTER_LAYER_CLASS(AbsVal);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/absval_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/absval_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void AbsValLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_gpu_data();
  caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
}

template <typename Dtype>
void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->gpu_diff();
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_sign(count, bottom_data, bottom_diff);
    caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer);


}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/bnll_layer.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/bnll_layer.hpp"

namespace caffe {

const float kBNLL_THRESHOLD = 50.;

template <typename Dtype>
void BNLLLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = bottom_data[i] > 0 ?
        bottom_data[i] + log(1. + exp(-bottom_data[i])) :
        log(1. + exp(bottom_data[i]));
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype expval;
    for (int i = 0; i < count; ++i) {
      expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
      bottom_diff[i] = top_diff[i] * expval / (expval + 1.);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(BNLLLayer);
#endif

INSTANTIATE_CLASS(BNLLLayer);
REGISTER_LAYER_CLASS(BNLL);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_lcn_layer.cu:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_lcn_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNLCNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();

  CUDNN_CHECK(cudnnDivisiveNormalizationForward(
        handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        NULL,  // srcMeansData
        this->tempData1, this->tempData2,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data) );
}

template <typename Dtype>
void CuDNNLCNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

  CUDNN_CHECK(cudnnDivisiveNormalizationBackward(
        handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        NULL, top_diff,  // NULL - srcMeansData
        this->tempData1, this->tempData2,
        cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_diff,
        NULL) );
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLCNLayer);

}  // namespace caffe
#endif
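The BNLL forward above is the softplus y = log(1 + exp(x)), computed with a branch so exp() never sees a large positive argument, and its gradient is the logistic sigmoid, with kBNLL_THRESHOLD capping the exponent. A NumPy sketch of the same numerically stable scheme (names illustrative):

import numpy as np

def bnll(x):
    # Same branching as Forward_cpu: x + log1p(exp(-x)) for x > 0,
    # log1p(exp(x)) otherwise; exp(-|x|) never overflows.
    return np.where(x > 0, x, 0.0) + np.log1p(np.exp(-np.abs(x)))

def bnll_grad(x, threshold=50.0):
    e = np.exp(np.minimum(x, threshold))  # kBNLL_THRESHOLD guards overflow
    return e / (e + 1.0)                  # == sigmoid(x)

x = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
print(bnll(x), bnll_grad(x))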
cudnn::dataType::zero, 19 | top_desc_, top_data) ); 20 | } 21 | 22 | template 23 | void CuDNNLRNLayer::Backward_gpu(const vector*>& top, 24 | const vector& propagate_down, const vector*>& bottom) { 25 | const Dtype* top_diff = top[0]->gpu_diff(); 26 | const Dtype* top_data = top[0]->gpu_data(); 27 | const Dtype* bottom_data = bottom[0]->gpu_data(); 28 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 29 | 30 | CUDNN_CHECK(cudnnLRNCrossChannelBackward( 31 | handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1, 32 | cudnn::dataType::one, 33 | top_desc_, top_data, 34 | top_desc_, top_diff, 35 | bottom_desc_, bottom_data, 36 | cudnn::dataType::zero, 37 | bottom_desc_, bottom_diff) ); 38 | } 39 | 40 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLRNLayer); 41 | 42 | }; // namespace caffe 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_pooling_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_pooling_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | PoolingLayer::LayerSetUp(bottom, top); 12 | CUDNN_CHECK(cudnnCreate(&handle_)); 13 | cudnn::createTensor4dDesc(&bottom_desc_); 14 | cudnn::createTensor4dDesc(&top_desc_); 15 | cudnn::createPoolingDesc(&pooling_desc_, 16 | this->layer_param_.pooling_param().pool(), &mode_, 17 | this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, 18 | this->stride_h_, this->stride_w_); 19 | handles_setup_ = true; 20 | } 21 | 22 | template 23 | void CuDNNPoolingLayer::Reshape(const vector*>& bottom, 24 | const vector*>& top) { 25 | PoolingLayer::Reshape(bottom, top); 26 | cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), 27 | this->channels_, this->height_, this->width_); 28 | cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), 29 | this->channels_, this->pooled_height_, this->pooled_width_); 30 | } 31 | 32 | template 33 | CuDNNPoolingLayer::~CuDNNPoolingLayer() { 34 | // Check that handles have been setup before destroying. 
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_pooling_layer.cu:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_pooling_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_,
        cudnn::dataType<Dtype>::one,
        top_desc_, top_data, top_desc_, top_diff,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        bottom_desc_, bottom_diff));
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_relu_layer.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_relu_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  ReLULayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_RELU);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  ReLULayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNReLULayer<Dtype>::~CuDNNReLULayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroyActivationDescriptor(this->activ_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNReLULayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_sigmoid_layer.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_sigmoid_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_,
      CUDNN_ACTIVATION_SIGMOID);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  SigmoidLayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNSigmoidLayer<Dtype>::~CuDNNSigmoidLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNSigmoidLayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_softmax_layer.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layers/cudnn_softmax_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::LayerSetUp(bottom, top);
  // Initialize CUDNN.
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::Reshape(bottom, top);
  int N = this->outer_num_;
  int K = bottom[0]->shape(this->softmax_axis_);
  int H = this->inner_num_;
  int W = 1;
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNSoftmaxLayer<Dtype>::~CuDNNSoftmaxLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(bottom_desc_);
  cudnnDestroyTensorDescriptor(top_desc_);
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_softmax_layer.cu:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layers/cudnn_softmax_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
        CUDNN_SOFTMAX_MODE_CHANNEL,
        cudnn::dataType<Dtype>::one,
        bottom_desc_, bottom_data,
        cudnn::dataType<Dtype>::zero,
        top_desc_, top_data));
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();

    CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
          CUDNN_SOFTMAX_MODE_CHANNEL,
          cudnn::dataType<Dtype>::one,
          top_desc_, top_data, top_desc_, top_diff,
          cudnn::dataType<Dtype>::zero,
          bottom_desc_, bottom_diff));
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/cudnn_tanh_layer.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_tanh_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_TANH);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNTanHLayer<Dtype>::~CuDNNTanHLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(this->bottom_desc_);
  cudnnDestroyTensorDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNTanHLayer);

}  // namespace caffe
#endif
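
The Reshape above maps Caffe's (outer_num_, channels, inner_num_) view onto a 4-D cuDNN tensor with W = 1, so CUDNN_SOFTMAX_MODE_CHANNEL normalizes over the softmax axis only. A standalone CPU sketch (not a repo file) of the same reduction, assuming nothing beyond the standard library:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Softmax over K, independently for every (n, h) position of an
// N x K x H (x 1) array stored in row-major order.
void channel_softmax(const std::vector<double>& in, std::vector<double>* out,
                     int N, int K, int H) {
  for (int n = 0; n < N; ++n) {
    for (int h = 0; h < H; ++h) {
      // Subtract the per-channel-column max first, for numerical stability.
      double max_val = in[(n * K + 0) * H + h];
      for (int k = 1; k < K; ++k)
        max_val = std::max(max_val, in[(n * K + k) * H + h]);
      double sum = 0.;
      for (int k = 0; k < K; ++k) {
        const int idx = (n * K + k) * H + h;
        (*out)[idx] = std::exp(in[idx] - max_val);
        sum += (*out)[idx];
      }
      for (int k = 0; k < K; ++k) (*out)[(n * K + k) * H + h] /= sum;
    }
  }
}

int main() {
  std::vector<double> in = {1., 0., 2., 0., 3., 0.};  // N=1, K=3, H=2
  std::vector<double> out(in.size());
  channel_softmax(in, &out, 1, 3, 2);
  for (double v : out) std::printf("%.4f ", v);
  std::printf("\n");
  return 0;
}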
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/elu_layer.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/elu_layer.hpp"

namespace caffe {

template <typename Dtype>
void ELULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype alpha = this->layer_param_.elu_param().alpha();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + alpha * (exp(std::min(bottom_data[i], Dtype(0))) - Dtype(1));
  }
}

template <typename Dtype>
void ELULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype alpha = this->layer_param_.elu_param().alpha();
    for (int i = 0; i < count; ++i) {
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + (alpha + top_data[i]) * (bottom_data[i] <= 0));
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(ELULayer);
#endif

INSTANTIATE_CLASS(ELULayer);
REGISTER_LAYER_CLASS(ELU);

}  // namespace caffe
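
A standalone sketch (not a repo file) of the ELU definition used above: f(x) = x for x > 0 and alpha*(exp(x) - 1) otherwise. Note how the layer's backward pass rewrites the derivative alpha*exp(x) as alpha + f(x), which lets it reuse the already-computed top data instead of calling exp() again.

#include <cmath>
#include <cstdio>

double elu(double x, double alpha) {
  return x > 0 ? x : alpha * (std::exp(x) - 1.);
}

double elu_grad(double x, double alpha) {
  // Same identity as Backward_cpu: for x <= 0, d/dx = alpha*exp(x) = alpha + f(x).
  return x > 0 ? 1. : alpha + elu(x, alpha);
}

int main() {
  const double alpha = 1.0;
  for (double x : {-2., -0.5, 0.5, 2.}) {
    std::printf("x=%5.2f  elu=%8.4f  d/dx=%8.4f\n",
                x, elu(x, alpha), elu_grad(x, alpha));
  }
  return 0;
}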
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/euclidean_loss_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),
      bottom[1]->gpu_data(),
      diff_.mutable_gpu_data());
  Dtype dot;
  caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_gpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.gpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // b
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/exp_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/exp_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void ExpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (inner_scale_ == Dtype(1)) {
    caffe_gpu_exp(count, bottom_data, top_data);
  } else {
    caffe_gpu_scale(count, inner_scale_, bottom_data, top_data);
    caffe_gpu_exp(count, top_data, top_data);
  }
  if (outer_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, outer_scale_, top_data);
  }
}

template <typename Dtype>
void ExpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  caffe_gpu_mul(count, top_data, top_diff, bottom_diff);
  if (inner_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, inner_scale_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/flatten_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/flatten_layer.hpp"

namespace caffe {

template <typename Dtype>
void FlattenLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
    "allow in-place computation.";
  const int start_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().axis());
  const int end_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().end_axis());
  vector<int> top_shape;
  for (int i = 0; i < start_axis; ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1);
  top_shape.push_back(flattened_dim);
  for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  top[0]->Reshape(top_shape);
  CHECK_EQ(top[0]->count(), bottom[0]->count());
}

template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  top[0]->ShareData(*bottom[0]);
}

template <typename Dtype>
void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  bottom[0]->ShareDiff(*top[0]);
}

INSTANTIATE_CLASS(FlattenLayer);
REGISTER_LAYER_CLASS(Flatten);

}  // namespace caffe
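
A standalone sketch (not a repo file) of the shape arithmetic in FlattenLayer::Reshape above: the axes in [start_axis, end_axis] collapse into one dimension whose size is the product of the collapsed extents, while the axes outside that range pass through unchanged.

#include <cstdio>
#include <vector>

std::vector<int> flatten_shape(const std::vector<int>& bottom,
                               int start_axis, int end_axis) {
  std::vector<int> top;
  for (int i = 0; i < start_axis; ++i) top.push_back(bottom[i]);
  int flattened = 1;
  for (int i = start_axis; i <= end_axis; ++i) flattened *= bottom[i];
  top.push_back(flattened);
  for (int i = end_axis + 1; i < static_cast<int>(bottom.size()); ++i)
    top.push_back(bottom[i]);
  return top;
}

int main() {
  // The default Flatten (axis = 1, end_axis = -1, i.e. the last axis) turns
  // N x C x H x W into N x (C*H*W).
  std::vector<int> top = flatten_shape({32, 3, 28, 28}, 1, 3);
  for (int d : top) std::printf("%d ", d);  // prints: 32 2352
  std::printf("\n");
  return 0;
}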
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/hdf5_output_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/layers/hdf5_output_layer.hpp"

namespace caffe {

template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_GE(bottom.size(), 2);
  CHECK_EQ(bottom[0]->num(), bottom[1]->num());
  data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
      bottom[1]->height(), bottom[1]->width());
  const int data_datum_dim = bottom[0]->count() / bottom[0]->num();
  const int label_datum_dim = bottom[1]->count() / bottom[1]->num();

  for (int i = 0; i < bottom[0]->num(); ++i) {
    caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim],
        &data_blob_.mutable_cpu_data()[i * data_datum_dim]);
    caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim],
        &label_blob_.mutable_cpu_data()[i * label_datum_dim]);
  }
  SaveBlobs();
}

template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  return;
}

INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/input_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/input_layer.hpp"

namespace caffe {

template <typename Dtype>
void InputLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int num_top = top.size();
  const InputParameter& param = this->layer_param_.input_param();
  const int num_shape = param.shape_size();
  CHECK(num_shape == 0 || num_shape == 1 || num_shape == num_top)
      << "Must specify 'shape' once, once per top blob, or not at all: "
      << num_top << " tops vs. " << num_shape << " shapes.";
  if (num_shape > 0) {
    for (int i = 0; i < num_top; ++i) {
      const int shape_index = (param.shape_size() == 1) ? 0 : i;
      top[i]->Reshape(param.shape(shape_index));
    }
  }
}

INSTANTIATE_CLASS(InputLayer);
REGISTER_LAYER_CLASS(Input);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/l1_loss_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),
      bottom[1]->gpu_data(),
      diff_.mutable_gpu_data());

  caffe_gpu_abs(count, diff_.gpu_data(), errors_.mutable_gpu_data());
  Dtype loss;
  caffe_gpu_asum(count, errors_.gpu_data(), &loss);
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}

template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = bottom[0]->count();
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      caffe_gpu_sign(count, diff_.gpu_data(), diff_.mutable_gpu_data());
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_gpu_axpby(
          count,                           // count
          alpha,                           // alpha
          diff_.gpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // b
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/loss_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
void LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // LossLayers have a non-zero (1) loss by default.
  if (this->layer_param_.loss_weight_size() == 0) {
    this->layer_param_.add_loss_weight(Dtype(1));
  }
}

template <typename Dtype>
void LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0))
      << "The data and label should have the same first dimension.";
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
}

INSTANTIATE_CLASS(LossLayer);

}  // namespace caffe
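
A standalone CPU sketch (not a repo file) of the L1 loss math in l1_loss_layer.cu above: the forward pass averages |a - b| over the batch size, and the backward pass is sign(a - b) scaled by the loss weight over the batch size, with the opposite sign for the second bottom.

#include <cmath>
#include <cstdio>
#include <vector>

double l1_forward(const std::vector<double>& a, const std::vector<double>& b,
                  int num) {
  double loss = 0.;
  for (size_t i = 0; i < a.size(); ++i) loss += std::fabs(a[i] - b[i]);
  return loss / num;
}

void l1_backward(const std::vector<double>& a, const std::vector<double>& b,
                 double loss_weight, int num, std::vector<double>* diff_a) {
  for (size_t i = 0; i < a.size(); ++i) {
    const double sign = (a[i] > b[i]) - (a[i] < b[i]);
    (*diff_a)[i] = loss_weight * sign / num;  // bottom[1] gets the negation
  }
}

int main() {
  std::vector<double> a = {1., 2., 3.}, b = {2., 2., 1.}, da(3);
  std::printf("loss = %.4f\n", l1_forward(a, b, 1));  // (1 + 0 + 2) / 1 = 3
  l1_backward(a, b, 1., 1, &da);
  std::printf("diff_a = %.0f %.0f %.0f\n", da[0], da[1], da[2]);  // -1 0 1
  return 0;
}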
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/neuron_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

template <typename Dtype>
void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
}

INSTANTIATE_CLASS(NeuronLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/parameter_layer.cpp:
--------------------------------------------------------------------------------
#include "caffe/layers/parameter_layer.hpp"

namespace caffe {

INSTANTIATE_CLASS(ParameterLayer);
REGISTER_LAYER_CLASS(Parameter);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/recurrent_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/recurrent_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void RecurrentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Hacky fix for test time... reshare all the shared blobs.
  // TODO: somehow make this work non-hackily.
  if (this->phase_ == TEST) {
    unrolled_net_->ShareWeights();
  }

  DCHECK_EQ(recur_input_blobs_.size(), recur_output_blobs_.size());
  if (!expose_hidden_) {
    for (int i = 0; i < recur_input_blobs_.size(); ++i) {
      const int count = recur_input_blobs_[i]->count();
      DCHECK_EQ(count, recur_output_blobs_[i]->count());
      const Dtype* timestep_T_data = recur_output_blobs_[i]->gpu_data();
      Dtype* timestep_0_data = recur_input_blobs_[i]->mutable_gpu_data();
      caffe_copy(count, timestep_T_data, timestep_0_data);
    }
  }

  unrolled_net_->ForwardTo(last_layer_index_);

  if (expose_hidden_) {
    const int top_offset = output_blobs_.size();
    for (int i = top_offset, j = 0; i < top.size(); ++i, ++j) {
      top[i]->ShareData(*recur_output_blobs_[j]);
    }
  }
}

INSTANTIATE_LAYER_GPU_FORWARD(RecurrentLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/relu_layer.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layers/relu_layer.hpp"

namespace caffe {

template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + negative_slope * std::min(bottom_data[i], Dtype(0));
  }
}

template <typename Dtype>
void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    for (int i = 0; i < count; ++i) {
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + negative_slope * (bottom_data[i] <= 0));
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(ReLULayer);
#endif

INSTANTIATE_CLASS(ReLULayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/sigmoid_layer.cpp:
--------------------------------------------------------------------------------
#include <cmath>
#include <vector>

#include "caffe/layers/sigmoid_layer.hpp"

namespace caffe {

template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
  return 1. / (1. + exp(-x));
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = sigmoid(bottom_data[i]);
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    for (int i = 0; i < count; ++i) {
      const Dtype sigmoid_x = top_data[i];
      bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SigmoidLayer);
#endif

INSTANTIATE_CLASS(SigmoidLayer);

}  // namespace caffe
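
A standalone sketch (not a repo file) of the identity Backward_cpu relies on: with y = sigmoid(x), dy/dx = y * (1 - y), so the gradient can be formed from the layer's own output without recomputing exp(). Checked here against a centered finite difference.

#include <cmath>
#include <cstdio>

double sigmoid(double x) { return 1. / (1. + std::exp(-x)); }

int main() {
  const double x = 0.7, eps = 1e-6;
  const double y = sigmoid(x);
  const double analytic = y * (1. - y);
  const double numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2. * eps);
  std::printf("analytic=%.8f numeric=%.8f\n", analytic, numeric);
  return 0;
}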
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/silence_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_set(bottom[i]->count(), Dtype(0),
                bottom[i]->mutable_cpu_diff());
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SilenceLayer);
#endif

INSTANTIATE_CLASS(SilenceLayer);
REGISTER_LAYER_CLASS(Silence);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/silence_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Do nothing.
}

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_gpu_set(bottom[i]->count(), Dtype(0),
                    bottom[i]->mutable_gpu_diff());
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/split_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/split_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
    return;
  }
  caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
                bottom[0]->mutable_gpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);

}  // namespace caffe
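
A standalone sketch (not a repo file) of what the split backward pass above computes: a split fans one blob out to several consumers, so its gradient is the elementwise sum of every top blob's diff (the first two summed directly, the rest accumulated via axpy in the GPU code).

#include <cstdio>
#include <vector>

void split_backward(const std::vector<std::vector<double> >& top_diffs,
                    std::vector<double>* bottom_diff) {
  for (size_t i = 0; i < bottom_diff->size(); ++i) {
    double sum = 0.;
    for (size_t t = 0; t < top_diffs.size(); ++t) sum += top_diffs[t][i];
    (*bottom_diff)[i] = sum;
  }
}

int main() {
  std::vector<std::vector<double> > tops = {{1., 2.}, {10., 20.}, {100., 200.}};
  std::vector<double> bottom(2);
  split_backward(tops, &bottom);
  std::printf("%.0f %.0f\n", bottom[0], bottom[1]);  // 111 222
  return 0;
}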
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/tanh_layer.cpp:
--------------------------------------------------------------------------------
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <vector>

#include "caffe/layers/tanh_layer.hpp"

namespace caffe {

template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = tanh(bottom_data[i]);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype tanhx;
    for (int i = 0; i < count; ++i) {
      tanhx = top_data[i];
      bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(TanHLayer);
#endif

INSTANTIATE_CLASS(TanHLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/threshold_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
  }
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(ThresholdLayer, Forward);
#endif

INSTANTIATE_CLASS(ThresholdLayer);
REGISTER_LAYER_CLASS(Threshold);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/threshold_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/threshold_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > threshold ? 1 : 0;
  }
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/adadelta_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2,
    Dtype momentum, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi;
    gi = gi * sqrt((h2[i] + delta) / (hi + delta));
    h2[i] = momentum * h2[i] + (1-momentum) * gi * gi;
    g[i] = local_rate * gi;
  }
}
template <typename Dtype>
void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum,
    Dtype delta, Dtype local_rate) {
  AdaDeltaUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, h2, momentum, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adadelta_update_gpu<float>(int, float*, float*, float*,
    float, float, float);
template void adadelta_update_gpu<double>(int, double*, double*, double*,
    double, double, double);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/adagrad_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = h[i] + gi*gi;
    g[i] = local_rate * gi / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta,
    Dtype local_rate) {
  AdaGradUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adagrad_update_gpu<float>(int, float*, float*, float, float);
template void adagrad_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/adam_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v,
    Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float mi = m[i] = m[i]*beta1 + gi*(1-beta1);
    float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2);
    g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat);
  }
}
template <typename Dtype>
void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1,
    Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
  AdamUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void adam_update_gpu<float>(int, float*, float*, float*,
    float, float, float, float);
template void adam_update_gpu<double>(int, double*, double*, double*,
    double, double, double, double);

}  // namespace caffe
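
A standalone CPU sketch (not a repo file) of the AdamUpdate kernel above as a scalar step. The corrected_local_rate argument is assumed to already fold in the bias correction sqrt(1 - beta2^t) / (1 - beta1^t) that the solver computes on the host before launching the kernel.

#include <cmath>
#include <cstdio>

// One Adam step: update the first and second moment estimates m and v from
// the raw gradient, then overwrite the gradient with the scaled update.
void adam_step(double* g, double* m, double* v, double beta1, double beta2,
               double eps_hat, double corrected_local_rate) {
  const double gi = *g;
  *m = *m * beta1 + gi * (1. - beta1);
  *v = *v * beta2 + gi * gi * (1. - beta2);
  *g = corrected_local_rate * *m / (std::sqrt(*v) + eps_hat);
}

int main() {
  double m = 0., v = 0.;
  for (int t = 0; t < 3; ++t) {
    double grad = 0.5;  // pretend the same raw gradient arrives each step
    adam_step(&grad, &m, &v, 0.9, 0.999, 1e-8, 0.001);
    std::printf("step %d: update=%.6f m=%.6f v=%.6f\n", t, grad, m, v);
  }
  return 0;
}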
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/nesterov_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float hi = h[i];
    float hi_new = h[i] = momentum * hi + local_rate * g[i];
    g[i] = (1+momentum) * hi_new - momentum * hi;
  }
}
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  NesterovUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float*, float*, float, float);
template void nesterov_update_gpu<double>(int, double*, double*, double,
    double);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/rmsprop_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
    Dtype rms_decay, Dtype delta, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
    g[i] = local_rate * g[i] / (sqrt(hi) + delta);
  }
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
    Dtype delta, Dtype local_rate) {
  RMSPropUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, rms_decay, delta, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
    float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
    double);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/solvers/sgd_solver.cu:
--------------------------------------------------------------------------------
#include "caffe/util/math_functions.hpp"


namespace caffe {

template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
    Dtype momentum, Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    g[i] = h[i] = momentum*h[i] + local_rate*g[i];
  }
}
template <typename Dtype>
void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
    Dtype local_rate) {
  SGDUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, g, h, momentum, local_rate);
  CUDA_POST_KERNEL_CHECK;
}
template void sgd_update_gpu<float>(int, float*, float*, float, float);
template void sgd_update_gpu<double>(int, double*, double*, double, double);

}  // namespace caffe
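
A standalone scalar sketch (not a repo file) of the difference between the SGDUpdate and NesterovUpdate kernels above: plain momentum returns the new history value itself, while Nesterov returns the look-ahead combination (1 + mu) * h_new - mu * h_old.

#include <cstdio>

double sgd_step(double g, double* h, double mu, double lr) {
  *h = mu * *h + lr * g;  // g[i] = h[i] = momentum*h[i] + local_rate*g[i]
  return *h;
}

double nesterov_step(double g, double* h, double mu, double lr) {
  const double h_old = *h;
  const double h_new = *h = mu * h_old + lr * g;
  return (1. + mu) * h_new - mu * h_old;  // look-ahead correction
}

int main() {
  double h1 = 0., h2 = 0.;
  for (int t = 0; t < 3; ++t) {
    std::printf("sgd=%.6f nesterov=%.6f\n",
                sgd_step(1., &h1, 0.9, 0.01), nesterov_step(1., &h2, 0.9, 0.01));
  }
  return 0;
}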
--------------------------------------------------------------------------------
/caffe/src/caffe/test/CMakeLists.txt:
--------------------------------------------------------------------------------
# This option allows building only a selected subset of test files while
# excluding all others.
# Usage example:
#   cmake -DBUILD_only_tests="common,net,blob,im2col_kernel"
set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build, without the 'test_' prefix and extension")
caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests})
caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests})

# For the 'make runtest' target we don't need to embed test data paths into
# the source files, because the test target is executed in the source
# directory. That's why the lines below are commented out. TODO: remove them

# definition needed to include CMake generated files
#add_definitions(-DCMAKE_BUILD)

# generates test_data/sample_data_list.txt.gen.cmake
#caffe_configure_testdatafile(test_data/sample_data_list.txt)

set(the_target test.testbin)
set(test_args --gtest_shuffle)

if(HAVE_CUDA)
  caffe_cuda_compile(test_cuda_objs ${test_cuda})
  list(APPEND test_srcs ${test_cuda_objs} ${test_cuda})
else()
  list(APPEND test_args --gtest_filter="-*GPU*")
endif()

# ---[ Adding test target
add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs})
target_link_libraries(${the_target} gtest ${Caffe_LINK})
caffe_default_properties(${the_target})
caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test")

# ---[ Adding runtest
add_custom_target(runtest COMMAND ${the_target} ${test_args}
                                  WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_caffe_main.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"
#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
#ifndef CPU_ONLY
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
#endif
}

#ifndef CPU_ONLY
using caffe::CAFFE_TEST_CUDA_PROP;
#endif

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  caffe::GlobalInit(&argc, &argv);
#ifndef CPU_ONLY
  // Before starting the tests, first print out some CUDA device info.
  int device;
  cudaGetDeviceCount(&device);
  cout << "Cuda number of devices: " << device << endl;
  if (argc > 1) {
    // Use the given device
    device = atoi(argv[1]);
    cudaSetDevice(device);
    cout << "Setting to use device " << device << endl;
  } else if (CUDA_TEST_DEVICE >= 0) {
    // Use the device assigned in build configuration; but with a lower priority
    device = CUDA_TEST_DEVICE;
  }
  cudaGetDevice(&device);
  cout << "Current device id: " << device << endl;
  cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device);
  cout << "Current device name: " << CAFFE_TEST_CUDA_PROP.name << endl;
#endif
  // invoke the test.
  return RUN_ALL_TESTS();
}
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/src/caffe/test/test_data/sample_data.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/sample_data.h5
src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/solver_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JXingZhao/ContrastPrior/c0e9530ea47b7f8bfd5a25097a17a1f4019299ed/caffe/src/caffe/test/test_data/solver_data.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/solver_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/solver_data.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_internal_thread.cpp:
--------------------------------------------------------------------------------
#include "glog/logging.h"
#include "gtest/gtest.h"

#include "caffe/internal_thread.hpp"
#include "caffe/util/math_functions.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {


class InternalThreadTest : public ::testing::Test {};

TEST_F(InternalThreadTest, TestStartAndExit) {
  InternalThread thread;
  EXPECT_FALSE(thread.is_started());
  thread.StartInternalThread();
  EXPECT_TRUE(thread.is_started());
  thread.StopInternalThread();
  EXPECT_FALSE(thread.is_started());
}

class TestThreadA : public InternalThread {
  void InternalThreadEntry() {
    EXPECT_EQ(4244559767, caffe_rng_rand());
  }
};

class TestThreadB : public InternalThread {
  void InternalThreadEntry() {
    EXPECT_EQ(1726478280, caffe_rng_rand());
  }
};

TEST_F(InternalThreadTest, TestRandomSeed) {
  TestThreadA t1;
  Caffe::set_random_seed(9658361);
  t1.StartInternalThread();
  t1.StopInternalThread();

  TestThreadA t2;
  Caffe::set_random_seed(9658361);
  t2.StartInternalThread();
  t2.StopInternalThread();

  TestThreadB t3;
  Caffe::set_random_seed(3435563);
  t3.StartInternalThread();
  t3.StopInternalThread();
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_layer_factory.cpp:
--------------------------------------------------------------------------------
#include <map>
#include <string>

#include "boost/scoped_ptr.hpp"
#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/util/db.hpp"
#include "caffe/util/io.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

template <typename TypeParam>
class LayerFactoryTest : public MultiDeviceTest<TypeParam> {};

TYPED_TEST_CASE(LayerFactoryTest, TestDtypesAndDevices);

TYPED_TEST(LayerFactoryTest, TestCreateLayer) {
  typedef typename TypeParam::Dtype Dtype;
  typename LayerRegistry<Dtype>::CreatorRegistry& registry =
      LayerRegistry<Dtype>::Registry();
  shared_ptr<Layer<Dtype> > layer;
  for (typename LayerRegistry<Dtype>::CreatorRegistry::iterator iter =
       registry.begin(); iter != registry.end(); ++iter) {
    // Special case: PythonLayer is checked by pytest
    if (iter->first == "Python") { continue; }
    LayerParameter layer_param;
    // Data layers expect a DB
    if (iter->first == "Data") {
#ifdef USE_LEVELDB
      string tmp;
      MakeTempDir(&tmp);
      boost::scoped_ptr<db::DB> db(db::GetDB(DataParameter_DB_LEVELDB));
      db->Open(tmp, db::NEW);
      db->Close();
      layer_param.mutable_data_param()->set_source(tmp);
#else
      continue;
#endif  // USE_LEVELDB
    }
    layer_param.set_type(iter->first);
    layer = LayerRegistry<Dtype>::CreateLayer(layer_param);
    EXPECT_EQ(iter->first, layer->type());
  }
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_protobuf.cpp:
--------------------------------------------------------------------------------
// This is simply a script that tries serializing a protocol buffer in text
// format. Nothing special here and no actual code is being tested.
#include <string>

#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/proto/caffe.pb.h"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class ProtoTest : public ::testing::Test {};

TEST_F(ProtoTest, TestSerialization) {
  LayerParameter param;
  param.set_name("test");
  param.set_type("Test");
  std::cout << "Printing in binary format." << std::endl;
  std::cout << param.SerializeAsString() << std::endl;
  std::cout << "Printing in text format." << std::endl;
  std::string str;
  google::protobuf::TextFormat::PrintToString(param, &str);
  std::cout << str << std::endl;
  EXPECT_TRUE(true);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_solver_factory.cpp:
--------------------------------------------------------------------------------
#include <map>
#include <string>

#include "boost/scoped_ptr.hpp"
#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/solver.hpp"
#include "caffe/solver_factory.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

template <typename TypeParam>
class SolverFactoryTest : public MultiDeviceTest<TypeParam> {
 protected:
  SolverParameter simple_solver_param() {
    const string solver_proto =
        "train_net_param { "
        "  layer { "
        "    name: 'data' type: 'DummyData' top: 'data' "
        "    dummy_data_param { shape { dim: 1 } } "
        "  } "
        "} ";
    SolverParameter solver_param;
    CHECK(google::protobuf::TextFormat::ParseFromString(
        solver_proto, &solver_param));
    return solver_param;
  }
};

TYPED_TEST_CASE(SolverFactoryTest, TestDtypesAndDevices);

TYPED_TEST(SolverFactoryTest, TestCreateSolver) {
  typedef typename TypeParam::Dtype Dtype;
  typename SolverRegistry<Dtype>::CreatorRegistry& registry =
      SolverRegistry<Dtype>::Registry();
  shared_ptr<Solver<Dtype> > solver;
  SolverParameter solver_param = this->simple_solver_param();
  for (typename SolverRegistry<Dtype>::CreatorRegistry::iterator iter =
       registry.begin(); iter != registry.end(); ++iter) {
    solver_param.set_type(iter->first);
    solver.reset(SolverRegistry<Dtype>::CreateSolver(solver_param));
    EXPECT_EQ(iter->first, solver->type());
  }
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/util/cudnn.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include "caffe/util/cudnn.hpp"

namespace caffe {
namespace cudnn {

float dataType<float>::oneval = 1.0;
float dataType<float>::zeroval = 0.0;
const void* dataType<float>::one =
    static_cast<void *>(&dataType<float>::oneval);
const void* dataType<float>::zero =
    static_cast<void *>(&dataType<float>::zeroval);

double dataType<double>::oneval = 1.0;
double dataType<double>::zeroval = 0.0;
const void* dataType<double>::one =
    static_cast<void *>(&dataType<double>::oneval);
const void* dataType<double>::zero =
    static_cast<void *>(&dataType<double>::zeroval);

}  // namespace cudnn
}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/caffe/src/caffe/util/db.cpp:
--------------------------------------------------------------------------------
#include "caffe/util/db.hpp"
#include "caffe/util/db_leveldb.hpp"
#include "caffe/util/db_lmdb.hpp"

#include <string>

namespace caffe { namespace db {

DB* GetDB(DataParameter::DB backend) {
  switch (backend) {
#ifdef USE_LEVELDB
  case DataParameter_DB_LEVELDB:
    return new LevelDB();
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  case DataParameter_DB_LMDB:
    return new LMDB();
#endif  // USE_LMDB
  default:
    LOG(FATAL) << "Unknown database backend";
    return NULL;
  }
}

DB* GetDB(const string& backend) {
#ifdef USE_LEVELDB
  if (backend == "leveldb") {
    return new LevelDB();
  }
#endif  // USE_LEVELDB
#ifdef USE_LMDB
  if (backend == "lmdb") {
    return new LMDB();
  }
#endif  // USE_LMDB
  LOG(FATAL) << "Unknown database backend";
  return NULL;
}

}  // namespace db
}  // namespace caffe
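
A usage sketch (not a repo file; it needs a Caffe build with USE_LMDB to compile and link) of how the GetDB factory above is typically driven, mirroring what the layer-factory test does with its temporary LevelDB. The "lmdb" backend string and the path are illustrative.

#include <string>

#include "boost/scoped_ptr.hpp"
#include "caffe/util/db.hpp"

int main() {
  // Ask the factory for a backend by name, then open a fresh database.
  boost::scoped_ptr<caffe::db::DB> db(caffe::db::GetDB("lmdb"));
  db->Open("/tmp/example_lmdb", caffe::db::NEW);  // hypothetical path
  boost::scoped_ptr<caffe::db::Transaction> txn(db->NewTransaction());
  txn->Put("key", "serialized datum bytes");
  txn->Commit();
  db->Close();
  return 0;
}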
--------------------------------------------------------------------------------
/caffe/src/caffe/util/db_leveldb.cpp:
--------------------------------------------------------------------------------
#ifdef USE_LEVELDB
#include "caffe/util/db_leveldb.hpp"

#include <string>

namespace caffe { namespace db {

void LevelDB::Open(const string& source, Mode mode) {
  leveldb::Options options;
  options.block_size = 65536;
  options.write_buffer_size = 268435456;
  options.max_open_files = 100;
  options.error_if_exists = mode == NEW;
  options.create_if_missing = mode != READ;
  leveldb::Status status = leveldb::DB::Open(options, source, &db_);
  CHECK(status.ok()) << "Failed to open leveldb " << source
                     << std::endl << status.ToString();
  LOG(INFO) << "Opened leveldb " << source;
}

}  // namespace db
}  // namespace caffe
#endif  // USE_LEVELDB
--------------------------------------------------------------------------------
/caffe/src/gtest/CMakeLists.txt:
--------------------------------------------------------------------------------
add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp)
caffe_default_properties(gtest)

#add_library(gtest_main gtest_main.cc)
#target_link_libraries(gtest_main gtest)
--------------------------------------------------------------------------------
/caffe/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
# Collect source files
file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

# Build each source file independently
foreach(source ${srcs})
  get_filename_component(name ${source} NAME_WE)

  # the caffe target already exists
  if(name MATCHES "caffe")
    set(name ${name}.bin)
  endif()

  # target
  add_executable(${name} ${source})
  target_link_libraries(${name} ${Caffe_LINK})
  caffe_default_properties(${name})

  # set back RUNTIME_OUTPUT_DIRECTORY
  caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools")
  caffe_set_solution_folder(${name} tools)

  # restore output name without suffix
  if(name MATCHES "caffe.bin")
    set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe)
  endif()

  # Install
  install(TARGETS ${name} DESTINATION bin)
endforeach(source)
--------------------------------------------------------------------------------
/caffe/tools/device_query.cpp:
--------------------------------------------------------------------------------
#include "caffe/common.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe device_query "
                "[--device_id=0] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/extra/launch_resize_and_crop_images.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer

# If you encounter an "address already in use" error, kill the stale process.
# 11235 is the port of the server process
# https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py
#   sudo netstat -ap | grep 11235
# The last column of the output is PID/Program name
#   kill -9 PID
# Second solution:
#   nmap localhost
#   fuser -k 11235/tcp
# Or just wait a few seconds.

## Launch your Mapreduce locally
# num_clients: number of processes
# image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV.
# input: the file containing one image path relative to input_folder each line
# input_folder: where are the original images
# output_folder: where to save the resized and cropped images
./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/

## Launch your Mapreduce with MPI
# mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/
5 | # 11235 is the port of the server process 6 | # https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py 7 | # sudo netstat -ap | grep 11235 8 | # The last column of the output is PID/Program name 9 | # kill -9 PID 10 | # Alternatively: 11 | # nmap localhost 12 | # fuser -k 11235/tcp 13 | # Or just wait a few seconds. 14 | 15 | ## Launch your Mapreduce locally 16 | # num_clients: number of processes 17 | # image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV. 18 | # input: a text file listing one image path per line, relative to input_folder 19 | # input_folder: the directory containing the original images 20 | # output_folder: the directory where the resized and cropped images are saved 21 | ./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 22 | 23 | ## Launch your Mapreduce with MPI 24 | # mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 25 |
-------------------------------------------------------------------------------- /caffe/tools/finetune_net.cpp: --------------------------------------------------------------------------------
1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--weights=...] instead."; 6 | return 0; 7 | } 8 |
-------------------------------------------------------------------------------- /caffe/tools/net_speed_benchmark.cpp: --------------------------------------------------------------------------------
1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 |
-------------------------------------------------------------------------------- /caffe/tools/test_net.cpp: --------------------------------------------------------------------------------
1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 |
-------------------------------------------------------------------------------- /caffe/tools/train_net.cpp: --------------------------------------------------------------------------------
1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--snapshot=...] instead."; 6 | return 0; 7 | } 8 |
-------------------------------------------------------------------------------- /caffe/tools/upgrade_net_proto_binary.cpp: --------------------------------------------------------------------------------
1 | // This is a script to upgrade "V0" network prototxts to the new format.
2 | // Usage: 3 | // upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: " 23 | << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | NetParameter net_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromBinaryFile(input_filename, &net_param)) { 30 | LOG(ERROR) << "Failed to parse input binary file as NetParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = NetNeedsUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | WriteProtoToBinaryFile(net_param, argv[2]); 47 | 48 | LOG(INFO) << "Wrote upgraded NetParameter binary proto to " << argv[2]; 49 | return !success; 50 | } 51 |
-------------------------------------------------------------------------------- /caffe/tools/upgrade_net_proto_text.cpp: --------------------------------------------------------------------------------
1 | // This is a script to upgrade "V0" network prototxts to the new format. 2 | // Usage: 3 | // upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: " 23 | << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | NetParameter net_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromTextFile(input_filename, &net_param)) { 30 | LOG(ERROR) << "Failed to parse input text file as NetParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = NetNeedsUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | // Save new format prototxt.
47 | WriteProtoToTextFile(net_param, argv[2]); 48 | 49 | LOG(INFO) << "Wrote upgraded NetParameter text proto to " << argv[2]; 50 | return !success; 51 | } 52 |
-------------------------------------------------------------------------------- /evaluation/CalMAE.m: --------------------------------------------------------------------------------
1 | function mae = CalMAE(smap, gtImg) 2 | % Code Author: Wangjiang Zhu 3 | % Email: wangjiang88119@gmail.com 4 | % Date: 3/24/2014 5 | if size(smap, 1) ~= size(gtImg, 1) || size(smap, 2) ~= size(gtImg, 2) 6 | error('Saliency map and gt Image have different sizes!\n'); 7 | end 8 | 9 | if ~islogical(gtImg) 10 | gtImg = gtImg(:,:,1) > 128; 11 | end 12 | 13 | smap = im2double(smap(:,:,1)); 14 | fgPixels = smap(gtImg); 15 | fgErrSum = length(fgPixels) - sum(fgPixels); % error on GT foreground: sum of (1 - smap) 16 | bgErrSum = sum(smap(~gtImg)); % error on GT background: sum of smap 17 | mae = (fgErrSum + bgErrSum) / numel(gtImg);
-------------------------------------------------------------------------------- /evaluation/Fmeasure_calu.m: --------------------------------------------------------------------------------
1 | %% 2 | function Fmeasure = Fmeasure_calu(sMap,gtMap,gtsize, threshold) 3 | %threshold = 2* mean(sMap(:)) ; 4 | %if ( threshold > 1 ) 5 | % threshold = 1; 6 | %end 7 | 8 | EPS = 1e-4; 9 | %max(max(sMap)) 10 | %threshold 11 | 12 | Label3 = zeros( gtsize ); 13 | Label3( sMap>=threshold ) = 1; 14 | 15 | NumRec = sum(sum(Label3)); 16 | %NumRec = length( find( Label3==1 ) ); 17 | LabelAnd = Label3 & gtMap; 18 | %NumAnd = length( find ( LabelAnd==1 ) ); 19 | NumAnd = sum(sum(LabelAnd)); 20 | num_obj = sum(sum(gtMap)); 21 | 22 | if NumAnd == 0 23 | PreFtem = 0; 24 | RecallFtem = 0; 25 | FmeasureF = 0; 26 | else 27 | PreFtem = (NumAnd + EPS)/(NumRec + EPS); 28 | RecallFtem = (NumAnd + EPS)/(num_obj + EPS); 29 | FmeasureF = ( ( 1.3* PreFtem * RecallFtem) / ( .3 * PreFtem + RecallFtem ) ); % (1+beta^2)*P*R / (beta^2*P + R), with beta^2 = 0.3 30 | end 31 | Fmeasure = [PreFtem, RecallFtem, FmeasureF]; 32 |
-------------------------------------------------------------------------------- /evaluation/Fmeasure_calu_backup.m: --------------------------------------------------------------------------------
1 | %% 2 | function Fmeasure = Fmeasure_calu(sMap,gtMap,gtsize, threshold) 3 | %threshold = 2* mean(sMap(:)) ; 4 | if ( threshold > 1 ) 5 | threshold = 1; 6 | end 7 | 8 | Label3 = zeros( gtsize ); 9 | Label3( sMap>=threshold ) = 1; 10 | 11 | NumRec = length( find( Label3==1 ) ); 12 | LabelAnd = Label3 & gtMap; 13 | NumAnd = length( find ( LabelAnd==1 ) ); 14 | num_obj = sum(sum(gtMap)); 15 | 16 | if NumAnd == 0 17 | PreFtem = 0; 18 | RecallFtem = 0; 19 | FmeasureF = 0; 20 | else 21 | PreFtem = NumAnd/NumRec; 22 | RecallFtem = NumAnd/num_obj; 23 | FmeasureF = ( ( 1.3* PreFtem * RecallFtem ) / ( .3 * PreFtem + RecallFtem ) ); 24 | end 25 | 26 | Fmeasure = [PreFtem, RecallFtem, FmeasureF]; 27 |
-------------------------------------------------------------------------------- /evaluation/ReadMe.txt: --------------------------------------------------------------------------------
1 | % Author: Deng-Ping Fan 2 | % Date: 2018/9/8 3 | % E-mail: dengpingfan@mail.nankai.edu.cn 4 | % HomePage: http://dpfan.net/ 5 | 6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 7 | Just run main.m to get the S-measure, F-measure (adaptive threshold), and MAE scores. 8 | If you use our code, please cite the related works.
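For reference, the three scores correspond to the following definitions, using the constants hard-coded in the implementations in this folder (beta^2 = 0.3 in Fmeasure_calu.m, alpha = 0.5 in StructureMeasure.m). Here S denotes the predicted saliency map, G the binary ground truth, and W x H the image size:

\[ \mathrm{MAE} = \frac{1}{W H} \sum_{x=1}^{W} \sum_{y=1}^{H} \left| S(x,y) - G(x,y) \right| \]

\[ F_{\beta} = \frac{(1+\beta^{2})\,\mathrm{Precision}\cdot\mathrm{Recall}}{\beta^{2}\,\mathrm{Precision}+\mathrm{Recall}}, \qquad \beta^{2} = 0.3 \]

\[ S_{m} = \alpha\, S_{\mathrm{object}} + (1-\alpha)\, S_{\mathrm{region}}, \qquad \alpha = 0.5 \]

The adaptive threshold for the F-measure is twice the mean saliency value, clamped to 1, following the commented-out default at the top of Fmeasure_calu.m.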
9 | 10 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 11 | 12 | @inproceedings{fan2017structure, 13 | title={{Structure-measure: A New Way to Evaluate Foreground Maps}}, 14 | author={Fan, Deng-Ping and Cheng, Ming-Ming and Liu, Yun and Li, Tao and Borji, Ali}, 15 | booktitle={IEEE International Conference on Computer Vision (ICCV)}, 16 | pages={4548--4557}, 17 | year={2017}, 18 | note={\url{http://dpfan.net/smeasure/}}, 19 | organization={IEEE} 20 | } 21 | 22 | @inproceedings{Fan2018Enhanced, 23 | author={Fan, Deng-Ping and Gong, Cheng and Cao, Yang and Ren, Bo and Cheng, Ming-Ming and Borji, Ali}, 24 | title={{Enhanced-alignment Measure for Binary Foreground Map Evaluation}}, 25 | booktitle={International Joint Conference on Artificial Intelligence (IJCAI)}, 26 | pages={698--704}, 27 | note={\url{http://dpfan.net/e-measure/}}, 28 | year={2018} 29 | } 30 |
-------------------------------------------------------------------------------- /evaluation/StructureMeasure.m: --------------------------------------------------------------------------------
1 | function Q = StructureMeasure(prediction,GT) 2 | % StructureMeasure computes the similarity between the foreground map and 3 | % ground truth (as proposed in "Structure-measure: A new way to evaluate 4 | % foreground maps" [Deng-Ping Fan et al. - ICCV 2017]) 5 | % Usage: 6 | % Q = StructureMeasure(prediction,GT) 7 | % Input: 8 | % prediction - Binary/non-binary foreground map with values in the range 9 | % [0 1]. Type: double. 10 | % GT - Binary ground truth. Type: logical. 11 | % Output: 12 | % Q - The computed similarity score 13 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 14 | 15 | % Check input 16 | if (~isa(prediction,'double')) 17 | error('The prediction should be double type...'); 18 | end 19 | if ((max(prediction(:))>1) || min(prediction(:))<0) 20 | error('The prediction should be in the range of [0 1]...'); 21 | end 22 | if (~islogical(GT)) 23 | error('GT should be logical type...'); 24 | end 25 | 26 | y = mean2(GT); 27 | 28 | if (y==0) % if the GT is completely black 29 | x = mean2(prediction); 30 | Q = 1.0 - x; % degenerate case: score by how dark the prediction is 31 | elseif (y==1) % if the GT is completely white 32 | x = mean2(prediction); 33 | Q = x; % degenerate case: score by how bright the prediction is 34 | else 35 | alpha = 0.5; 36 | Q = alpha*S_object(prediction,GT)+(1-alpha)*S_region(prediction,GT); 37 | if (Q<0) 38 | Q=0; 39 | end 40 | end 41 | 42 | end 43 |
-------------------------------------------------------------------------------- /evaluation/calculateNumber.m: --------------------------------------------------------------------------------
1 | function [NUM,file,fileExt] = calculateNumber(imgPath) 2 | % Count the images in imgPath, trying each extension in turn until one matches. 3 | imgExt = {'*.bmp', '*.jpg', '*.png'}; 4 | for k = 1:numel(imgExt) 5 | d1 = dir([imgPath char(imgExt(k))]); 6 | file = {d1(~[d1.isdir]).name}; 7 | if ~isempty(file) 8 | break; 9 | end 10 | end 11 | NUM = length(file); 12 | fileExt = char(imgExt(k)); 13 | end
-------------------------------------------------------------------------------- /evaluation/myPlot.m: --------------------------------------------------------------------------------
1 | close all; clear; clc; 2 | load('Result.mat'); 3 | 4 | ss_sm = sort(Smeasure); % sorted S-measure scores (kept for inspection; not used below) 5 | 6 | [sizePlot,countPlot] =
HistPlot(Smeasure,10); 7 | plot(sizePlot,countPlot,'b','LineWidth',1); 8 | hold on; 9 | [sizePlot2,countPlot2] = HistPlot(Fmeasure,10); 10 | plot(sizePlot2,countPlot2,'y','LineWidth',1); 11 | 12 | 13 | function [sizePlot,countPlot] = HistPlot(str, number) % normalized histogram, smoothed with a spline 14 | [count,sizeRatio] = histcounts(str,number); 15 | 16 | count = count/sum(count) * 100; % convert bin counts to percentages 17 | sizeRatio(:,1) = []; % keep the right bin edges so lengths match count 18 | %plot(sizeRatio,count); 19 | 20 | sizePlot = sizeRatio(1,1):0.01:sizeRatio(length(sizeRatio)); % dense grid for the smooth curve 21 | countPlot = spline(sizeRatio,count,sizePlot); 22 | 23 | end 24 | --------------------------------------------------------------------------------
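ReadMe.txt above refers to a main.m driver that is not reproduced in this listing, and myPlot.m expects a Result.mat containing per-image Smeasure and Fmeasure arrays. A minimal sketch of such a driver, assuming the predicted maps and ground-truth masks sit in two folders with matching file names; the folder paths, and the choice to save MAE alongside the other scores, are illustrative assumptions, not taken from the repository:

% Hypothetical evaluation driver (sketch only; the actual main.m is not shown
% in this listing). Folder paths below are illustrative assumptions.
clear; clc;

salPath = './saliency_maps/';    % assumed folder of predicted saliency maps
gtPath  = './ground_truth/';     % assumed folder of GT masks, same file names

[NUM, files, ~] = calculateNumber(salPath);
Smeasure = zeros(1, NUM); Fmeasure = zeros(1, NUM); MAE = zeros(1, NUM);

for i = 1:NUM
    smap = im2double(imread([salPath files{i}]));
    smap = smap(:,:,1);                      % single channel, values in [0, 1]
    gt   = imread([gtPath files{i}]);
    gt   = gt(:,:,1) > 128;                  % binarize GT, as CalMAE.m does

    Smeasure(i) = StructureMeasure(smap, logical(gt));
    threshold   = min(2 * mean2(smap), 1);   % adaptive threshold, clamped to 1
    F           = Fmeasure_calu(smap, double(gt), size(gt), threshold);
    Fmeasure(i) = F(3);                      % F = [precision, recall, F-measure]
    MAE(i)      = CalMAE(smap, gt);
end

fprintf('S-measure: %.4f  F-measure: %.4f  MAE: %.4f\n', ...
    mean(Smeasure), mean(Fmeasure), mean(MAE));
save('Result.mat', 'Smeasure', 'Fmeasure', 'MAE');   % consumed by myPlot.m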