├── .dockerignore ├── Dockerfile ├── LICENSE ├── README.md ├── caffe ├── .gitignore ├── CMakeLists.txt ├── CMakeScripts │ ├── FindAtlas.cmake │ ├── FindGFlags.cmake │ ├── FindGlog.cmake │ ├── FindLAPACK.cmake │ ├── FindLMDB.cmake │ ├── FindLevelDB.cmake │ ├── FindMKL.cmake │ ├── FindOpenBLAS.cmake │ ├── FindSnappy.cmake │ └── lint.cmake ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config.example ├── README.md ├── caffe.cloc ├── docs │ ├── CNAME │ ├── README.md │ ├── _config.yml │ ├── _layouts │ │ └── default.html │ ├── development.md │ ├── images │ │ └── GitHub-Mark-64px.png │ ├── index.md │ ├── installation.md │ ├── model_zoo.md │ ├── performance_hardware.md │ ├── stylesheets │ │ ├── pygment_trac.css │ │ ├── reset.css │ │ └── styles.css │ └── tutorial │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig │ │ ├── .gitignore │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ ├── forward_backward.png │ │ ├── layer.jpg │ │ └── logreg.jpg │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md ├── examples │ ├── CMakeLists.txt │ ├── cifar10 │ │ ├── cifar10_full.prototxt │ │ ├── cifar10_full_solver.prototxt │ │ ├── cifar10_full_solver_lr1.prototxt │ │ ├── cifar10_full_solver_lr2.prototxt │ │ ├── cifar10_full_train_test.prototxt │ │ ├── cifar10_quick.prototxt │ │ ├── cifar10_quick_solver.prototxt │ │ ├── cifar10_quick_solver_lr1.prototxt │ │ ├── cifar10_quick_train_test.prototxt │ │ ├── convert_cifar_data.cpp │ │ ├── create_cifar10.sh │ │ ├── readme.md │ │ ├── train_full.sh │ │ └── train_quick.sh │ ├── classification.ipynb │ ├── detection.ipynb │ ├── feature_extraction │ │ ├── imagenet_val.prototxt │ │ └── readme.md │ ├── filter_visualization.ipynb │ ├── finetune_flickr_style │ │ ├── assemble_data.py │ │ ├── flickr_style.csv.gz │ │ └── readme.md │ ├── finetune_pascal_detection │ │ ├── pascal_finetune_solver.prototxt │ │ └── pascal_finetune_trainval_test.prototxt │ ├── hdf5_classification.ipynb │ ├── hdf5_classification │ │ ├── solver.prototxt │ │ ├── solver2.prototxt │ │ ├── train_val.prototxt │ │ └── train_val2.prototxt │ ├── imagenet │ │ ├── bvlc_caffenet_full_conv.prototxt │ │ ├── create_imagenet.sh │ │ ├── make_imagenet_mean.sh │ │ ├── readme.md │ │ ├── resume_training.sh │ │ └── train_caffenet.sh │ ├── images │ │ ├── cat.jpg │ │ └── fish-bike.jpg │ ├── mnist │ │ ├── convert_mnist_data.cpp │ │ ├── create_mnist.sh │ │ ├── lenet.prototxt │ │ ├── lenet_consolidated_solver.prototxt │ │ ├── lenet_solver.prototxt │ │ ├── lenet_train_test.prototxt │ │ ├── mnist_autoencoder.prototxt │ │ ├── mnist_autoencoder_solver.prototxt │ │ ├── mnist_autoencoder_solver_adagrad.prototxt │ │ ├── mnist_autoencoder_solver_nesterov.prototxt │ │ ├── readme.md │ │ ├── train_lenet.sh │ │ ├── train_lenet_consolidated.sh │ │ ├── train_mnist_autoencoder.sh │ │ ├── train_mnist_autoencoder_adagrad.sh │ │ └── train_mnist_autoencoder_nesterov.sh │ ├── net_surgery.ipynb │ ├── siamese │ │ ├── convert_mnist_siamese_data.cpp │ │ ├── create_mnist_siamese.sh │ │ ├── mnist_siamese.ipynb │ │ ├── mnist_siamese.prototxt │ │ ├── mnist_siamese_solver.prototxt │ │ ├── mnist_siamese_train_test.prototxt │ │ ├── readme.md │ │ └── train_mnist_siamese.sh │ └── web_demo │ │ ├── app.py │ │ ├── exifutil.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ └── templates │ │ └── index.html ├── include │ └── caffe │ │ ├── .DS_Store │ │ ├── blob.hpp │ │ ├── caffe.hpp │ │ ├── common.hpp │ │ ├── common_layers.hpp │ │ ├── data_layers.hpp │ │ ├── 
data_transformer.hpp │ │ ├── filler.hpp │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── loss_layers.hpp │ │ ├── net.hpp │ │ ├── neuron_layers.hpp │ │ ├── solver.hpp │ │ ├── syncedmem.hpp │ │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ │ ├── util │ │ ├── benchmark.hpp │ │ ├── cudnn.hpp │ │ ├── device_alternate.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── rng.hpp │ │ ├── thread.hpp │ │ └── upgrade_proto.hpp │ │ └── vision_layers.hpp ├── matlab │ ├── CMakeLists.txt │ └── caffe │ │ ├── ilsvrc_2012_mean.mat │ │ ├── matcaffe.cpp │ │ ├── matcaffe_batch.m │ │ ├── matcaffe_demo.m │ │ ├── matcaffe_init.m │ │ ├── prepare_batch.m │ │ ├── print_cell.m │ │ └── read_cell.m ├── python │ ├── CMakeLists.txt │ ├── caffe │ │ ├── __init__.py │ │ ├── _caffe.cpp │ │ ├── _caffe.hpp │ │ ├── classifier.py │ │ ├── detector.py │ │ ├── draw.py │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.npy │ │ ├── io.py │ │ └── pycaffe.py │ ├── classify.py │ ├── detect.py │ ├── draw_net.py │ └── requirements.txt ├── scripts │ ├── build_docs.sh │ ├── copy_notebook.py │ ├── cpp_lint.py │ ├── deploy_docs.sh │ ├── download_model_binary.py │ ├── download_model_from_gist.sh │ ├── gather_examples.sh │ ├── travis │ │ ├── travis_build_and_test.sh │ │ ├── travis_install.sh │ │ └── travis_setup_makefile_config.sh │ └── upload_model_to_gist.sh ├── src │ ├── caffe │ │ ├── CMakeLists.txt │ │ ├── blob.cpp │ │ ├── common.cpp │ │ ├── data_transformer.cpp │ │ ├── internal_thread.cpp │ │ ├── layer_factory.cpp │ │ ├── layers │ │ │ ├── JPEGDataLayer.cpp │ │ │ ├── absval_layer.cpp │ │ │ ├── absval_layer.cu │ │ │ ├── accuracy_layer.cpp │ │ │ ├── argmax_layer.cpp │ │ │ ├── base_data_layer.cpp │ │ │ ├── base_data_layer.cu │ │ │ ├── bnll_layer.cpp │ │ │ ├── bnll_layer.cu │ │ │ ├── clustering_loss_layer.cpp │ │ │ ├── clustering_loss_layer.cu │ │ │ ├── concat_layer.cpp │ │ │ ├── concat_layer.cu │ │ │ ├── contrastive_loss_layer.cpp │ │ │ ├── contrastive_loss_layer.cu │ │ │ ├── conv_layer.cpp │ │ │ ├── conv_layer.cu │ │ │ ├── cross_loss_layer.cpp │ │ │ ├── cross_loss_layer.cu │ │ │ ├── cudnn_conv_layer.cpp │ │ │ ├── cudnn_conv_layer.cu │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ ├── cudnn_pooling_layer.cu │ │ │ ├── cudnn_relu_layer.cpp │ │ │ ├── cudnn_relu_layer.cu │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ ├── cudnn_softmax_layer.cu │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ ├── cudnn_tanh_layer.cu │ │ │ ├── data_layer.cpp │ │ │ ├── dropout_layer.cpp │ │ │ ├── dropout_layer.cu │ │ │ ├── dummy_data_layer.cpp │ │ │ ├── eltwise_layer.cpp │ │ │ ├── eltwise_layer.cu │ │ │ ├── entropy_t_loss_layer.cpp │ │ │ ├── entropy_t_loss_layer.cu │ │ │ ├── euclidean_loss_layer.cpp │ │ │ ├── euclidean_loss_layer.cu │ │ │ ├── flatten_layer.cpp │ │ │ ├── flatten_layer.cu │ │ │ ├── gmm_loss_layer.cpp │ │ │ ├── gmm_loss_layer.cu │ │ │ ├── hdf5_data_layer.cpp │ │ │ ├── hdf5_data_layer.cu │ │ │ ├── hdf5_output_layer.cpp │ │ │ ├── hdf5_output_layer.cu │ │ │ ├── hinge_loss_layer.cpp │ │ │ ├── im2col_layer.cpp │ │ │ ├── im2col_layer.cu │ │ │ ├── image_data_layer.cpp │ │ │ ├── infogain_loss_layer.cpp │ │ │ ├── inner_product_layer.cpp │ │ │ ├── inner_product_layer.cu │ │ │ ├── kmeans_loss_layer.cpp │ │ │ ├── kmeans_loss_layer.cu │ │ │ ├── l1_loss_layer.cpp │ │ │ ├── l1_loss_layer.cu │ │ │ ├── loss_layer.cpp │ │ │ ├── lrn_layer.cpp │ │ │ ├── lrn_layer.cu │ │ │ ├── memory_data_layer.cpp │ │ │ ├── multi_softmax_loss.cpp │ │ │ 
├── multi_softmax_loss.cu │ │ │ ├── multi_t_loss_layer.cpp │ │ │ ├── multi_t_loss_layer.cu │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ ├── mvn_layer.cpp │ │ │ ├── mvn_layer.cu │ │ │ ├── neuron_layer.cpp │ │ │ ├── pooling_layer.cpp │ │ │ ├── pooling_layer.cu │ │ │ ├── power_layer.cpp │ │ │ ├── power_layer.cu │ │ │ ├── relu_layer.cpp │ │ │ ├── relu_layer.cu │ │ │ ├── resize_layer.cpp │ │ │ ├── resize_layer.cu │ │ │ ├── shuffling_data_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ │ ├── sigmoid_layer.cpp │ │ │ ├── sigmoid_layer.cu │ │ │ ├── silence_layer.cpp │ │ │ ├── silence_layer.cu │ │ │ ├── slice_layer.cpp │ │ │ ├── slice_layer.cu │ │ │ ├── softmax_cross_entropy_layer.cu │ │ │ ├── softmax_cross_entropy_loss.cpp │ │ │ ├── softmax_layer.cpp │ │ │ ├── softmax_layer.cu │ │ │ ├── softmax_loss_layer.cpp │ │ │ ├── softmax_loss_layer.cu │ │ │ ├── split_layer.cpp │ │ │ ├── split_layer.cu │ │ │ ├── spring_loss_layer.cpp │ │ │ ├── spring_loss_layer.cu │ │ │ ├── t_loss_layer.cpp │ │ │ ├── t_loss_layer.cu │ │ │ ├── tanh_layer.cpp │ │ │ ├── tanh_layer.cu │ │ │ ├── threshold_layer.cpp │ │ │ ├── threshold_layer.cu │ │ │ └── window_data_layer.cpp │ │ ├── net.cpp │ │ ├── proto │ │ │ ├── CMakeLists.txt │ │ │ ├── caffe.proto │ │ │ └── caffe_pretty_print.proto │ │ ├── solver.cpp │ │ ├── syncedmem.cpp │ │ ├── test │ │ │ ├── CMakeLists.txt │ │ │ ├── cmake_test_defines.hpp.in │ │ │ ├── test_accuracy_layer.cpp │ │ │ ├── test_argmax_layer.cpp │ │ │ ├── test_benchmark.cpp │ │ │ ├── test_blob.cpp │ │ │ ├── test_caffe_main.cpp │ │ │ ├── test_clustering_loss_layer.cpp │ │ │ ├── test_common.cpp │ │ │ ├── test_concat_layer.cpp │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ ├── test_convolution_layer.cpp │ │ │ ├── test_cross_loss_layer.cpp │ │ │ ├── test_data │ │ │ │ ├── generate_sample_data.py │ │ │ │ ├── sample_data.h5 │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ ├── sample_data_list.txt │ │ │ │ └── sample_data_list.txt.in │ │ │ ├── test_data_layer.cpp │ │ │ ├── test_dummy_data_layer.cpp │ │ │ ├── test_eltwise_layer.cpp │ │ │ ├── test_entropy_t_loss_layer.cpp │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ ├── test_filler.cpp │ │ │ ├── test_flatten_layer.cpp │ │ │ ├── test_gmm_loss_layer.cpp │ │ │ ├── test_gradient_based_solver.cpp │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ ├── test_hdf5data_layer.cpp │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ ├── test_im2col_kernel.cu │ │ │ ├── test_im2col_layer.cpp │ │ │ ├── test_image_data_layer.cpp │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ ├── test_inner_product_layer.cpp │ │ │ ├── test_internal_thread.cpp │ │ │ ├── test_kmeans_loss_layer.cpp │ │ │ ├── test_l1_loss_layer.cpp │ │ │ ├── test_lrn_layer.cpp │ │ │ ├── test_math_functions.cpp │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ ├── test_memory_data_layer.cpp │ │ │ ├── test_multi_softmax_loss.cpp │ │ │ ├── test_multi_t_loss_layer.cpp │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ ├── test_mvn_layer.cpp │ │ │ ├── test_net.cpp │ │ │ ├── test_neuron_layer.cpp │ │ │ ├── test_platform.cpp │ │ │ ├── test_pooling_layer.cpp │ │ │ ├── test_power_layer.cpp │ │ │ ├── test_protobuf.cpp │ │ │ ├── test_random_number_generator.cpp │ │ │ ├── test_resize_layer.cpp │ │ │ ├── test_shuffling_data_layer.cpp │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── test_slice_layer.cpp │ │ │ ├── test_softmax_cross_entropy_loss.cpp │ │ │ ├── test_softmax_layer.cpp │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ ├── test_solver.cpp │ │ │ ├── test_split_layer.cpp │ │ │ 
├── test_spring_loss_layer.cpp │ │ │ ├── test_stochastic_pooling.cpp │ │ │ ├── test_syncedmem.cpp │ │ │ ├── test_threshold_layer.cpp │ │ │ ├── test_upgrade_proto.cpp │ │ │ └── test_util_blas.cpp │ │ └── util │ │ │ ├── benchmark.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── insert_splits.cpp │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ └── upgrade_proto.cpp │ └── gtest │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc └── tools │ ├── CMakeLists.txt │ ├── caffe.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── device_query.cpp │ ├── dump_network.cpp │ ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py.example │ └── resize_and_crop_images.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── net_speed_benchmark.cpp │ ├── test_net.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ └── upgrade_net_proto_text.cpp ├── dec ├── dec.py ├── exp │ ├── mnist │ │ └── save_iter_100000.caffemodel │ ├── reutersidf │ │ └── save_iter_100000.caffemodel │ ├── reutersidf10k │ │ └── save_iter_100000.caffemodel │ ├── stl │ │ └── save_iter_100000.caffemodel │ └── test │ │ └── .gitignore ├── make_mnist_data.py ├── make_reuters_data.py ├── make_stl_data.py ├── pretrain.py └── setup_features.py ├── mnist └── get_data.sh ├── reuters └── get_data.sh └── stl └── get_data.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | *.caffemodel 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM bfolkens/docker-opencv:2.4.12-cuda7.0-cudnn4 2 | 3 | # Install some dep packages 4 | 5 | ENV CAFFE_PACKAGES libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler gfortran libjpeg62 libfreeimage-dev python-dev \ 6 | python-pip python-scipy python-matplotlib python-scikits-learn ipython python-h5py python-leveldb python-networkx python-nose python-pandas \ 7 | python-dateutil python-protobuf python-yaml python-gflags python-skimage python-sympy cython \ 8 | libgoogle-glog-dev libbz2-dev libxml2-dev libxslt-dev libffi-dev libssl-dev libgflags-dev liblmdb-dev libboost1.54-all-dev libatlas-base-dev 9 | 10 | RUN apt-get update && \ 11 | apt-get install -y software-properties-common python-software-properties git wget build-essential pkg-config bc unzip cmake && \ 12 | add-apt-repository ppa:boost-latest/ppa && \ 13 | apt-get install -y $CAFFE_PACKAGES && \ 14 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 15 | 16 | RUN pip install -U leveldb # fix GH Issue #7 17 | 18 | # Copy the source files over and build the project 19 | 20 | COPY . 
/usr/local/src/dec 21 | WORKDIR /usr/local/src/dec 22 | 23 | RUN cd /usr/local/src/dec/caffe && \ 24 | cp Makefile.config.example Makefile.config && \ 25 | make -j"$(nproc)" all 26 | 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Junyuan Xie 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deep Embedded Clustering 2 | 3 | This package implements the algorithm described in the paper "Unsupervised Deep Embedding for Clustering Analysis". It depends on OpenCV, NumPy, SciPy, and Caffe. 4 | 5 | This implementation is intended for reproducing the results in the paper. If you only want to try the algorithm and find Caffe too difficult to install, there is an easier-to-use experimental implementation in MXNet: https://github.com/dmlc/mxnet/blob/master/example/dec/dec.py, but note that its results can differ from the paper's. MXNet is a flexible deep learning library with fewer dependencies, and you are welcome to try it. An installation guide can be found here: https://mxnet.readthedocs.org/en/latest/build.html. Once you have installed MXNet, simply go into the directory examples/dec and run `python dec.py`. 6 | 7 | ## Usage 8 | To run, please first build our custom version of Caffe included in this package, following the official guide: http://caffe.berkeleyvision.org/installation.html. 9 | 10 | Then download the dataset you want to experiment on. We provide scripts for downloading the datasets used in the paper. For example, you can download MNIST with `cd mnist; ./get_data.sh`. Once the download completes, run `cd dec; python make_mnist_data.py` to prepare the data for Caffe. 11 | 12 | Once the data is ready, run `python dec.py DB` to run the experiment on database DB, where DB is one of mnist, stl, reutersidf10k, or reutersidf. We provide pretrained autoencoder weights with this package; you can also use dec/pretrain.py to train your own autoencoder (please read the source for usage info). 13 | 14 | ### Docker 15 | 16 | A Dockerfile is provided to easily create a clean development environment. To build the environment, run `docker build --rm -t dec .` and then `docker run --rm -it dec bash` to shell into the running container.
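For reference, the core computation that `dec.py` iterates — a Student's-t soft assignment between embedded points and cluster centers, and the sharpened target distribution it is trained to match — can be sketched in a few lines of NumPy. This is a paraphrase of the paper's equations (with the default degree of freedom alpha = 1), not the actual Caffe code in this package:

```python
import numpy as np

def soft_assign(z, mu, alpha=1.0):
    # q_ij: Student's t similarity between embedded point z_i and center mu_j
    d2 = ((z[:, None, :] - mu[None, :, :]) ** 2).sum(-1)
    q = (1.0 + d2 / alpha) ** (-(alpha + 1.0) / 2.0)
    return q / q.sum(axis=1, keepdims=True)

def target_distribution(q):
    # p_ij: square q_ij and renormalize by the soft cluster frequency f_j
    w = q ** 2 / q.sum(axis=0)
    return w / w.sum(axis=1, keepdims=True)

z = np.random.randn(6, 10)   # 6 embedded points in a 10-d feature space
mu = np.random.randn(3, 10)  # 3 cluster centers
q = soft_assign(z, mu)
p = target_distribution(q)
loss = (p * np.log(p / q)).sum()  # KL(P || Q), minimized w.r.t. z and mu
```

Training alternates between refreshing `p` and taking SGD steps on the KL loss — roughly what the custom t-loss layers in the bundled Caffe fork implement, and what ends up running inside the container built above.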
Alternatively, [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) can be used to enable GPU capability. 17 | 18 | -------------------------------------------------------------------------------- /caffe/.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | 3 | # Compiled Object files 4 | *.slo 5 | *.lo 6 | *.o 7 | *.cuo 8 | 9 | # Compiled Dynamic libraries 10 | *.so 11 | *.dylib 12 | 13 | # Compiled Static libraries 14 | *.lai 15 | *.la 16 | *.a 17 | 18 | # Compiled protocol buffers 19 | *.pb.h 20 | *.pb.cc 21 | *_pb2.py 22 | 23 | # Compiled python 24 | *.pyc 25 | 26 | # Compiled MATLAB 27 | *.mex* 28 | 29 | # build, distribute, and bins 30 | build 31 | .build_debug/* 32 | .build_release/* 33 | distribute/* 34 | *.testbin 35 | *.bin 36 | python/caffe/proto/ 37 | 38 | # Editor temporaries 39 | *.swp 40 | *~ 41 | 42 | # IPython notebook checkpoints 43 | .ipynb_checkpoints 44 | 45 | ## Caffe 46 | 47 | # User's build configuration 48 | Makefile.config 49 | 50 | # Data and models are either 51 | # 1. reference, and not casually committed 52 | # 2. custom, and live on their own unless they're deliberately contributed 53 | data/* 54 | models/* 55 | *.caffemodel 56 | *.solverstate 57 | *.binaryproto 58 | *leveldb 59 | *lmdb 60 | 61 | # LevelDB files 62 | *.sst 63 | *.ldb 64 | LOCK 65 | LOG* 66 | CURRENT 67 | MANIFEST-* 68 | 69 | # Generated documentation 70 | docs/_site 71 | docs/gathered 72 | _site 73 | doxygen 74 | docs/dev 75 | 76 | # Sublime Text settings 77 | *.sublime-workspace 78 | *.sublime-project 79 | 80 | # Eclipse Project settings 81 | *.*project 82 | 83 | # CMake generated files 84 | *.gen.cmake 85 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindAtlas.cmake: -------------------------------------------------------------------------------- 1 | # Find the Atlas (and Lapack) libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Atlas_ROOT_DIR: Base directory where all Atlas components are found 5 | # 6 | # The following are set after configuration is done: 7 | # Atlas_FOUND 8 | # Atlas_INCLUDE_DIRS 9 | # Atlas_LIBRARIES 10 | # Atlas_LIBRARY_DIRS 11 | 12 | set(Atlas_INCLUDE_SEARCH_PATHS 13 | /usr/include/atlas 14 | /usr/include/atlas-base 15 | $ENV{Atlas_ROOT_DIR} 16 | $ENV{Atlas_ROOT_DIR}/include 17 | ) 18 | 19 | set(Atlas_LIB_SEARCH_PATHS 20 | /usr/lib/atlas 21 | /usr/lib/atlas-base 22 | $ENV{Atlas_ROOT_DIR} 23 | $ENV{Atlas_ROOT_DIR}/lib 24 | ) 25 | 26 | find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) 27 | find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) 28 | find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) 29 | find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) 30 | find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) 31 | 32 | set(LOOKED_FOR 33 | 34 | Atlas_CBLAS_INCLUDE_DIR 35 | Atlas_CLAPACK_INCLUDE_DIR 36 | 37 | Atlas_CBLAS_LIBRARY 38 | Atlas_BLAS_LIBRARY 39 | Atlas_LAPACK_LIBRARY 40 | ) 41 | 42 | include(FindPackageHandleStandardArgs) 43 | find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR}) 44 | 45 | if(ATLAS_FOUND) 46 | 47 | mark_as_advanced(${LOOKED_FOR}) 48 | 49 | set(Atlas_INCLUDE_DIR 50 | ${Atlas_CBLAS_INCLUDE_DIR} 51 | ${Atlas_CLAPACK_INCLUDE_DIR} 52 | ) 53 | 54 | set(Atlas_LIBRARIES 55 |
${Atlas_LAPACK_LIBRARY} 56 | ${Atlas_CBLAS_LIBRARY} 57 | ${Atlas_BLAS_LIBRARY} 58 | ) 59 | 60 | endif(ATLAS_FOUND) 61 | 62 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindGFlags.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find GFLAGS 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GFLAGS_FOUND 8 | # GFLAGS_INCLUDE_DIRS 9 | # GFLAGS_LIBRARIES 10 | # GFLAGS_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") 15 | 16 | # We are testing only a couple of files in the include directories 17 | if(WIN32) 18 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 19 | PATHS ${GFLAGS_ROOT_DIR}/src/windows) 20 | else() 21 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 22 | PATHS ${GFLAGS_ROOT_DIR}) 23 | endif() 24 | 25 | if(MSVC) 26 | find_library(GFLAGS_LIBRARY_RELEASE 27 | NAMES libgflags 28 | PATHS ${GFLAGS_ROOT_DIR} 29 | PATH_SUFFIXES Release) 30 | 31 | find_library(GFLAGS_LIBRARY_DEBUG 32 | NAMES libgflags-debug 33 | PATHS ${GFLAGS_ROOT_DIR} 34 | PATH_SUFFIXES Debug) 35 | 36 | set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) 37 | else() 38 | find_library(GFLAGS_LIBRARY gflags) 39 | endif() 40 | 41 | find_package_handle_standard_args(GFLAGS DEFAULT_MSG 42 | GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) 43 | 44 | 45 | if(GFLAGS_FOUND) 46 | set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) 47 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) 48 | endif() 49 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindGlog.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Glog 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GLOG_ROOT_DIR: Base directory where all GLOG components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GLOG_FOUND 8 | # GLOG_INCLUDE_DIRS 9 | # GLOG_LIBRARIES 10 | # GLOG_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") 15 | 16 | if(WIN32) 17 | find_path(GLOG_INCLUDE_DIR glog/logging.h 18 | PATHS ${GLOG_ROOT_DIR}/src/windows) 19 | else() 20 | find_path(GLOG_INCLUDE_DIR glog/logging.h 21 | PATHS ${GLOG_ROOT_DIR}) 22 | endif() 23 | 24 | if(MSVC) 25 | find_library(GLOG_LIBRARY_RELEASE libglog_static 26 | PATHS ${GLOG_ROOT_DIR} 27 | PATH_SUFFIXES Release) 28 | 29 | find_library(GLOG_LIBRARY_DEBUG libglog_static 30 | PATHS ${GLOG_ROOT_DIR} 31 | PATH_SUFFIXES Debug) 32 | 33 | set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) 34 | else() 35 | find_library(GLOG_LIBRARY glog 36 | PATHS ${GLOG_ROOT_DIR} 37 | PATH_SUFFIXES 38 | lib 39 | lib64) 40 | endif() 41 | 42 | find_package_handle_standard_args(GLOG DEFAULT_MSG 43 | GLOG_INCLUDE_DIR GLOG_LIBRARY) 44 | 45 | if(GLOG_FOUND) 46 | set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) 47 | set(GLOG_LIBRARIES ${GLOG_LIBRARY}) 48 | endif() 49 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMDB libraries and headers 2 |
# LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license. 10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | if (LMDB_INCLUDE_DIR AND LMDB_LIBRARIES) 16 | # Already in cache, be silent 17 | set(LMDB_FIND_QUIETLY TRUE) 18 | endif (LMDB_INCLUDE_DIR AND LMDB_LIBRARIES) 19 | 20 | find_path(LMDB_INCLUDE_DIR NAMES "lmdb.h" HINTS "$ENV{LMDB_DIR}/include") 21 | find_library(LMDB_LIBRARIES NAMES lmdb HINTS $ENV{LMDB_DIR}/lib ) 22 | MESSAGE(STATUS "LMDB lib: " ${LMDB_LIBRARIES} ) 23 | MESSAGE(STATUS "LMDB include: " ${LMDB_INCLUDE_DIR} ) 24 | 25 | include(FindPackageHandleStandardArgs) 26 | FIND_PACKAGE_HANDLE_STANDARD_ARGS(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 27 | 28 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 29 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindLevelDB.cmake: -------------------------------------------------------------------------------- 1 | # - Find LevelDB 2 | # 3 | # LEVELDB_INCLUDE - Where to find leveldb/db.h 4 | # LEVELDB_LIBS - List of libraries when using LevelDB. 5 | # LEVELDB_FOUND - True if LevelDB found. 6 | 7 | get_filename_component(module_file_path ${CMAKE_CURRENT_LIST_FILE} PATH) 8 | 9 | # Look for the header file. 10 | find_path(LEVELDB_INCLUDE NAMES leveldb/db.h PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include DOC "Path in which the file leveldb/db.h is located." ) 11 | mark_as_advanced(LEVELDB_INCLUDE) 12 | 13 | # Look for the library. 14 | # Does this work on UNIX systems? (LINUX) 15 | find_library(LEVELDB_LIBS NAMES leveldb PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib DOC "Path to leveldb library." ) 16 | mark_as_advanced(LEVELDB_LIBS) 17 | 18 | # Copy the results to the output variables. 19 | if (LEVELDB_INCLUDE AND LEVELDB_LIBS) 20 | message(STATUS "Found leveldb in ${LEVELDB_INCLUDE} ${LEVELDB_LIBS}") 21 | set(LEVELDB_FOUND 1) 22 | include(CheckCXXSourceCompiles) 23 | set(CMAKE_REQUIRED_LIBRARY ${LEVELDB_LIBS} pthread) 24 | set(CMAKE_REQUIRED_INCLUDES ${LEVELDB_INCLUDE}) 25 | else () 26 | set(LEVELDB_FOUND 0) 27 | endif () 28 | 29 | # Report the results. 30 | if (NOT LEVELDB_FOUND) 31 | set(LEVELDB_DIR_MESSAGE "LEVELDB was not found.
Make sure LEVELDB_LIBS and LEVELDB_INCLUDE are set.") 32 | if (LEVELDB_FIND_REQUIRED) 33 | message(FATAL_ERROR "${LEVELDB_DIR_MESSAGE}") 34 | elseif (NOT LEVELDB_FIND_QUIETLY) 35 | message(STATUS "${LEVELDB_DIR_MESSAGE}") 36 | endif () 37 | endif () -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindOpenBLAS.cmake: -------------------------------------------------------------------------------- 1 | 2 | 3 | SET(Open_BLAS_INCLUDE_SEARCH_PATHS 4 | /usr/include 5 | /usr/include/openblas-base 6 | /usr/local/include 7 | /usr/local/include/openblas-base 8 | /opt/OpenBLAS/include 9 | $ENV{OpenBLAS_HOME} 10 | $ENV{OpenBLAS_HOME}/include 11 | ) 12 | 13 | SET(Open_BLAS_LIB_SEARCH_PATHS 14 | /lib/ 15 | /lib/openblas-base 16 | /lib64/ 17 | /usr/lib 18 | /usr/lib/openblas-base 19 | /usr/lib64 20 | /usr/local/lib 21 | /usr/local/lib64 22 | /opt/OpenBLAS/lib 23 | $ENV{OpenBLAS} 24 | $ENV{OpenBLAS}/lib 25 | $ENV{OpenBLAS_HOME} 26 | $ENV{OpenBLAS_HOME}/lib 27 | ) 28 | 29 | FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS}) 30 | FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS}) 31 | 32 | SET(OpenBLAS_FOUND ON) 33 | 34 | # Check include files 35 | IF(NOT OpenBLAS_INCLUDE_DIR) 36 | SET(OpenBLAS_FOUND OFF) 37 | MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off") 38 | ENDIF() 39 | 40 | # Check libraries 41 | IF(NOT OpenBLAS_LIB) 42 | SET(OpenBLAS_FOUND OFF) 43 | MESSAGE(STATUS "Could not find OpenBLAS lib. Turning OpenBLAS_FOUND off") 44 | ENDIF() 45 | 46 | IF (OpenBLAS_FOUND) 47 | IF (NOT OpenBLAS_FIND_QUIETLY) 48 | MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}") 49 | MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}") 50 | ENDIF (NOT OpenBLAS_FIND_QUIETLY) 51 | ELSE (OpenBLAS_FOUND) 52 | IF (OpenBLAS_FIND_REQUIRED) 53 | MESSAGE(FATAL_ERROR "Could not find OpenBLAS") 54 | ENDIF (OpenBLAS_FIND_REQUIRED) 55 | ENDIF (OpenBLAS_FOUND) 56 | 57 | MARK_AS_ADVANCED( 58 | OpenBLAS_INCLUDE_DIR 59 | OpenBLAS_LIB 60 | OpenBLAS 61 | ) 62 | 63 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # Snappy_FOUND 8 | # Snappy_INCLUDE_DIRS 9 | # Snappy_LIBS 10 | 11 | find_path(SNAPPY_INCLUDE_DIR 12 | NAMES snappy.h 13 | HINTS ${SNAPPY_ROOT_DIR} 14 | ${SNAPPY_ROOT_DIR}/include 15 | ) 16 | 17 | find_library(SNAPPY_LIBS 18 | NAMES snappy 19 | HINTS ${SNAPPY_ROOT_DIR} 20 | ${SNAPPY_ROOT_DIR}/lib 21 | ) 22 | 23 | include(FindPackageHandleStandardArgs) 24 | find_package_handle_standard_args(Snappy 25 | DEFAULT_MSG 26 | SNAPPY_LIBS 27 | SNAPPY_INCLUDE_DIR 28 | ) 29 | 30 | mark_as_advanced( 31 | SNAPPY_LIBS 32 | SNAPPY_INCLUDE_DIR 33 | ) 34 | -------------------------------------------------------------------------------- /caffe/CMakeScripts/lint.cmake: -------------------------------------------------------------------------------- 1 | 2 | set(CMAKE_SOURCE_DIR ../) 3 | set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py) 4 | set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc) 5 | set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc) 6 | set(LINT_DIRS include src/caffe examples tools python
matlab) 7 | 8 | # find all files of interest 9 | foreach(ext ${SRC_FILE_EXTENSIONS}) 10 | foreach(dir ${LINT_DIRS}) 11 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext}) 12 | set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES}) 13 | endforeach() 14 | endforeach() 15 | 16 | # find all files that should be excluded 17 | foreach(ext ${EXCLUDE_FILE_EXTENSTIONS}) 18 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext}) 19 | set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES}) 20 | endforeach() 21 | 22 | # exclude generated pb files 23 | list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES}) 24 | 25 | execute_process( 26 | COMMAND ${LINT_COMMAND} ${LINT_SOURCES} 27 | ERROR_VARIABLE LINT_OUTPUT 28 | ERROR_STRIP_TRAILING_WHITESPACE 29 | ) 30 | 31 | string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT}) 32 | 33 | list(GET LINT_OUTPUT -1 LINT_RESULT) 34 | list(REMOVE_AT LINT_OUTPUT -1) 35 | string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT}) 36 | list(GET LINT_RESULT -1 NUM_ERRORS) 37 | if(NUM_ERRORS GREATER 0) 38 | foreach(msg ${LINT_OUTPUT}) 39 | string(FIND ${msg} "Done" result) 40 | if(result LESS 0) 41 | message(STATUS ${msg}) 42 | endif() 43 | endforeach() 44 | message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!") 45 | else() 46 | message(STATUS "Lint did not find any errors!") 47 | endif() 48 | 49 | -------------------------------------------------------------------------------- /caffe/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites. 16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /caffe/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 5 | 6 | Check the issue tracker in case you need help: 7 | https://github.com/BVLC/caffe/issues 8 | -------------------------------------------------------------------------------- /caffe/LICENSE: -------------------------------------------------------------------------------- 1 | COPYRIGHT 2 | 3 | All contributions by the University of California: 4 | Copyright (c) 2014, The Regents of the University of California (Regents) 5 | All rights reserved. 6 | 7 | All other contributions: 8 | Copyright (c) 2014, the respective contributors 9 | All rights reserved. 10 | 11 | Caffe uses a shared copyright model: each contributor holds copyright over 12 | their contributions to Caffe. The project versioning records all such 13 | contribution and copyright details. If a contributor wants to further mark 14 | their specific copyright on a particular contribution, they should indicate 15 | their copyright solely in the commit message of the change when it is 16 | committed. 
17 | 18 | LICENSE 19 | 20 | Redistribution and use in source and binary forms, with or without 21 | modification, are permitted provided that the following conditions are met: 22 | 23 | 1. Redistributions of source code must retain the above copyright notice, this 24 | list of conditions and the following disclaimer. 25 | 2. Redistributions in binary form must reproduce the above copyright notice, 26 | this list of conditions and the following disclaimer in the documentation 27 | and/or other materials provided with the distribution. 28 | 29 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 30 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 31 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 32 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 33 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 34 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 35 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 36 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 38 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 | 40 | CONTRIBUTION AGREEMENT 41 | 42 | By contributing to the BVLC/caffe repository through pull-request, comment, 43 | or otherwise, the contributor releases their content to the 44 | license and copyright terms herein. 45 | -------------------------------------------------------------------------------- /caffe/README.md: -------------------------------------------------------------------------------- 1 | # Caffe 2 | 3 | Caffe is a deep learning framework developed with cleanliness, readability, and speed in mind.
4 | Consult the [project website](http://caffe.berkeleyvision.org) for all documentation. 5 | -------------------------------------------------------------------------------- /caffe/caffe.cloc: -------------------------------------------------------------------------------- 1 | Bourne Shell 2 | filter remove_matches ^\s*# 3 | filter remove_inline #.*$ 4 | extension sh 5 | script_exe sh 6 | C 7 | filter remove_matches ^\s*// 8 | filter call_regexp_common C 9 | filter remove_inline //.*$ 10 | extension c 11 | extension ec 12 | extension pgc 13 | C++ 14 | filter remove_matches ^\s*// 15 | filter remove_inline //.*$ 16 | filter call_regexp_common C 17 | extension C 18 | extension cc 19 | extension cpp 20 | extension cxx 21 | extension pcc 22 | C/C++ Header 23 | filter remove_matches ^\s*// 24 | filter call_regexp_common C 25 | filter remove_inline //.*$ 26 | extension H 27 | extension h 28 | extension hh 29 | extension hpp 30 | CUDA 31 | filter remove_matches ^\s*// 32 | filter remove_inline //.*$ 33 | filter call_regexp_common C 34 | extension cu 35 | Python 36 | filter remove_matches ^\s*# 37 | filter docstring_to_C 38 | filter call_regexp_common C 39 | filter remove_inline #.*$ 40 | extension py 41 | make 42 | filter remove_matches ^\s*# 43 | filter remove_inline #.*$ 44 | extension Gnumakefile 45 | extension Makefile 46 | extension am 47 | extension gnumakefile 48 | extension makefile 49 | filename Gnumakefile 50 | filename Makefile 51 | filename gnumakefile 52 | filename makefile 53 | script_exe make 54 | -------------------------------------------------------------------------------- /caffe/docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /caffe/docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 6 | -------------------------------------------------------------------------------- /caffe/docs/_config.yml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - 3 | scope: 4 | path: "" # an empty string here means all files in the project 5 | values: 6 | layout: "default" 7 | 8 | -------------------------------------------------------------------------------- /caffe/docs/_layouts/default.html: -------------------------------------------------------------------------------- [lines 1-53: the HTML markup of this Jekyll layout was stripped during extraction; what survives is the page title "Caffe {% if page contains 'title' %}| {{ page.title }}{% endif %}", a masthead reading "Caffe" with the tagline "Deep learning framework developed by Yangqing Jia / BVLC", and the {{ content }} placeholder that receives each page's body.] -------------------------------------------------------------------------------- /caffe/docs/images/GitHub-Mark-64px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/images/GitHub-Mark-64px.png -------------------------------------------------------------------------------- /caffe/docs/stylesheets/reset.css: -------------------------------------------------------------------------------- 1 | /* MeyerWeb Reset */ 2 | 3 | html, body, div, span, applet, object, iframe, 4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 5 | a, abbr, acronym, address, big, cite, code, 6 | del, dfn, em, img, ins, kbd, q, s, samp, 7 | small, strike, strong, sub, sup, tt, var, 8 | b, u, i, center, 9 | dl, dt, dd, ol, ul, li, 10 | fieldset, form, label, legend, 11 | table, caption, tbody, tfoot, thead, tr, th, td, 12 | article, aside, canvas, details, embed, 13 | figure, figcaption, footer, header, hgroup, 14 | menu, nav, output, ruby, section, summary, 15 | time, mark, audio, video { 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | -------------------------------------------------------------------------------- /caffe/docs/tutorial/convolution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convolution 3 | --- 4 | # Caffeinated Convolution 5 | 6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication. 7 | This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices. 8 | 9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo). 10 | 11 | As it turns out, this same reduction was independently explored in the context of conv. nets by 12 | 13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006. 14 |
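As a concrete illustration of the reduction described above, here is a minimal NumPy im2col for a single-channel image with stride 1 and no padding — a sketch of the idea, not Caffe's actual im2col.cpp/im2col.cu:

```python
import numpy as np

def im2col(x, k):
    # Unroll each k-by-k patch of a 2-D image into one column (stride 1, no padding).
    H, W = x.shape
    cols = [x[i:i + k, j:j + k].ravel()
            for i in range(H - k + 1) for j in range(W - k + 1)]
    return np.array(cols).T  # shape (k*k, number_of_patches)

x = np.arange(25, dtype=float).reshape(5, 5)
w = np.random.randn(3, 3)

# Correlation-style convolution (no kernel flip, as in deep learning frameworks)
# becomes a single matrix product against the unrolled patches.
out = np.dot(w.ravel(), im2col(x, 3)).reshape(3, 3)

# Check against the direct sliding-window definition.
ref = np.array([[(x[i:i + 3, j:j + 3] * w).sum() for j in range(3)]
                for i in range(3)])
assert np.allclose(out, ref)
```

With multiple input channels and filters, the same trick turns the filter bank into one matrix of shape (num_filters, k*k*channels), which is why a tuned GEMM dominates the runtime.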
-------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/.gitignore -------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/backward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/backward.jpg -------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/forward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/forward.jpg -------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/forward_backward.png -------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/layer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/layer.jpg -------------------------------------------------------------------------------- /caffe/docs/tutorial/fig/logreg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/docs/tutorial/fig/logreg.jpg -------------------------------------------------------------------------------- /caffe/examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( Examples ) 2 | 3 | file(GLOB_RECURSE EXAMPLES_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 4 | 5 | foreach(source ${EXAMPLES_SOURCES}) 6 | # get file name 7 | get_filename_component(name ${source} NAME_WE) 8 | 9 | # get folder name 10 | get_filename_component(path ${source} PATH) 11 | get_filename_component(folder ${path} NAME_WE) 12 | 13 | add_executable(${name} ${source}) 14 | target_link_libraries(${name} caffe) 15 | set_target_properties(${name} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${folder}) 16 | endforeach(source) 17 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_full_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by a factor of 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations.
11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 60000 22 | # snapshot intermediate results 23 | snapshot: 10000 24 | snapshot_prefix: "examples/cifar10/cifar10_full" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_full_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by a factor of 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network. 13 | base_lr: 0.0001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 65000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "examples/cifar10/cifar10_full" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_full_solver_lr2.prototxt: -------------------------------------------------------------------------------- 1 | # reduce learning rate after 120 epochs (60000 iters) by a factor of 10 2 | # then another factor of 10 after 10 more epochs (5000 iters) 3 | 4 | # The train/test net protocol buffer definition 5 | net: "examples/cifar10/cifar10_full_train_test.prototxt" 6 | # test_iter specifies how many forward passes the test should carry out. 7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations, 8 | # covering the full 10,000 testing images. 9 | test_iter: 100 10 | # Carry out testing every 1000 training iterations. 11 | test_interval: 1000 12 | # The base learning rate, momentum and the weight decay of the network.
13 | base_lr: 0.00001 14 | momentum: 0.9 15 | weight_decay: 0.004 16 | # The learning rate policy 17 | lr_policy: "fixed" 18 | # Display every 200 iterations 19 | display: 200 20 | # The maximum number of iterations 21 | max_iter: 70000 22 | # snapshot intermediate results 23 | snapshot: 5000 24 | snapshot_prefix: "examples/cifar10/cifar10_full" 25 | # solver mode: CPU or GPU 26 | solver_mode: GPU 27 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_quick.prototxt: -------------------------------------------------------------------------------- 1 | name: "CIFAR10_quick_test" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 32 6 | input_dim: 32 7 | layers { 8 | name: "conv1" 9 | type: CONVOLUTION 10 | bottom: "data" 11 | top: "conv1" 12 | blobs_lr: 1 13 | blobs_lr: 2 14 | convolution_param { 15 | num_output: 32 16 | pad: 2 17 | kernel_size: 5 18 | stride: 1 19 | } 20 | } 21 | layers { 22 | name: "pool1" 23 | type: POOLING 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 3 29 | stride: 2 30 | } 31 | } 32 | layers { 33 | name: "relu1" 34 | type: RELU 35 | bottom: "pool1" 36 | top: "pool1" 37 | } 38 | layers { 39 | name: "conv2" 40 | type: CONVOLUTION 41 | bottom: "pool1" 42 | top: "conv2" 43 | blobs_lr: 1 44 | blobs_lr: 2 45 | convolution_param { 46 | num_output: 32 47 | pad: 2 48 | kernel_size: 5 49 | stride: 1 50 | } 51 | } 52 | layers { 53 | name: "relu2" 54 | type: RELU 55 | bottom: "conv2" 56 | top: "conv2" 57 | } 58 | layers { 59 | name: "pool2" 60 | type: POOLING 61 | bottom: "conv2" 62 | top: "pool2" 63 | pooling_param { 64 | pool: AVE 65 | kernel_size: 3 66 | stride: 2 67 | } 68 | } 69 | layers { 70 | name: "conv3" 71 | type: CONVOLUTION 72 | bottom: "pool2" 73 | top: "conv3" 74 | blobs_lr: 1 75 | blobs_lr: 2 76 | convolution_param { 77 | num_output: 64 78 | pad: 2 79 | kernel_size: 5 80 | stride: 1 81 | } 82 | } 83 | layers { 84 | name: "relu3" 85 | type: RELU 86 | bottom: "conv3" 87 | top: "conv3" 88 | } 89 | layers { 90 | name: "pool3" 91 | type: POOLING 92 | bottom: "conv3" 93 | top: "pool3" 94 | pooling_param { 95 | pool: AVE 96 | kernel_size: 3 97 | stride: 2 98 | } 99 | } 100 | layers { 101 | name: "ip1" 102 | type: INNER_PRODUCT 103 | bottom: "pool3" 104 | top: "ip1" 105 | blobs_lr: 1 106 | blobs_lr: 2 107 | inner_product_param { 108 | num_output: 64 109 | } 110 | } 111 | layers { 112 | name: "ip2" 113 | type: INNER_PRODUCT 114 | bottom: "ip1" 115 | top: "ip2" 116 | blobs_lr: 1 117 | blobs_lr: 2 118 | inner_product_param { 119 | num_output: 10 120 | } 121 | } 122 | layers { 123 | name: "prob" 124 | type: SOFTMAX 125 | bottom: "ip2" 126 | top: "prob" 127 | } 128 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 
12 | base_lr: 0.001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 4000 21 | # snapshot intermediate results 22 | snapshot: 4000 23 | snapshot_prefix: "examples/cifar10/cifar10_quick" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/cifar10_quick_solver_lr1.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | test_iter: 100 9 | # Carry out testing every 500 training iterations. 10 | test_interval: 500 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 0.0001 13 | momentum: 0.9 14 | weight_decay: 0.004 15 | # The learning rate policy 16 | lr_policy: "fixed" 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 5000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/cifar10/cifar10_quick" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/create_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the cifar data into leveldb format. 3 | 4 | EXAMPLE=examples/cifar10 5 | DATA=data/cifar10 6 | 7 | echo "Creating leveldb..." 8 | 9 | rm -rf $EXAMPLE/cifar10_train_leveldb $EXAMPLE/cifar10_test_leveldb 10 | 11 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE 12 | 13 | echo "Computing image mean..." 14 | 15 | ./build/tools/compute_image_mean $EXAMPLE/cifar10_train_leveldb \ 16 | $EXAMPLE/mean.binaryproto leveldb 17 | 18 | echo "Done." 
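A note on the iteration counts quoted in the solver comments above: iterations translate to epochs as iterations × batch_size ÷ training-set size. A quick check of the numbers, assuming batch size 100 from the train_test prototxts and CIFAR-10's 50,000 training images:

```python
# Sanity-check the epoch arithmetic in the cifar10 solver comments.
train_images = 50000
batch_size = 100

print(60000 * batch_size / float(train_images))  # 120.0 epochs before the first LR drop
print(5000 * batch_size / float(train_images))   # 10.0 more epochs per reduced-LR stage
print(4000 * batch_size / float(train_images))   # 8.0 epochs for the "quick" schedule
```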
19 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/train_full.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_full_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \ 11 | --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate 12 | 13 | # reduce learning rate by factor of 10 14 | $TOOLS/caffe train \ 15 | --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \ 16 | --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate 17 | -------------------------------------------------------------------------------- /caffe/examples/cifar10/train_quick.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train \ 6 | --solver=examples/cifar10/cifar10_quick_solver.prototxt 7 | 8 | # reduce learning rate by factor of 10 after 8 epochs 9 | $TOOLS/caffe train \ 10 | --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ 11 | --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate 12 | -------------------------------------------------------------------------------- /caffe/examples/finetune_flickr_style/flickr_style.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/examples/finetune_flickr_style/flickr_style.csv.gz -------------------------------------------------------------------------------- /caffe/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt" 2 | test_iter: 100 3 | test_interval: 1000 4 | base_lr: 0.001 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 20000 8 | display: 20 9 | max_iter: 100000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune" 14 | -------------------------------------------------------------------------------- /caffe/examples/hdf5_classification/solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/hdf5_classification/train_val.prototxt" 2 | test_iter: 1000 3 | test_interval: 1000 4 | base_lr: 0.01 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 5000 8 | display: 1000 9 | max_iter: 10000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/hdf5_classification/data/train" 14 | solver_mode: CPU 15 | -------------------------------------------------------------------------------- /caffe/examples/hdf5_classification/solver2.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/hdf5_classification/train_val2.prototxt" 2 | test_iter: 1000 3 | test_interval: 1000 4 | base_lr: 0.01 5 | lr_policy: "step" 6 | gamma: 0.1 7 | stepsize: 5000 8 | display: 1000 9 | max_iter: 10000 10 | momentum: 0.9 11 | weight_decay: 0.0005 12 | snapshot: 10000 13 | snapshot_prefix: "examples/hdf5_classification/data/train" 14 | solver_mode: CPU 15 | 
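The two hdf5_classification solvers above drive the train_val nets that follow, whose HDF5_DATA layers read a text file listing HDF5 files that each hold `data` and `label` datasets. A minimal sketch of producing such inputs — the file names here are illustrative, not the ones the example ships with:

```python
import h5py
import numpy as np

# Toy two-class problem; Caffe's HDF5 data layer expects float32 arrays.
X = np.random.randn(1000, 4).astype(np.float32)
y = (X[:, 0] > 0).astype(np.float32)

with h5py.File('train.h5', 'w') as f:
    f.create_dataset('data', data=X)
    f.create_dataset('label', data=y)

# hdf5_data_param's 'source' is a text file naming one .h5 file per line.
with open('train.txt', 'w') as f:
    f.write('train.h5\n')
```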
-------------------------------------------------------------------------------- /caffe/examples/hdf5_classification/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "LogisticRegressionNet" 2 | layers { 3 | name: "data" 4 | type: HDF5_DATA 5 | top: "data" 6 | top: "label" 7 | hdf5_data_param { 8 | source: "examples/hdf5_classification/data/train.txt" 9 | batch_size: 10 10 | } 11 | include: { phase: TRAIN } 12 | } 13 | layers { 14 | name: "data" 15 | type: HDF5_DATA 16 | top: "data" 17 | top: "label" 18 | hdf5_data_param { 19 | source: "examples/hdf5_classification/data/test.txt" 20 | batch_size: 10 21 | } 22 | include: { phase: TEST } 23 | } 24 | layers { 25 | name: "fc1" 26 | type: INNER_PRODUCT 27 | bottom: "data" 28 | top: "fc1" 29 | blobs_lr: 1 30 | blobs_lr: 2 31 | weight_decay: 1 32 | weight_decay: 0 33 | inner_product_param { 34 | num_output: 2 35 | weight_filler { 36 | type: "gaussian" 37 | std: 0.01 38 | } 39 | bias_filler { 40 | type: "constant" 41 | value: 0 42 | } 43 | } 44 | } 45 | layers { 46 | name: "loss" 47 | type: SOFTMAX_LOSS 48 | bottom: "fc1" 49 | bottom: "label" 50 | top: "loss" 51 | } 52 | layers { 53 | name: "accuracy" 54 | type: ACCURACY 55 | bottom: "fc1" 56 | bottom: "label" 57 | top: "accuracy" 58 | include: { phase: TEST } 59 | } 60 | -------------------------------------------------------------------------------- /caffe/examples/hdf5_classification/train_val2.prototxt: -------------------------------------------------------------------------------- 1 | name: "LogisticRegressionNet" 2 | layers { 3 | name: "data" 4 | type: HDF5_DATA 5 | top: "data" 6 | top: "label" 7 | hdf5_data_param { 8 | source: "examples/hdf5_classification/data/train.txt" 9 | batch_size: 10 10 | } 11 | include: { phase: TRAIN } 12 | } 13 | layers { 14 | name: "data" 15 | type: HDF5_DATA 16 | top: "data" 17 | top: "label" 18 | hdf5_data_param { 19 | source: "examples/hdf5_classification/data/test.txt" 20 | batch_size: 10 21 | } 22 | include: { phase: TEST } 23 | } 24 | layers { 25 | name: "fc1" 26 | type: INNER_PRODUCT 27 | bottom: "data" 28 | top: "fc1" 29 | blobs_lr: 1 30 | blobs_lr: 2 31 | weight_decay: 1 32 | weight_decay: 0 33 | inner_product_param { 34 | num_output: 40 35 | weight_filler { 36 | type: "gaussian" 37 | std: 0.01 38 | } 39 | bias_filler { 40 | type: "constant" 41 | value: 0 42 | } 43 | } 44 | } 45 | layers { 46 | name: "relu1" 47 | type: RELU 48 | bottom: "fc1" 49 | top: "fc1" 50 | } 51 | layers { 52 | name: "fc2" 53 | type: INNER_PRODUCT 54 | bottom: "fc1" 55 | top: "fc2" 56 | blobs_lr: 1 57 | blobs_lr: 2 58 | weight_decay: 1 59 | weight_decay: 0 60 | inner_product_param { 61 | num_output: 2 62 | weight_filler { 63 | type: "gaussian" 64 | std: 0.01 65 | } 66 | bias_filler { 67 | type: "constant" 68 | value: 0 69 | } 70 | } 71 | } 72 | layers { 73 | name: "loss" 74 | type: SOFTMAX_LOSS 75 | bottom: "fc2" 76 | bottom: "label" 77 | top: "loss" 78 | } 79 | layers { 80 | name: "accuracy" 81 | type: ACCURACY 82 | bottom: "fc2" 83 | bottom: "label" 84 | top: "accuracy" 85 | include: { phase: TEST } 86 | } 87 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/create_imagenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Create the imagenet lmdb inputs 3 | # N.B. 
set the path to the imagenet train + val data dirs 4 | 5 | EXAMPLE=examples/imagenet 6 | DATA=data/ilsvrc12 7 | TOOLS=build/tools 8 | 9 | TRAIN_DATA_ROOT=/path/to/imagenet/train/ 10 | VAL_DATA_ROOT=/path/to/imagenet/val/ 11 | 12 | # Set RESIZE=true to resize the images to 256x256. Leave as false if images have 13 | # already been resized using another tool. 14 | RESIZE=false 15 | if $RESIZE; then 16 | RESIZE_HEIGHT=256 17 | RESIZE_WIDTH=256 18 | else 19 | RESIZE_HEIGHT=0 20 | RESIZE_WIDTH=0 21 | fi 22 | 23 | if [ ! -d "$TRAIN_DATA_ROOT" ]; then 24 | echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT" 25 | echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \ 26 | "where the ImageNet training data is stored." 27 | exit 1 28 | fi 29 | 30 | if [ ! -d "$VAL_DATA_ROOT" ]; then 31 | echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT" 32 | echo "Set the VAL_DATA_ROOT variable in create_imagenet.sh to the path" \ 33 | "where the ImageNet validation data is stored." 34 | exit 1 35 | fi 36 | 37 | echo "Creating train lmdb..." 38 | 39 | GLOG_logtostderr=1 $TOOLS/convert_imageset \ 40 | --resize_height=$RESIZE_HEIGHT \ 41 | --resize_width=$RESIZE_WIDTH \ 42 | --shuffle \ 43 | $TRAIN_DATA_ROOT \ 44 | $DATA/train.txt \ 45 | $EXAMPLE/ilsvrc12_train_lmdb 46 | 47 | echo "Creating val lmdb..." 48 | 49 | GLOG_logtostderr=1 $TOOLS/convert_imageset \ 50 | --resize_height=$RESIZE_HEIGHT \ 51 | --resize_width=$RESIZE_WIDTH \ 52 | --shuffle \ 53 | $VAL_DATA_ROOT \ 54 | $DATA/val.txt \ 55 | $EXAMPLE/ilsvrc12_val_lmdb 56 | 57 | echo "Done." 58 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/make_imagenet_mean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # Compute the mean image from the imagenet training leveldb 3 | # N.B. this is available in data/ilsvrc12 4 | 5 | ./build/tools/compute_image_mean examples/imagenet/ilsvrc12_train_leveldb \ 6 | data/ilsvrc12/imagenet_mean.binaryproto 7 | 8 | echo "Done." 
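convert_imageset, invoked above, expects $DATA/train.txt and $DATA/val.txt to hold one "<relative image path> <integer label>" pair per line. A sketch of generating such a listing, assuming the raw training set is unpacked as one subdirectory per synset (that directory layout and the helper name are illustrative assumptions; the official get_ilsvrc_aux.sh download provides the canonical listings):

    import os

    def write_listing(data_root, out_path):
        # One subdirectory per class; labels are assigned in sorted order.
        synsets = sorted(d for d in os.listdir(data_root)
                         if os.path.isdir(os.path.join(data_root, d)))
        label = {s: i for i, s in enumerate(synsets)}
        with open(out_path, 'w') as out:
            for s in synsets:
                for name in sorted(os.listdir(os.path.join(data_root, s))):
                    out.write('%s/%s %d\n' % (s, name, label[s]))

    # write_listing('/path/to/imagenet/train', 'data/ilsvrc12/train.txt')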
9 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/resume_training.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt \ 5 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate 6 | -------------------------------------------------------------------------------- /caffe/examples/imagenet/train_caffenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=models/bvlc_reference_caffenet/solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe/examples/images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/examples/images/cat.jpg -------------------------------------------------------------------------------- /caffe/examples/images/fish-bike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/examples/images/fish-bike.jpg -------------------------------------------------------------------------------- /caffe/examples/mnist/create_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into lmdb/leveldb format, 3 | # depending on the value assigned to $BACKEND. 4 | 5 | EXAMPLE=examples/mnist 6 | DATA=data/mnist 7 | BUILD=build/examples/mnist 8 | 9 | BACKEND="lmdb" 10 | 11 | echo "Creating ${BACKEND}..." 12 | 13 | rm -rf $EXAMPLE/mnist_train_${BACKEND} 14 | rm -rf $EXAMPLE/mnist_test_${BACKEND} 15 | 16 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \ 17 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND} 18 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \ 19 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND} 20 | 21 | echo "Done." 
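The raw files fed to convert_mnist_data.bin above are in the standard IDX format: a big-endian magic number whose low byte gives the rank, followed by one 32-bit size per dimension, then the raw bytes. A small sketch for sanity-checking the downloads before conversion (the header layout is the published MNIST spec, not something read from the converter's source):

    import struct

    def idx_header(path):
        with open(path, 'rb') as f:
            magic, = struct.unpack('>i', f.read(4))
            ndim = magic & 0xff  # low byte of the magic encodes the rank
            dims = struct.unpack('>' + 'i' * ndim, f.read(4 * ndim))
        return magic, dims

    # idx_header('data/mnist/train-images-idx3-ubyte') -> (2051, (60000, 28, 28))
    # idx_header('data/mnist/train-labels-idx1-ubyte') -> (2049, (60000,))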
22 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet.prototxt: -------------------------------------------------------------------------------- 1 | name: "LeNet" 2 | input: "data" 3 | input_dim: 64 4 | input_dim: 1 5 | input_dim: 28 6 | input_dim: 28 7 | layers { 8 | name: "conv1" 9 | type: CONVOLUTION 10 | bottom: "data" 11 | top: "conv1" 12 | blobs_lr: 1 13 | blobs_lr: 2 14 | convolution_param { 15 | num_output: 20 16 | kernel_size: 5 17 | stride: 1 18 | weight_filler { 19 | type: "xavier" 20 | } 21 | bias_filler { 22 | type: "constant" 23 | } 24 | } 25 | } 26 | layers { 27 | name: "pool1" 28 | type: POOLING 29 | bottom: "conv1" 30 | top: "pool1" 31 | pooling_param { 32 | pool: MAX 33 | kernel_size: 2 34 | stride: 2 35 | } 36 | } 37 | layers { 38 | name: "conv2" 39 | type: CONVOLUTION 40 | bottom: "pool1" 41 | top: "conv2" 42 | blobs_lr: 1 43 | blobs_lr: 2 44 | convolution_param { 45 | num_output: 50 46 | kernel_size: 5 47 | stride: 1 48 | weight_filler { 49 | type: "xavier" 50 | } 51 | bias_filler { 52 | type: "constant" 53 | } 54 | } 55 | } 56 | layers { 57 | name: "pool2" 58 | type: POOLING 59 | bottom: "conv2" 60 | top: "pool2" 61 | pooling_param { 62 | pool: MAX 63 | kernel_size: 2 64 | stride: 2 65 | } 66 | } 67 | layers { 68 | name: "ip1" 69 | type: INNER_PRODUCT 70 | bottom: "pool2" 71 | top: "ip1" 72 | blobs_lr: 1 73 | blobs_lr: 2 74 | inner_product_param { 75 | num_output: 500 76 | weight_filler { 77 | type: "xavier" 78 | } 79 | bias_filler { 80 | type: "constant" 81 | } 82 | } 83 | } 84 | layers { 85 | name: "relu1" 86 | type: RELU 87 | bottom: "ip1" 88 | top: "ip1" 89 | } 90 | layers { 91 | name: "ip2" 92 | type: INNER_PRODUCT 93 | bottom: "ip1" 94 | top: "ip2" 95 | blobs_lr: 1 96 | blobs_lr: 2 97 | inner_product_param { 98 | num_output: 10 99 | weight_filler { 100 | type: "xavier" 101 | } 102 | bias_filler { 103 | type: "constant" 104 | } 105 | } 106 | } 107 | layers { 108 | name: "prob" 109 | type: SOFTMAX 110 | bottom: "ip2" 111 | top: "prob" 112 | } 113 | -------------------------------------------------------------------------------- /caffe/examples/mnist/lenet_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/mnist/lenet_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 
10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0005 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 10000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/mnist/lenet" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder" 17 | momentum: 0.9 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "fixed" 10 | display: 100 11 | max_iter: 65000 12 | weight_decay: 0.0005 13 | snapshot: 10000 14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train" 15 | # solver mode: CPU or GPU 16 | solver_mode: GPU 17 | solver_type: ADAGRAD 18 | -------------------------------------------------------------------------------- /caffe/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt: -------------------------------------------------------------------------------- 1 | net: "examples/mnist/mnist_autoencoder.prototxt" 2 | test_state: { stage: 'test-on-train' } 3 | test_iter: 500 4 | test_state: { stage: 'test-on-test' } 5 | test_iter: 100 6 | test_interval: 500 7 | test_compute_loss: true 8 | base_lr: 0.01 9 | lr_policy: "step" 10 | gamma: 0.1 11 | stepsize: 10000 12 | display: 100 13 | max_iter: 65000 14 | weight_decay: 0.0005 15 | snapshot: 10000 16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train" 17 | momentum: 0.95 18 | # solver mode: CPU or GPU 19 | solver_mode: GPU 20 | solver_type: NESTEROV 21 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt 4 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_lenet_consolidated.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/lenet_consolidated_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt 5 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder_adagrad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt 5 | -------------------------------------------------------------------------------- /caffe/examples/mnist/train_mnist_autoencoder_nesterov.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./build/tools/caffe train \ 4 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt 5 | -------------------------------------------------------------------------------- /caffe/examples/siamese/create_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # This script converts the mnist data into leveldb format. 3 | 4 | EXAMPLES=./build/examples/siamese 5 | DATA=./data/mnist 6 | 7 | echo "Creating leveldb..." 8 | 9 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb 10 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb 11 | 12 | $EXAMPLES/convert_mnist_siamese_data.bin \ 13 | $DATA/train-images-idx3-ubyte \ 14 | $DATA/train-labels-idx1-ubyte \ 15 | ./examples/siamese/mnist_siamese_train_leveldb 16 | $EXAMPLES/convert_mnist_siamese_data.bin \ 17 | $DATA/t10k-images-idx3-ubyte \ 18 | $DATA/t10k-labels-idx1-ubyte \ 19 | ./examples/siamese/mnist_siamese_test_leveldb 20 | 21 | echo "Done." 
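The converter invoked above packs two MNIST images into each leveldb entry together with a binary target: 1 when the pair shares a digit class, 0 otherwise, which is what the contrastive loss in the siamese net consumes. A toy Python sketch of that pairing scheme (the two-channel datum layout is an assumption based on the converter's source; the arrays here are stand-ins, not real images):

    import random

    def make_pair(images, labels):
        i = random.randrange(len(images))
        j = random.randrange(len(images))
        pair = (images[i], images[j])              # stored as one 2-channel datum
        same = 1 if labels[i] == labels[j] else 0  # contrastive-loss target
        return pair, same

    digits = list(range(10))
    images = [[d] * 4 for d in digits]  # stand-in "images"
    print(make_pair(images, digits))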
22 | -------------------------------------------------------------------------------- /caffe/examples/siamese/mnist_siamese.prototxt: -------------------------------------------------------------------------------- 1 | name: "mnist_siamese" 2 | input: "data" 3 | input_dim: 10000 4 | input_dim: 1 5 | input_dim: 28 6 | input_dim: 28 7 | 8 | layers { 9 | name: "conv1" 10 | type: CONVOLUTION 11 | bottom: "data" 12 | top: "conv1" 13 | blobs_lr: 1 14 | blobs_lr: 2 15 | convolution_param { 16 | num_output: 20 17 | kernel_size: 5 18 | stride: 1 19 | } 20 | } 21 | layers { 22 | name: "pool1" 23 | type: POOLING 24 | bottom: "conv1" 25 | top: "pool1" 26 | pooling_param { 27 | pool: MAX 28 | kernel_size: 2 29 | stride: 2 30 | } 31 | } 32 | layers { 33 | name: "conv2" 34 | type: CONVOLUTION 35 | bottom: "pool1" 36 | top: "conv2" 37 | blobs_lr: 1 38 | blobs_lr: 2 39 | convolution_param { 40 | num_output: 50 41 | kernel_size: 5 42 | stride: 1 43 | } 44 | } 45 | layers { 46 | name: "pool2" 47 | type: POOLING 48 | bottom: "conv2" 49 | top: "pool2" 50 | pooling_param { 51 | pool: MAX 52 | kernel_size: 2 53 | stride: 2 54 | } 55 | } 56 | layers { 57 | name: "ip1" 58 | type: INNER_PRODUCT 59 | bottom: "pool2" 60 | top: "ip1" 61 | blobs_lr: 1 62 | blobs_lr: 2 63 | inner_product_param { 64 | num_output: 500 65 | } 66 | } 67 | layers { 68 | name: "relu1" 69 | type: RELU 70 | bottom: "ip1" 71 | top: "ip1" 72 | } 73 | layers { 74 | name: "ip2" 75 | type: INNER_PRODUCT 76 | bottom: "ip1" 77 | top: "ip2" 78 | blobs_lr: 1 79 | blobs_lr: 2 80 | inner_product_param { 81 | num_output: 10 82 | } 83 | } 84 | 85 | layers { 86 | name: "feat" 87 | type: INNER_PRODUCT 88 | bottom: "ip2" 89 | top: "feat" 90 | blobs_lr: 1 91 | blobs_lr: 2 92 | inner_product_param { 93 | num_output: 2 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /caffe/examples/siamese/mnist_siamese_solver.prototxt: -------------------------------------------------------------------------------- 1 | # The train/test net protocol buffer definition 2 | net: "examples/siamese/mnist_siamese_train_test.prototxt" 3 | # test_iter specifies how many forward passes the test should carry out. 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 5 | # covering the full 10,000 testing images. 6 | test_iter: 100 7 | # Carry out testing every 500 training iterations. 8 | test_interval: 500 9 | # The base learning rate, momentum and the weight decay of the network. 10 | base_lr: 0.01 11 | momentum: 0.9 12 | weight_decay: 0.0000 13 | # The learning rate policy 14 | lr_policy: "inv" 15 | gamma: 0.0001 16 | power: 0.75 17 | # Display every 100 iterations 18 | display: 100 19 | # The maximum number of iterations 20 | max_iter: 50000 21 | # snapshot intermediate results 22 | snapshot: 5000 23 | snapshot_prefix: "examples/siamese/mnist_siamese" 24 | # solver mode: CPU or GPU 25 | solver_mode: GPU 26 | -------------------------------------------------------------------------------- /caffe/examples/siamese/train_mnist_siamese.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | TOOLS=./build/tools 4 | 5 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt 6 | -------------------------------------------------------------------------------- /caffe/examples/web_demo/exifutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script handles the skimage exif problem. 
3 | """ 4 | 5 | from PIL import Image 6 | import numpy as np 7 | 8 | ORIENTATIONS = { # used in apply_orientation 9 | 2: (Image.FLIP_LEFT_RIGHT,), 10 | 3: (Image.ROTATE_180,), 11 | 4: (Image.FLIP_TOP_BOTTOM,), 12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90), 13 | 6: (Image.ROTATE_270,), 14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270), 15 | 8: (Image.ROTATE_90,) 16 | } 17 | 18 | 19 | def open_oriented_im(im_path): 20 | im = Image.open(im_path) 21 | if hasattr(im, '_getexif'): 22 | exif = im._getexif() 23 | if exif is not None and 274 in exif: 24 | orientation = exif[274] 25 | im = apply_orientation(im, orientation) 26 | img = np.asarray(im).astype(np.float32) / 255. 27 | if img.ndim == 2: 28 | img = img[:, :, np.newaxis] 29 | img = np.tile(img, (1, 1, 3)) 30 | elif img.shape[2] == 4: 31 | img = img[:, :, :3] 32 | return img 33 | 34 | 35 | def apply_orientation(im, orientation): 36 | if orientation in ORIENTATIONS: 37 | for method in ORIENTATIONS[orientation]: 38 | im = im.transpose(method) 39 | return im 40 | -------------------------------------------------------------------------------- /caffe/examples/web_demo/readme.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Web demo 3 | description: Image classification demo running as a Flask web server. 4 | category: example 5 | include_in_docs: true 6 | priority: 10 7 | --- 8 | 9 | # Web Demo 10 | 11 | ## Requirements 12 | 13 | The demo server requires Python with some dependencies. 14 | To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](/installation.html)). 15 | 16 | Make sure that you have obtained the Reference CaffeNet Model and the ImageNet Auxiliary Data: 17 | 18 | ./scripts/download_model_binary.py models/bvlc_reference_caffenet 19 | ./data/ilsvrc12/get_ilsvrc_aux.sh 20 | 21 | NOTE: if you run into trouble, try re-downloading the auxiliary files. 22 | 23 | ## Run 24 | 25 | Running `python examples/web_demo/app.py` will bring up the demo server, accessible at `http://0.0.0.0:5000`. 26 | You can enable debug mode of the web server, or switch to a different port: 27 | 28 | % python examples/web_demo/app.py -h 29 | Usage: app.py [options] 30 | 31 | Options: 32 | -h, --help show this help message and exit 33 | -d, --debug enable debug mode 34 | -p PORT, --port=PORT which port to serve content on 35 | 36 | ## How are the "maximally accurate" results generated? 37 | 38 | In a nutshell: ImageNet predictions are made at the leaf nodes, but the organization of the project allows leaf nodes to be united via more general parent nodes, with 'entity' at the very top. 39 | To give "maximally accurate" results, we "back off" from maximally specific predictions to maintain a high accuracy. 40 | The `bet_file` that is loaded in the demo provides the graph structure and names of all relevant ImageNet nodes as well as measures of information gain between them. 41 | Please see the "Hedging your bets" paper from [CVPR 2012](http://www.image-net.org/projects/hedging/) for further information. 
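A minimal sketch of the "back off" idea described above: starting from leaf probabilities, walk up the class hierarchy, pooling probability mass at each parent, and report the most specific node that clears a confidence threshold. The toy tree and threshold are illustrative assumptions, not the structure of the actual bet_file:

    PARENT = {'tabby': 'cat', 'siamese': 'cat', 'cat': 'entity'}

    def hedged_prediction(leaf_probs, threshold=0.9):
        probs = dict(leaf_probs)
        node = max(leaf_probs, key=leaf_probs.get)  # most specific guess
        while probs.get(node, 0.0) < threshold and node in PARENT:
            parent = PARENT[node]  # back off one level of the hierarchy
            probs[parent] = probs.get(parent, 0.0) + sum(
                p for n, p in probs.items() if PARENT.get(n) == parent)
            node = parent
        return node, probs[node]

    # Neither leaf is confident alone, but their parent is: -> ('cat', 0.95)
    print(hedged_prediction({'tabby': 0.50, 'siamese': 0.45}))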
42 | -------------------------------------------------------------------------------- /caffe/examples/web_demo/requirements.txt: -------------------------------------------------------------------------------- 1 | werkzeug 2 | flask 3 | tornado 4 | numpy 5 | pandas 6 | pillow 7 | -------------------------------------------------------------------------------- /caffe/include/caffe/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/include/caffe/.DS_Store -------------------------------------------------------------------------------- /caffe/include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/net.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/solver.hpp" 14 | #include "caffe/util/benchmark.hpp" 15 | #include "caffe/util/io.hpp" 16 | #include "caffe/vision_layers.hpp" 17 | 18 | #endif  // CAFFE_CAFFE_HPP_ 19 | -------------------------------------------------------------------------------- /caffe/include/caffe/data_transformer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_TRANSFORMER_HPP 2 | #define CAFFE_DATA_TRANSFORMER_HPP 3 | 4 | #include "caffe/common.hpp" 5 | #include "caffe/proto/caffe.pb.h" 6 | 7 | namespace caffe { 8 | 9 | /** 10 | * @brief Applies common transformations to the input data, such as 11 | * scaling, mirroring, subtracting the image mean... 12 | */ 13 | template <typename Dtype> 14 | class DataTransformer { 15 | public: 16 | explicit DataTransformer(const TransformationParameter& param) 17 | : param_(param) { 18 | phase_ = Caffe::phase(); 19 | } 20 | virtual ~DataTransformer() {} 21 | 22 | void InitRand(); 23 | 24 | /** 25 | * @brief Applies the transformation defined in the data layer's 26 | * transform_param block to the data. 27 | * 28 | * @param batch_item_id 29 | * Datum position within the batch. This is used to compute the 30 | * writing position in the top blob's data. 31 | * @param datum 32 | * Datum containing the data to be transformed. 33 | * @param mean 34 | * @param transformed_data 35 | * This is meant to be the top blob's data. The transformed data will be 36 | * written at the appropriate place within the blob's data.
37 | */ 38 | void Transform(const int batch_item_id, const Datum& datum, 39 | const Dtype* mean, Dtype* transformed_data, 40 | int *h_off = NULL, int *w_off = NULL, 41 | bool *mirrored = NULL); 42 | 43 | protected: 44 | virtual unsigned int Rand(); 45 | 46 | // Transformation parameters 47 | TransformationParameter param_; 48 | 49 | 50 | shared_ptr<Caffe::RNG> rng_; 51 | Caffe::Phase phase_; 52 | }; 53 | 54 | }  // namespace caffe 55 | 56 | #endif  // CAFFE_DATA_TRANSFORMER_HPP_ 57 | 58 | -------------------------------------------------------------------------------- /caffe/include/caffe/internal_thread.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_INTERNAL_THREAD_HPP_ 2 | #define CAFFE_INTERNAL_THREAD_HPP_ 3 | 4 | #include "caffe/common.hpp" 5 | 6 | namespace caffe { 7 | 8 | /** 9 | * A minimal wrapper for boost::thread to force host compilation for boost. 10 | * Defined in caffe/util/thread.hpp. 11 | */ 12 | class Thread { 13 | public: 14 | template <typename Callable, typename A1> 15 | Thread(Callable func, A1 a1); 16 | void join(); 17 | bool joinable(); 18 | private: 19 | void* thread_; 20 | }; 21 | 22 | /** 23 | * Virtual class encapsulating boost::thread for use in a base class. 24 | * The child class will acquire the ability to run a single thread 25 | * by reimplementing the virtual function InternalThreadEntry. 26 | */ 27 | class InternalThread { 28 | public: 29 | InternalThread() : thread_(NULL) {} 30 | virtual ~InternalThread(); 31 | 32 | /** Returns true if the thread was successfully started. **/ 33 | bool StartInternalThread(); 34 | 35 | /** Will not return until the internal thread has exited. */ 36 | bool WaitForInternalThreadToExit(); 37 | 38 | bool is_started() const { return thread_ != NULL && thread_->joinable(); } 39 | 40 | protected: 41 | /* Implement this method in your subclass 42 | with the code you want your thread to run. */ 43 | virtual void InternalThreadEntry() {} 44 | 45 | caffe::Thread* thread_; 46 | }; 47 | 48 | }  // namespace caffe 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /caffe/include/caffe/syncedmem.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_SYNCEDMEM_HPP_ 2 | #define CAFFE_SYNCEDMEM_HPP_ 3 | 4 | #include <cstdlib> 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | 9 | namespace caffe { 10 | 11 | // Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the 12 | // cudaMallocHost and cudaFree functions in order to create pinned memory. 13 | // However, that code relies on the existence of a CUDA GPU (I don't know 14 | // why that is a must, since allocating memory should not require accessing 15 | // the GPU resource, but it just produces an error as of CUDA 5.0) and will 16 | // cause problems when running on a machine without a GPU. Thus, we simply 17 | // define these two functions for safety and possible future change if the 18 | // problem of calling CUDA functions disappears in a future version. 19 | // 20 | // In practice, although we are creating unpinned memory here, as long as we 21 | // are constantly accessing it, the memory pages almost always stay in 22 | // physical memory (assuming enough memory is installed), and this does not 23 | // seem to create a memory bottleneck here.
24 | 25 | inline void CaffeMallocHost(void** ptr, size_t size) { 26 | *ptr = malloc(size); 27 | } 28 | 29 | inline void CaffeFreeHost(void* ptr) { 30 | free(ptr); 31 | } 32 | 33 | 34 | /** 35 | * @brief Manages memory allocation and synchronization between the host (CPU) 36 | * and device (GPU). 37 | * 38 | * TODO(dox): more thorough description. 39 | */ 40 | class SyncedMemory { 41 | public: 42 | SyncedMemory() 43 | : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), 44 | own_cpu_data_(false) {} 45 | explicit SyncedMemory(size_t size) 46 | : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), 47 | own_cpu_data_(false) {} 48 | ~SyncedMemory(); 49 | const void* cpu_data(); 50 | void set_cpu_data(void* data); 51 | const void* gpu_data(); 52 | void* mutable_cpu_data(); 53 | void* mutable_gpu_data(); 54 | enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; 55 | SyncedHead head() { return head_; } 56 | size_t size() { return size_; } 57 | 58 | private: 59 | void to_cpu(); 60 | void to_gpu(); 61 | void* cpu_ptr_; 62 | void* gpu_ptr_; 63 | size_t size_; 64 | SyncedHead head_; 65 | bool own_cpu_data_; 66 | 67 | DISABLE_COPY_AND_ASSIGN(SyncedMemory); 68 | };  // class SyncedMemory 69 | 70 | }  // namespace caffe 71 | 72 | #endif  // CAFFE_SYNCEDMEM_HPP_ 73 | -------------------------------------------------------------------------------- /caffe/include/caffe/test/test_caffe_main.hpp: -------------------------------------------------------------------------------- 1 | // The main caffe test code. Your test cpp code should include this hpp 2 | // to allow a main function to be compiled into the binary. 3 | #ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 4 | #define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 5 | 6 | #include <glog/logging.h> 7 | #include <gtest/gtest.h> 8 | 9 | #include <cstdio> 10 | #include <cstdlib> 11 | 12 | #include "caffe/common.hpp" 13 | 14 | using std::cout; 15 | using std::endl; 16 | 17 | #ifdef CMAKE_BUILD 18 | #include "caffe_config.h" 19 | #else 20 | #define CUDA_TEST_DEVICE -1 21 | #define CMAKE_SOURCE_DIR "src/" 22 | #define EXAMPLES_SOURCE_DIR "examples/" 23 | #define CMAKE_EXT "" 24 | #endif 25 | 26 | int main(int argc, char** argv); 27 | 28 | namespace caffe { 29 | 30 | template <typename TypeParam> 31 | class MultiDeviceTest : public ::testing::Test { 32 | public: 33 | typedef typename TypeParam::Dtype Dtype; 34 | protected: 35 | MultiDeviceTest() { 36 | Caffe::set_mode(TypeParam::device); 37 | } 38 | virtual ~MultiDeviceTest() {} 39 | }; 40 | 41 | typedef ::testing::Types<float, double> TestDtypes; 42 | 43 | struct FloatCPU { 44 | typedef float Dtype; 45 | static const Caffe::Brew device = Caffe::CPU; 46 | }; 47 | 48 | struct DoubleCPU { 49 | typedef double Dtype; 50 | static const Caffe::Brew device = Caffe::CPU; 51 | }; 52 | 53 | #ifdef CPU_ONLY 54 | 55 | typedef ::testing::Types<FloatCPU, DoubleCPU> TestDtypesAndDevices; 56 | 57 | #else 58 | 59 | struct FloatGPU { 60 | typedef float Dtype; 61 | static const Caffe::Brew device = Caffe::GPU; 62 | }; 63 | 64 | struct DoubleGPU { 65 | typedef double Dtype; 66 | static const Caffe::Brew device = Caffe::GPU; 67 | }; 68 | 69 | typedef ::testing::Types<FloatCPU, DoubleCPU, FloatGPU, DoubleGPU> 70 | TestDtypesAndDevices; 71 | 72 | #endif 73 | 74 | }  // namespace caffe 75 | 76 | #endif  // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 77 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include <boost/date_time/posix_time/posix_time.hpp> 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 |
8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | void Start(); 15 | void Stop(); 16 | float MilliSeconds(); 17 | float Seconds(); 18 | 19 | inline bool initted() { return initted_; } 20 | inline bool running() { return running_; } 21 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 22 | 23 | protected: 24 | void Init(); 25 | 26 | bool initted_; 27 | bool running_; 28 | bool has_run_at_least_once_; 29 | #ifndef CPU_ONLY 30 | cudaEvent_t start_gpu_; 31 | cudaEvent_t stop_gpu_; 32 | #endif 33 | boost::posix_time::ptime start_cpu_; 34 | boost::posix_time::ptime stop_cpu_; 35 | float elapsed_milliseconds_; 36 | }; 37 | 38 | }  // namespace caffe 39 | 40 | #endif  // CAFFE_UTIL_BENCHMARK_H_ 41 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/im2col.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_IM2COL_HPP_ 2 | #define _CAFFE_UTIL_IM2COL_HPP_ 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | void im2col_cpu(const Dtype* data_im, const int channels, 8 | const int height, const int width, const int kernel_h, const int kernel_w, 9 | const int pad_h, const int pad_w, const int stride_h, 10 | const int stride_w, Dtype* data_col); 11 | 12 | template <typename Dtype> 13 | void col2im_cpu(const Dtype* data_col, const int channels, 14 | const int height, const int width, const int patch_h, const int patch_w, 15 | const int pad_h, const int pad_w, const int stride_h, 16 | const int stride_w, Dtype* data_im); 17 | 18 | template <typename Dtype> 19 | void im2col_gpu(const Dtype* data_im, const int channels, 20 | const int height, const int width, const int kernel_h, const int kernel_w, 21 | const int pad_h, const int pad_w, const int stride_h, 22 | const int stride_w, Dtype* data_col); 23 | 24 | template <typename Dtype> 25 | void col2im_gpu(const Dtype* data_col, const int channels, 26 | const int height, const int width, const int patch_h, const int patch_w, 27 | const int pad_h, const int pad_w, const int stride_h, 28 | const int stride_w, Dtype* data_im); 29 | 30 | }  // namespace caffe 31 | 32 | #endif  // CAFFE_UTIL_IM2COL_HPP_ 33 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include <string> 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer.
12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | }  // namespace caffe 25 | 26 | #endif  // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/rng.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include <algorithm> 5 | #include <iterator> 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast<caffe::rng_t*>(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template <class RandomAccessIterator, class RandomGenerator> 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits<RandomAccessIterator>::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int<difference_type> dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template <class RandomAccessIterator> 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | }  // namespace caffe 42 | 43 | #endif  // CAFFE_RNG_HPP_ 44 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/thread.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_THREAD_CPP_HPP_ 2 | #define CAFFE_THREAD_CPP_HPP_ 3 | 4 | #include <boost/thread.hpp> 5 | #include "caffe/common.hpp" 6 | #include "caffe/internal_thread.hpp" 7 | 8 | namespace caffe { 9 | 10 | template <typename Callable, typename A1> 11 | Thread::Thread(Callable func, A1 a1) { 12 | this->thread_ = new boost::thread(func, a1); 13 | } 14 | 15 | void Thread::join() { 16 | static_cast<boost::thread*>(this->thread_)->join(); 17 | } 18 | 19 | bool Thread::joinable() { 20 | return static_cast<boost::thread*>(this->thread_)->joinable(); 21 | } 22 | 23 | }  // namespace caffe 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /caffe/include/caffe/util/upgrade_proto.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_UPGRADE_PROTO_H_ 2 | #define CAFFE_UTIL_UPGRADE_PROTO_H_ 3 | 4 | #include <string> 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | #include "caffe/proto/caffe_pretty_print.pb.h" 8 | 9 | namespace caffe { 10 | 11 | // Return true iff any layer contains parameters specified using 12 | // deprecated V0LayerParameter. 13 | bool NetNeedsUpgrade(const NetParameter& net_param); 14 | 15 | // Perform all necessary transformations to upgrade a V0NetParameter into a 16 | // NetParameter (including upgrading padding layers and LayerParameters).
17 | bool UpgradeV0Net(const NetParameter& v0_net_param, NetParameter* net_param); 18 | 19 | // Upgrade NetParameter with padding layers to pad-aware conv layers. 20 | // For any padding layer, remove it and put its pad parameter in any layers 21 | // taking its top blob as input. 22 | // It is an error if any of these layers is not a conv layer. 23 | void UpgradeV0PaddingLayers(const NetParameter& param, 24 | NetParameter* param_upgraded_pad); 25 | 26 | // Upgrade a single V0LayerConnection to the new LayerParameter format. 27 | bool UpgradeLayerParameter(const LayerParameter& v0_layer_connection, 28 | LayerParameter* layer_param); 29 | 30 | LayerParameter_LayerType UpgradeV0LayerType(const string& type); 31 | 32 | // Return true iff any layer contains deprecated data transformation parameters. 33 | bool NetNeedsDataUpgrade(const NetParameter& net_param); 34 | 35 | // Perform all necessary transformations to upgrade old transformation fields 36 | // into a TransformationParameter. 37 | void UpgradeNetDataTransformation(NetParameter* net_param); 38 | 39 | // Convert a NetParameter to NetParameterPrettyPrint used for dumping to 40 | // proto text files. 41 | void NetParameterToPrettyPrint(const NetParameter& param, 42 | NetParameterPrettyPrint* pretty_param); 43 | 44 | // Check for deprecations and upgrade the NetParameter as needed. 45 | void UpgradeNetAsNeeded(NetParameter* param); 46 | 47 | // Read parameters from a file into a NetParameter proto message. 48 | void ReadNetParamsFromTextFileOrDie(const string& param_file, 49 | NetParameter* param); 50 | void ReadNetParamsFromBinaryFileOrDie(const string& param_file, 51 | NetParameter* param); 52 | 53 | }  // namespace caffe 54 | 55 | #endif   // CAFFE_UTIL_UPGRADE_PROTO_H_ 56 | -------------------------------------------------------------------------------- /caffe/matlab/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( Matlab ) -------------------------------------------------------------------------------- /caffe/matlab/caffe/ilsvrc_2012_mean.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/matlab/caffe/ilsvrc_2012_mean.mat -------------------------------------------------------------------------------- /caffe/matlab/caffe/matcaffe_init.m: -------------------------------------------------------------------------------- 1 | function matcaffe_init(use_gpu, model_def_file, model_file) 2 | % matcaffe_init(use_gpu, model_def_file, model_file) 3 | % Initialize matcaffe wrapper 4 | 5 | if nargin < 1 6 | % By default use CPU 7 | use_gpu = 0; 8 | end 9 | if nargin < 2 || isempty(model_def_file) 10 | % By default use imagenet_deploy 11 | model_def_file = '../../models/bvlc_reference_caffenet/deploy.prototxt'; 12 | end 13 | if nargin < 3 || isempty(model_file) 14 | % By default use caffe reference model 15 | model_file = '../../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'; 16 | end 17 | 18 | 19 | if caffe('is_initialized') == 0 20 | if exist(model_file, 'file') == 0 21 | % NOTE: you'll have to get the pre-trained ILSVRC network 22 | error('You need a network model file'); 23 | end 24 | if ~exist(model_def_file,'file') 25 | % NOTE: you'll have to get network definition 26 | error('You need the network prototxt definition'); 27 | end 28 | caffe('init', model_def_file, model_file) 29 | end 30 | fprintf('Done with init\n'); 31 | 32 | % set to use
GPU or CPU 33 | if use_gpu 34 | fprintf('Using GPU Mode\n'); 35 | caffe('set_mode_gpu'); 36 | else 37 | fprintf('Using CPU Mode\n'); 38 | caffe('set_mode_cpu'); 39 | end 40 | fprintf('Done with set_mode\n'); 41 | 42 | % put into test mode 43 | caffe('set_phase_test'); 44 | fprintf('Done with set_phase_test\n'); 45 | -------------------------------------------------------------------------------- /caffe/matlab/caffe/prepare_batch.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------ 2 | function images = prepare_batch(image_files,IMAGE_MEAN,batch_size) 3 | % ------------------------------------------------------------------------ 4 | if nargin < 2 5 | d = load('ilsvrc_2012_mean'); 6 | IMAGE_MEAN = d.image_mean; 7 | end 8 | num_images = length(image_files); 9 | if nargin < 3 10 | batch_size = num_images; 11 | end 12 | 13 | IMAGE_DIM = 256; 14 | CROPPED_DIM = 227; 15 | indices = [0 IMAGE_DIM-CROPPED_DIM] + 1; 16 | center = floor(indices(2) / 2)+1; 17 | 18 | num_images = length(image_files); 19 | images = zeros(CROPPED_DIM,CROPPED_DIM,3,batch_size,'single'); 20 | 21 | parfor i=1:num_images 22 | % read file 23 | fprintf('%c Preparing %s\n',13,image_files{i}); 24 | try 25 | im = imread(image_files{i}); 26 | % resize to fixed input size 27 | im = single(im); 28 | im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); 29 | % Transform GRAY to RGB 30 | if size(im,3) == 1 31 | im = cat(3,im,im,im); 32 | end 33 | % permute from RGB to BGR (IMAGE_MEAN is already BGR) 34 | im = im(:,:,[3 2 1]) - IMAGE_MEAN; 35 | % Crop the center of the image 36 | images(:,:,:,i) = permute(im(center:center+CROPPED_DIM-1,... 37 | center:center+CROPPED_DIM-1,:),[2 1 3]); 38 | catch 39 | warning('Problems with file %s',image_files{i}); 40 | end 41 | end -------------------------------------------------------------------------------- /caffe/matlab/caffe/print_cell.m: -------------------------------------------------------------------------------- 1 | function res=print_cell(input,file,linesep,cellsep) 2 | assert(iscell(input),'The input should be a cell') 3 | if nargin < 4 4 | cellsep = '\t'; 5 | end 6 | if nargin < 3 7 | linesep = '\n'; 8 | end 9 | if exist('file','var') && ~isempty(file) 10 | %% 11 | fid = fopen(file,'w'); 12 | for l=1:length(input) 13 | if iscell(input{l}) 14 | for i=1:length(input{l}) 15 | fprintf(fid,['%s' cellsep],input{l}{i}); 16 | end 17 | fprintf(fid,linesep); 18 | else 19 | if size(input,2) > 1 20 | for i=1:size(input,2) 21 | fprintf(fid,'%s ',input{l,i}); 22 | end 23 | fprintf(fid,linesep); 24 | else 25 | fprintf(fid,['%s' linesep],input{l}); 26 | end 27 | end 28 | end 29 | fclose(fid); 30 | else 31 | res = ''; 32 | for l=1:length(input) 33 | if iscell(input{l}) 34 | for i=1:length(input{l}) 35 | res = [res sprintf([cellsep{1} '%s' cellsep{2}],input{l}{i})]; 36 | end 37 | res = [res sprintf(linesep)]; 38 | else 39 | res = [res sprintf(['%s' linesep],input{l}(:))]; 40 | end 41 | end 42 | end -------------------------------------------------------------------------------- /caffe/matlab/caffe/read_cell.m: -------------------------------------------------------------------------------- 1 | function res=read_cell(filename,linesep,cellsep) 2 | if nargin < 2, linesep='\n'; end 3 | if nargin < 3, cellsep = '\t'; end 4 | if exist(filename,'file') 5 | fid = fopen(filename); 6 | else 7 | % Assume that filename is either a file id or a string 8 | fid = filename; 9 | end 10 | 11 | fileLines =
textscan(fid,'%s','delimiter',linesep,'BufSize',100000); 12 | 13 | fileLines = fileLines{1}; 14 | 15 | if regexp(fileLines{1},cellsep,'once') 16 | fileLines = regexprep(fileLines,['^' cellsep '|' cellsep '$'],''); 17 | res = regexp(fileLines,cellsep,'split'); 18 | res = cell2matcell(res); 19 | else 20 | res = fileLines; 21 | end 22 | -------------------------------------------------------------------------------- /caffe/python/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project( Python ) 2 | 3 | # Python 4 | find_package(PythonLibs REQUIRED) 5 | include_directories(${PYTHON_INCLUDE_DIRS}) 6 | 7 | # Boost.Python 8 | find_package(Boost 1.46 COMPONENTS python REQUIRED) 9 | include_directories(${Boost_INCLUDE_DIRS}) 10 | 11 | file(GLOB_RECURSE Python_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 12 | 13 | add_library(pycaffe SHARED ${Python_SOURCES}) 14 | target_link_libraries(pycaffe caffe ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) 15 | 16 | ### Install ################################################################################# 17 | 18 | install(DIRECTORY caffe DESTINATION python) 19 | install(FILES requirements.txt DESTINATION python) 20 | install(TARGETS pycaffe DESTINATION python/caffe) -------------------------------------------------------------------------------- /caffe/python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver 2 | from .classifier import Classifier 3 | from .detector import Detector 4 | import io 5 | -------------------------------------------------------------------------------- /caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /caffe/python/draw_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Draw a graph of the net architecture. 4 | """ 5 | import os 6 | from google.protobuf import text_format 7 | 8 | import caffe, caffe.draw 9 | from caffe.proto import caffe_pb2 10 | 11 | 12 | def main(argv): 13 | if len(argv) != 3: 14 | print 'Usage: %s input_net_proto_file output_image_file' % \ 15 | os.path.basename(sys.argv[0]) 16 | else: 17 | net = caffe_pb2.NetParameter() 18 | text_format.Merge(open(sys.argv[1]).read(), net) 19 | print 'Drawing net to %s' % sys.argv[2] 20 | caffe.draw.draw_net_to_file(net, sys.argv[2]) 21 | 22 | 23 | if __name__ == '__main__': 24 | import sys 25 | main(sys.argv) 26 | -------------------------------------------------------------------------------- /caffe/python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | scikit-learn>=0.14.1 6 | matplotlib>=1.3.1 7 | ipython>=1.1.0 8 | h5py>=2.2.0 9 | leveldb>=0.191 10 | networkx>=1.8.1 11 | nose>=1.3.0 12 | pandas>=0.12.0 13 | python-dateutil>=1.4,<2 14 | protobuf>=2.5.0 15 | python-gflags>=2.0 16 | -------------------------------------------------------------------------------- /caffe/scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build documentation for display in web browser. 
3 | 4 | PORT=${1:-4000} 5 | 6 | echo "usage: build.sh [port]" 7 | 8 | # Find the docs dir, no matter where the script is called 9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 10 | cd $ROOT_DIR 11 | 12 | # Gather docs. 13 | scripts/gather_examples.sh 14 | 15 | # Generate developer docs. 16 | make docs 17 | 18 | # Display docs using web server. 19 | cd docs 20 | jekyll serve -w -s . -d _site --port=$PORT 21 | -------------------------------------------------------------------------------- /caffe/scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /caffe/scripts/deploy_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Publish documentation to the gh-pages site. 3 | 4 | # The remote for pushing the docs (defaults to origin). 5 | # This is where you will submit the PR to BVLC:gh-pages from. 6 | REMOTE=${1:-origin} 7 | 8 | echo "Generating docs and pushing to $REMOTE:gh-pages..." 9 | echo "To build and view docs when not on master, simply do 'jekyll serve -s docs'." 10 | echo 11 | 12 | REMOTE_URL=`git config --get remote.${REMOTE}.url` 13 | BRANCH=`git rev-parse --abbrev-ref HEAD` 14 | MSG=`git log --oneline -1` 15 | 16 | if [[ $BRANCH = 'master' ]]; then 17 | # Find the docs dir, no matter where the script is called 18 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 19 | DOCS_SITE_DIR=$DIR/../docs/_site 20 | 21 | # Make sure that docs/_site tracks remote:gh-pages. 22 | # If not, then we make a new repo and check out just that branch. 23 | mkdir -p $DOCS_SITE_DIR 24 | cd $DOCS_SITE_DIR 25 | SITE_REMOTE_URL=`git config --get remote.${REMOTE}.url` 26 | SITE_BRANCH=`git rev-parse --abbrev-ref HEAD` 27 | 28 | echo $SITE_REMOTE_URL 29 | echo $SITE_BRANCH 30 | echo `pwd` 31 | 32 | if [[ ( $SITE_REMOTE_URL = $REMOTE_URL ) && ( $SITE_BRANCH = 'gh-pages' ) ]]; then 33 | echo "Confirmed that docs/_site has same remote as main repo, and is on gh-pages." 34 | else 35 | echo "Checking out $REMOTE:gh-pages into docs/_site (will take a little time)." 36 | git init . 37 | git remote add -t gh-pages -f $REMOTE $REMOTE_URL 38 | git checkout gh-pages 39 | fi 40 | 41 | echo "Building the site into docs/_site, and committing the changes." 42 | jekyll build -s .. -d . 43 | git add --all . 
44 | git commit -m "$MSG" 45 | git push $REMOTE gh-pages 46 | 47 | echo "All done!" 48 | cd ../.. 49 | else echo "You must run this deployment script from the 'master' branch." 50 | fi 51 | -------------------------------------------------------------------------------- /caffe/scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh " 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.tar.gz 22 | tar xzf $MODEL_DIR/gist.tar.gz --directory=$MODEL_DIR --strip-components=1 23 | rm $MODEL_DIR/gist.tar.gz 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /caffe/scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /caffe/scripts/travis/travis_build_and_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script called by Travis to do a CPU-only build of and test Caffe. 3 | 4 | set -e 5 | MAKE="make --jobs=$NUM_THREADS --keep-going" 6 | 7 | if $WITH_CMAKE; then 8 | mkdir build 9 | cd build 10 | cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON .. 11 | $MAKE 12 | if ! $WITH_CUDA; then 13 | $MAKE runtest 14 | $MAKE lint 15 | fi 16 | $MAKE clean 17 | cd - 18 | else 19 | if ! $WITH_CUDA; then 20 | export CPU_ONLY=1 21 | fi 22 | $MAKE all test pycaffe warn lint || true 23 | if ! $WITH_CUDA; then 24 | $MAKE runtest 25 | fi 26 | $MAKE all 27 | $MAKE test 28 | $MAKE pycaffe 29 | $MAKE warn 30 | if ! 
$WITH_CUDA; then 31 | $MAKE lint 32 | fi 33 | fi 34 | -------------------------------------------------------------------------------- /caffe/scripts/travis/travis_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script must be run with sudo. 3 | 4 | set -e 5 | 6 | MAKE="make --jobs=$NUM_THREADS" 7 | 8 | # Install apt packages where the Ubuntu 12.04 default and ppa works for Caffe 9 | 10 | # This ppa is for gflags and glog 11 | add-apt-repository -y ppa:tuleu/precise-backports 12 | apt-get -y update 13 | apt-get install \ 14 | wget git curl \ 15 | python-dev python-numpy \ 16 | libleveldb-dev libsnappy-dev libopencv-dev \ 17 | libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev \ 18 | libprotobuf-dev protobuf-compiler \ 19 | libatlas-dev libatlas-base-dev \ 20 | libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \ 21 | bc 22 | 23 | # Add a special apt-repository to install CMake 2.8.9 for CMake Caffe build, 24 | # if needed. By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but 25 | # Caffe requires a minimum CMake version of 2.8.8. 26 | if $WITH_CMAKE; then 27 | add-apt-repository -y ppa:ubuntu-sdk-team/ppa 28 | apt-get -y update 29 | apt-get -y install cmake 30 | fi 31 | 32 | # Install CUDA, if needed 33 | if $WITH_CUDA; then 34 | CUDA_URL=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1204/x86_64/cuda-repo-ubuntu1204_6.5-14_amd64.deb 35 | CUDA_FILE=/tmp/cuda_install.deb 36 | curl $CUDA_URL -o $CUDA_FILE 37 | dpkg -i $CUDA_FILE 38 | rm -f $CUDA_FILE 39 | apt-get -y update 40 | # Install the minimal CUDA subpackages required to test Caffe build. 41 | # For a full CUDA installation, add 'cuda' to the list of packages. 42 | apt-get -y install cuda-core-6-5 cuda-cublas-6-5 cuda-cublas-dev-6-5 cuda-cudart-6-5 cuda-cudart-dev-6-5 cuda-curand-6-5 cuda-curand-dev-6-5 43 | # Create CUDA symlink at /usr/local/cuda 44 | # (This would normally be created by the CUDA installer, but we create it 45 | # manually since we did a partial installation.) 46 | ln -s /usr/local/cuda-6.5 /usr/local/cuda 47 | fi 48 | 49 | # Install LMDB 50 | LMDB_URL=ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.39.tgz 51 | LMDB_FILE=/tmp/openldap.tgz 52 | pushd . 53 | curl $LMDB_URL -o $LMDB_FILE 54 | tar -C /tmp -xzvf $LMDB_FILE 55 | cd /tmp/openldap*/libraries/liblmdb/ 56 | $MAKE 57 | $MAKE install 58 | popd 59 | rm -f $LMDB_FILE 60 | -------------------------------------------------------------------------------- /caffe/scripts/travis/travis_setup_makefile_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mv Makefile.config.example Makefile.config 6 | 7 | if $WITH_CUDA; then 8 | # Remove default gencode set; only generate compute_50. 9 | sed -i 's/-gencode arch=.*\\//' Makefile.config 10 | sed -i 's/CUDA_ARCH :=//' Makefile.config 11 | GENCODE="-gencode arch=compute_50,code=sm_50" 12 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" 13 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config 14 | fi 15 | -------------------------------------------------------------------------------- /caffe/scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! 
-f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh " 7 | echo " /readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -type f -maxdepth 1 ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " /readme.md must contain name field in the front-matter." 18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your /readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!" 38 | fi 39 | -------------------------------------------------------------------------------- /caffe/src/caffe/internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/internal_thread.hpp" 2 | 3 | #include "caffe/util/thread.hpp" 4 | 5 | namespace caffe { 6 | 7 | InternalThread::~InternalThread() { 8 | WaitForInternalThreadToExit(); 9 | if (thread_ != NULL) { 10 | delete thread_; 11 | } 12 | } 13 | 14 | bool InternalThread::StartInternalThread() { 15 | if (!WaitForInternalThreadToExit()) { 16 | return false; 17 | } 18 | try { 19 | thread_ = new caffe::Thread 20 | (&InternalThread::InternalThreadEntry, this); 21 | } catch (...) { 22 | return false; 23 | } 24 | return true; 25 | } 26 | 27 | /** Will not return until the internal thread has exited. */ 28 | bool InternalThread::WaitForInternalThreadToExit() { 29 | if (is_started()) { 30 | try { 31 | thread_->join(); 32 | } catch (...) 
{ 33 | return false; 34 | } 35 | } 36 | return true; 37 | } 38 | 39 | } // namespace caffe 40 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/absval_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/neuron_layers.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void AbsValLayer::LayerSetUp(const vector*>& bottom, 11 | vector*>* top) { 12 | NeuronLayer::LayerSetUp(bottom, top); 13 | CHECK_NE((*top)[0], bottom[0]) << this->type_name() << " Layer does not " 14 | "allow in-place computation."; 15 | } 16 | 17 | template 18 | void AbsValLayer::Forward_cpu( 19 | const vector*>& bottom, vector*>* top) { 20 | const int count = (*top)[0]->count(); 21 | Dtype* top_data = (*top)[0]->mutable_cpu_data(); 22 | caffe_abs(count, bottom[0]->cpu_data(), top_data); 23 | } 24 | 25 | template 26 | void AbsValLayer::Backward_cpu(const vector*>& top, 27 | const vector& propagate_down, vector*>* bottom) { 28 | const int count = top[0]->count(); 29 | const Dtype* top_data = top[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | if (propagate_down[0]) { 32 | const Dtype* bottom_data = (*bottom)[0]->cpu_data(); 33 | Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); 34 | caffe_div(count, top_data, bottom_data, bottom_diff); 35 | caffe_mul(count, bottom_diff, top_diff, bottom_diff); 36 | } 37 | } 38 | 39 | #ifdef CPU_ONLY 40 | STUB_GPU(AbsValLayer); 41 | #endif 42 | 43 | INSTANTIATE_CLASS(AbsValLayer); 44 | 45 | 46 | } // namespace caffe 47 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/absval_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void AbsValLayer::Forward_gpu( 11 | const vector*>& bottom, vector*>* top) { 12 | const int count = (*top)[0]->count(); 13 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 14 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 15 | } 16 | 17 | template 18 | void AbsValLayer::Backward_gpu(const vector*>& top, 19 | const vector& propagate_down, vector*>* bottom) { 20 | const int count = top[0]->count(); 21 | const Dtype* top_data = top[0]->gpu_data(); 22 | const Dtype* top_diff = top[0]->gpu_diff(); 23 | if (propagate_down[0]) { 24 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 25 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 26 | caffe_gpu_div(count, top_data, bottom_data, bottom_diff); 27 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 28 | } 29 | } 30 | 31 | INSTANTIATE_CLASS(AbsValLayer); 32 | 33 | 34 | } // namespace caffe 35 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/accuracy_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "caffe/layer.hpp" 7 | #include "caffe/util/io.hpp" 8 | #include "caffe/util/math_functions.hpp" 9 | #include "caffe/vision_layers.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void AccuracyLayer::LayerSetUp( 15 | const vector*>& bottom, vector*>* top) { 16 | top_k_ = this->layer_param_.accuracy_param().top_k(); 17 | } 18 | 
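// Example: with four class scores {0.1, 0.5, 0.3, 0.1} and label 2,
// top_k_ = 1 counts the sample as wrong (the argmax is class 1), while
// top_k_ = 2 counts it as correct, since class 2 has the second-highest score.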
19 | template <typename Dtype> 20 | void AccuracyLayer<Dtype>::Reshape( 21 | const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { 22 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()) 23 | << "The data and label should have the same number."; 24 | CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) 25 | << "top_k must be less than or equal to the number of classes."; 26 | CHECK_EQ(bottom[1]->channels(), 1); 27 | CHECK_EQ(bottom[1]->height(), 1); 28 | CHECK_EQ(bottom[1]->width(), 1); 29 | (*top)[0]->Reshape(1, 1, 1, 1); 30 | } 31 | 32 | template <typename Dtype> 33 | void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 34 | vector<Blob<Dtype>*>* top) { 35 | Dtype accuracy = 0; 36 | const Dtype* bottom_data = bottom[0]->cpu_data(); 37 | const Dtype* bottom_label = bottom[1]->cpu_data(); 38 | int num = bottom[0]->num(); 39 | int dim = bottom[0]->count() / bottom[0]->num(); 40 | vector<Dtype> maxval(top_k_+1); 41 | vector<int> max_id(top_k_+1); 42 | for (int i = 0; i < num; ++i) { 43 | // Top-k accuracy 44 | std::vector<std::pair<Dtype, int> > bottom_data_vector; 45 | for (int j = 0; j < dim; ++j) { 46 | bottom_data_vector.push_back( 47 | std::make_pair(bottom_data[i * dim + j], j)); 48 | } 49 | std::partial_sort( 50 | bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, 51 | bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >()); 52 | // check if true label is in top k predictions 53 | for (int k = 0; k < top_k_; k++) { 54 | if (bottom_data_vector[k].second == static_cast<int>(bottom_label[i])) { 55 | ++accuracy; 56 | break; 57 | } 58 | } 59 | } 60 | 61 | // LOG(INFO) << "Accuracy: " << accuracy; 62 | (*top)[0]->mutable_cpu_data()[0] = accuracy / num; 63 | // Accuracy layer should not be used as a loss function. 64 | } 65 | 66 | INSTANTIATE_CLASS(AccuracyLayer); 67 | 68 | } // namespace caffe 69 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/argmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <algorithm> 2 | #include <functional> 3 | #include <utility> 4 | #include <vector> 5 | 6 | #include "caffe/layer.hpp" 7 | #include "caffe/vision_layers.hpp" 8 | 9 | namespace caffe { 10 | 11 | template <typename Dtype> 12 | void ArgMaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom, 13 | vector<Blob<Dtype>*>* top) { 14 | out_max_val_ = this->layer_param_.argmax_param().out_max_val(); 15 | top_k_ = this->layer_param_.argmax_param().top_k(); 16 | CHECK_GE(top_k_, 1) << " top k must not be less than 1."; 17 | CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) 18 | << "top_k must be less than or equal to the number of classes."; 19 | } 20 | 21 | template <typename Dtype> 22 | void ArgMaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom, 23 | vector<Blob<Dtype>*>* top) { 24 | if (out_max_val_) { 25 | // Produces max_ind and max_val 26 | (*top)[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); 27 | } else { 28 | // Produces only max_ind 29 | (*top)[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); 30 | } 31 | } 32 | 33 | template <typename Dtype> 34 | void ArgMaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 35 | vector<Blob<Dtype>*>* top) { 36 | const Dtype* bottom_data = bottom[0]->cpu_data(); 37 | Dtype* top_data = (*top)[0]->mutable_cpu_data(); 38 | int num = bottom[0]->num(); 39 | int dim = bottom[0]->count() / bottom[0]->num(); 40 | for (int i = 0; i < num; ++i) { 41 | std::vector<std::pair<Dtype, int> > bottom_data_vector; 42 | for (int j = 0; j < dim; ++j) { 43 | bottom_data_vector.push_back( 44 | std::make_pair(bottom_data[i * dim + j], j)); 45 | } 46 | std::partial_sort( 47 | bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, 48 | bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >()); 49 | for (int j = 0; j < top_k_; ++j) { 50 |
top_data[(*top)[0]->offset(i, 0, j)] = bottom_data_vector[j].second; 51 | } 52 | if (out_max_val_) { 53 | for (int j = 0; j < top_k_; ++j) { 54 | top_data[(*top)[0]->offset(i, 1, j)] = bottom_data_vector[j].first; 55 | } 56 | } 57 | } 58 | } 59 | 60 | INSTANTIATE_CLASS(ArgMaxLayer); 61 | 62 | } // namespace caffe 63 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/data_layers.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void BasePrefetchingDataLayer::Forward_gpu( 9 | const vector*>& bottom, vector*>* top) { 10 | // First, join the thread 11 | JoinPrefetchThread(); 12 | // Copy the data 13 | caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), 14 | (*top)[0]->mutable_gpu_data()); 15 | if (this->output_labels_) { 16 | caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), 17 | (*top)[1]->mutable_gpu_data()); 18 | } 19 | // Start a new prefetch thread 20 | CreatePrefetchThread(); 21 | } 22 | 23 | template 24 | void BaseShufflingDataLayer::Forward_gpu( 25 | const vector*>& bottom, vector*>* top) { 26 | int batch_size = this->layer_param_.shuffling_data_param().batch_size(); 27 | int offset = 0; 28 | int N = prefetch_data_[current_buffer_].num(); 29 | int dim = prefetch_data_[current_buffer_].count() / N; 30 | while (batch_size - offset) { 31 | if (current_row_ == N) { 32 | JoinPrefetchThread(); 33 | current_row_ = 0; 34 | current_buffer_ = 1 - current_buffer_; 35 | CreatePrefetchThread(); 36 | } 37 | int avail = std::min(batch_size-offset, N - current_row_); 38 | caffe_copy(avail * dim, prefetch_data_[current_buffer_].cpu_data() + 39 | prefetch_data_[current_buffer_].offset(current_row_), 40 | (*top)[0]->mutable_gpu_data() + (*top)[0]->offset(offset)); 41 | if (this->output_labels_) { 42 | dim = prefetch_label_[current_buffer_].count() / N; 43 | caffe_copy(avail * dim, prefetch_label_[current_buffer_].cpu_data() + 44 | prefetch_label_[current_buffer_].offset(current_row_), 45 | (*top)[1]->mutable_gpu_data() + (*top)[1]->offset(offset)); 46 | } 47 | current_row_ += avail; 48 | offset += avail; 49 | } 50 | } 51 | 52 | INSTANTIATE_CLASS(BasePrefetchingDataLayer); 53 | INSTANTIATE_CLASS(BaseShufflingDataLayer); 54 | 55 | } // namespace caffe 56 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/bnll_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | const float kBNLL_THRESHOLD = 50.; 10 | 11 | template 12 | void BNLLLayer::Forward_cpu(const vector*>& bottom, 13 | vector*>* top) { 14 | const Dtype* bottom_data = bottom[0]->cpu_data(); 15 | Dtype* top_data = (*top)[0]->mutable_cpu_data(); 16 | const int count = bottom[0]->count(); 17 | for (int i = 0; i < count; ++i) { 18 | top_data[i] = bottom_data[i] > 0 ? 19 | bottom_data[i] + log(1. + exp(-bottom_data[i])) : 20 | log(1. 
+ exp(bottom_data[i])); 21 | } 22 | } 23 | 24 | template <typename Dtype> 25 | void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 26 | const vector<bool>& propagate_down, 27 | vector<Blob<Dtype>*>* bottom) { 28 | if (propagate_down[0]) { 29 | const Dtype* bottom_data = (*bottom)[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); 32 | const int count = (*bottom)[0]->count(); 33 | Dtype expval; 34 | for (int i = 0; i < count; ++i) { 35 | expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); 36 | bottom_diff[i] = top_diff[i] * expval / (expval + 1.); 37 | } 38 | } 39 | } 40 | 41 | #ifdef CPU_ONLY 42 | STUB_GPU(BNLLLayer); 43 | #endif 44 | 45 | INSTANTIATE_CLASS(BNLLLayer); 46 | 47 | 48 | } // namespace caffe 49 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/bnll_layer.cu: -------------------------------------------------------------------------------- 1 | #include <algorithm> 2 | #include <vector> 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | const float kBNLL_THRESHOLD = 50.; 10 | 11 | template <typename Dtype> 12 | __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { 13 | CUDA_KERNEL_LOOP(index, n) { 14 | out[index] = in[index] > 0 ? 15 | in[index] + log(1. + exp(-in[index])) : 16 | log(1. + exp(in[index])); 17 | } 18 | } 19 | 20 | template <typename Dtype> 21 | void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 22 | vector<Blob<Dtype>*>* top) { 23 | const Dtype* bottom_data = bottom[0]->gpu_data(); 24 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 25 | const int count = bottom[0]->count(); 26 | // NOLINT_NEXT_LINE(whitespace/operators) 27 | BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 28 | count, bottom_data, top_data); 29 | CUDA_POST_KERNEL_CHECK; 30 | } 31 | 32 | template <typename Dtype> 33 | __global__ void BNLLBackward(const int n, const Dtype* in_diff, 34 | const Dtype* in_data, Dtype* out_diff) { 35 | CUDA_KERNEL_LOOP(index, n) { 36 | Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); 37 | out_diff[index] = in_diff[index] * expval / (expval + 1.); 38 | } 39 | } 40 | 41 | template <typename Dtype> 42 | void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 43 | const vector<bool>& propagate_down, 44 | vector<Blob<Dtype>*>* bottom) { 45 | if (propagate_down[0]) { 46 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 47 | const Dtype* top_diff = top[0]->gpu_diff(); 48 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 49 | const int count = (*bottom)[0]->count(); 50 | // NOLINT_NEXT_LINE(whitespace/operators) 51 | BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 52 | count, top_diff, bottom_data, bottom_diff); 53 | CUDA_POST_KERNEL_CHECK; 54 | } 55 | } 56 | 57 | INSTANTIATE_CLASS(BNLLLayer); 58 | 59 | 60 | } // namespace caffe 61 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cross_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/io.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template <typename Dtype> 11 | void CrossLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 12 | vector<Blob<Dtype>*>* top) { 13 | Forward_cpu(bottom, top); 14 | } 15 | 16 | template <typename Dtype> 17 | void CrossLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 18 | const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { 19 | Backward_cpu(top, propagate_down, bottom); 20 | } 21 | 22 | INSTANTIATE_CLASS(CrossLossLayer); 23 | 24 | } // namespace caffe 25 |
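cross_loss_layer.cu above contains no CUDA kernels at all: both GPU entry points simply delegate to the CPU implementations. This is a common stopgap in this codebase, and it is always correct because Blob lazily syncs its memory between host and device. A minimal sketch of the pattern for a hypothetical FooLayer (the layer name is illustrative, not from this repo):

#include <vector>

#include "caffe/vision_layers.hpp"

namespace caffe {

// GPU entry points delegate to the CPU path; Blob's synced memory moves the
// data to the host on cpu_data() and back to the device when gpu_data() is
// next requested, so the net still runs end to end, just without speedup.
template <typename Dtype>
void FooLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  Forward_cpu(bottom, top);
}

template <typename Dtype>
void FooLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

INSTANTIATE_CLASS(FooLayer);

}  // namespace caffe

Real kernels can later replace the delegating bodies without changing the layer's interface or its prototxt usage.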
-------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_pooling_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/filler.hpp" 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/im2col.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | #include "caffe/vision_layers.hpp" 9 | 10 | namespace caffe { 11 | 12 | template 13 | void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, 14 | vector*>* top) { 15 | PoolingLayer::LayerSetUp(bottom, top); 16 | 17 | CUDNN_CHECK(cudnnCreate(&handle_)); 18 | cudnn::createTensor4dDesc(&bottom_desc_); 19 | cudnn::createTensor4dDesc(&top_desc_); 20 | cudnn::createPoolingDesc(&pooling_desc_, 21 | this->layer_param_.pooling_param().pool(), &mode_, 22 | this->kernel_h_, this->kernel_w_, this->stride_h_, this->stride_w_); 23 | } 24 | 25 | template 26 | void CuDNNPoolingLayer::Reshape(const vector*>& bottom, 27 | vector*>* top) { 28 | PoolingLayer::Reshape(bottom, top); 29 | cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), 30 | this->channels_, this->height_, this->width_); 31 | cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), 32 | this->channels_, this->pooled_height_, this->pooled_width_); 33 | } 34 | 35 | template 36 | CuDNNPoolingLayer::~CuDNNPoolingLayer() { 37 | cudnnDestroyTensor4dDescriptor(bottom_desc_); 38 | cudnnDestroyTensor4dDescriptor(top_desc_); 39 | cudnnDestroyPoolingDescriptor(pooling_desc_); 40 | cudnnDestroy(handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNPoolingLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_pooling_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/filler.hpp" 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/im2col.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | #include "caffe/vision_layers.hpp" 9 | 10 | namespace caffe { 11 | 12 | template 13 | void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, 14 | vector*>* top) { 15 | // Fallback to Caffe for padded pooling, max top mask. 16 | if ((this->pad_h_ > 0 || this->pad_w_ > 0) || (*top).size() > 1) { 17 | LOG(WARNING) << "Falling back to standard Caffe for padded pooling."; 18 | return PoolingLayer::Forward_gpu(bottom, top); 19 | } 20 | 21 | const Dtype* bottom_data = bottom[0]->gpu_data(); 22 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 23 | CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, 24 | bottom_desc_, bottom_data, top_desc_, top_data)); 25 | } 26 | 27 | template 28 | void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, 29 | const vector& propagate_down, vector*>* bottom) { 30 | if (!propagate_down[0]) { 31 | return; 32 | } 33 | 34 | // Fallback to Caffe for padded pooling, max top mask. 
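// (cuDNN pooling at this version takes only a window and stride, with no
// padding support, and cannot return the explicit max-index mask that a
// second top blob requests; hence either condition below falls back to the
// plain Caffe kernels.)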
35 | if ((this->pad_h_ > 0 || this->pad_w_ > 0) || top.size() > 1) { 36 | LOG(WARNING) << "Falling back to standard Caffe for padded pooling."; 37 | return PoolingLayer::Backward_gpu(top, propagate_down, bottom); 38 | } 39 | 40 | const Dtype* top_diff = top[0]->gpu_diff(); 41 | const Dtype* top_data = top[0]->gpu_data(); 42 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 43 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 44 | CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, 45 | top_desc_, top_data, top_desc_, top_diff, 46 | bottom_desc_, bottom_data, bottom_desc_, bottom_diff)); 47 | } 48 | 49 | INSTANTIATE_CLASS(CuDNNPoolingLayer); 50 | 51 | } // namespace caffe 52 | #endif 53 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_relu_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, 12 | vector*>* top) { 13 | ReLULayer::LayerSetUp(bottom, top); 14 | // initialize cuDNN 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | } 19 | 20 | template 21 | void CuDNNReLULayer::Reshape(const vector*>& bottom, 22 | vector*>* top) { 23 | ReLULayer::Reshape(bottom, top); 24 | const int N = bottom[0]->num(); 25 | const int K = bottom[0]->channels(); 26 | const int H = bottom[0]->height(); 27 | const int W = bottom[0]->width(); 28 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 29 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 30 | } 31 | 32 | template 33 | CuDNNReLULayer::~CuDNNReLULayer() { 34 | cudnnDestroyTensor4dDescriptor(this->bottom_desc_); 35 | cudnnDestroyTensor4dDescriptor(this->top_desc_); 36 | cudnnDestroy(this->handle_); 37 | } 38 | 39 | INSTANTIATE_CLASS(CuDNNReLULayer); 40 | 41 | } // namespace caffe 42 | #endif 43 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_relu_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | // Fallback to standard Caffe for leaky ReLU. 14 | if (ReLULayer::layer_param_.relu_param().negative_slope() != 0 15 | || !ReLULayer::layer_param_.relu_param().positive_half()) { 16 | return ReLULayer::Forward_gpu(bottom, top); 17 | } 18 | 19 | const Dtype* bottom_data = bottom[0]->gpu_data(); 20 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 21 | CUDNN_CHECK(cudnnActivationForward(this->handle_, 22 | CUDNN_ACTIVATION_RELU, 23 | this->bottom_desc_, bottom_data, this->top_desc_, top_data)); 24 | } 25 | 26 | template 27 | void CuDNNReLULayer::Backward_gpu(const vector*>& top, 28 | const vector& propagate_down, 29 | vector*>* bottom) { 30 | if (!propagate_down[0]) { 31 | return; 32 | } 33 | 34 | // Fallback to standard Caffe for leaky ReLU. 
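// (CUDNN_ACTIVATION_RELU differentiates plain max(x, 0) only, so a nonzero
// negative_slope, i.e. leaky ReLU, or this fork's capped variant selected by
// positive_half == false must use the hand-written Caffe kernels instead.)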
35 | if (ReLULayer::layer_param_.relu_param().negative_slope() != 0 36 | || !ReLULayer::layer_param_.relu_param().positive_half()) { 37 | return ReLULayer::Backward_gpu(top, propagate_down, bottom); 38 | } 39 | 40 | const Dtype* top_data = top[0]->gpu_data(); 41 | const Dtype* top_diff = top[0]->gpu_diff(); 42 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 43 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 44 | CUDNN_CHECK(cudnnActivationBackward(this->handle_, 45 | CUDNN_ACTIVATION_RELU, 46 | this->top_desc_, top_data, this->top_desc_, top_diff, 47 | this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); 48 | } 49 | 50 | INSTANTIATE_CLASS(CuDNNReLULayer); 51 | 52 | } // namespace caffe 53 | #endif 54 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, 12 | vector*>* top) { 13 | SigmoidLayer::LayerSetUp(bottom, top); 14 | // initialize cuDNN 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | } 19 | 20 | template 21 | void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, 22 | vector*>* top) { 23 | SigmoidLayer::Reshape(bottom, top); 24 | const int N = bottom[0]->num(); 25 | const int K = bottom[0]->channels(); 26 | const int H = bottom[0]->height(); 27 | const int W = bottom[0]->width(); 28 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 29 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 30 | } 31 | 32 | template 33 | CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { 34 | cudnnDestroyTensor4dDescriptor(this->bottom_desc_); 35 | cudnnDestroyTensor4dDescriptor(this->top_desc_); 36 | cudnnDestroy(this->handle_); 37 | } 38 | 39 | INSTANTIATE_CLASS(CuDNNSigmoidLayer); 40 | 41 | } // namespace caffe 42 | #endif 43 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_sigmoid_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | const Dtype* bottom_data = bottom[0]->gpu_data(); 14 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 15 | CUDNN_CHECK(cudnnActivationForward(this->handle_, 16 | CUDNN_ACTIVATION_SIGMOID, 17 | this->bottom_desc_, bottom_data, this->top_desc_, top_data)); 18 | } 19 | 20 | template 21 | void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, 22 | const vector& propagate_down, 23 | vector*>* bottom) { 24 | if (!propagate_down[0]) { 25 | return; 26 | } 27 | 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* top_diff = top[0]->gpu_diff(); 30 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 31 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 32 | CUDNN_CHECK(cudnnActivationBackward(this->handle_, 33 | CUDNN_ACTIVATION_SIGMOID, 34 | this->top_desc_, top_data, this->top_desc_, top_diff, 35 | this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); 36 | } 37 | 38 | 
INSTANTIATE_CLASS(CuDNNSigmoidLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_softmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | #include 5 | 6 | #include "thrust/device_vector.h" 7 | 8 | #include "caffe/layer.hpp" 9 | #include "caffe/util/math_functions.hpp" 10 | #include "caffe/vision_layers.hpp" 11 | 12 | namespace caffe { 13 | 14 | template 15 | void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, 16 | vector*>* top) { 17 | SoftmaxLayer::LayerSetUp(bottom, top); 18 | // Initialize CUDNN. 19 | CUDNN_CHECK(cudnnCreate(&handle_)); 20 | cudnn::createTensor4dDesc(&bottom_desc_); 21 | cudnn::createTensor4dDesc(&top_desc_); 22 | } 23 | 24 | template 25 | void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, 26 | vector*>* top) { 27 | SoftmaxLayer::Reshape(bottom, top); 28 | int N = bottom[0]->num(); 29 | int K = bottom[0]->channels(); 30 | int H = bottom[0]->height(); 31 | int W = bottom[0]->width(); 32 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 33 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 34 | } 35 | 36 | template 37 | CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { 38 | cudnnDestroyTensor4dDescriptor(bottom_desc_); 39 | cudnnDestroyTensor4dDescriptor(top_desc_); 40 | cudnnDestroy(handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNSoftmaxLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_softmax_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | #include 5 | 6 | #include "thrust/device_vector.h" 7 | 8 | #include "caffe/layer.hpp" 9 | #include "caffe/util/math_functions.hpp" 10 | #include "caffe/vision_layers.hpp" 11 | 12 | namespace caffe { 13 | 14 | template 15 | void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, 16 | vector*>* top) { 17 | const Dtype* bottom_data = bottom[0]->gpu_data(); 18 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 19 | CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE, 20 | CUDNN_SOFTMAX_MODE_CHANNEL, 21 | bottom_desc_, bottom_data, top_desc_, top_data)); 22 | } 23 | 24 | template 25 | void CuDNNSoftmaxLayer::Backward_gpu(const vector*>& top, 26 | const vector& propagate_down, vector*>* bottom) { 27 | if (propagate_down[0]) { 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* top_diff = top[0]->gpu_diff(); 30 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 31 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 32 | CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE, 33 | CUDNN_SOFTMAX_MODE_CHANNEL, 34 | top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff)); 35 | } 36 | } 37 | 38 | INSTANTIATE_CLASS(CuDNNSoftmaxLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, 12 | vector*>* top) { 13 | TanHLayer::LayerSetUp(bottom, 
top); 14 | // initialize cuDNN 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | } 19 | 20 | template 21 | void CuDNNTanHLayer::Reshape(const vector*>& bottom, 22 | vector*>* top) { 23 | TanHLayer::Reshape(bottom, top); 24 | const int N = bottom[0]->num(); 25 | const int K = bottom[0]->channels(); 26 | const int H = bottom[0]->height(); 27 | const int W = bottom[0]->width(); 28 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 29 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 30 | } 31 | 32 | template 33 | CuDNNTanHLayer::~CuDNNTanHLayer() { 34 | cudnnDestroyTensor4dDescriptor(this->bottom_desc_); 35 | cudnnDestroyTensor4dDescriptor(this->top_desc_); 36 | cudnnDestroy(this->handle_); 37 | } 38 | 39 | INSTANTIATE_CLASS(CuDNNTanHLayer); 40 | 41 | } // namespace caffe 42 | #endif 43 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/cudnn_tanh_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | const Dtype* bottom_data = bottom[0]->gpu_data(); 14 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 15 | CUDNN_CHECK(cudnnActivationForward(this->handle_, 16 | CUDNN_ACTIVATION_TANH, 17 | this->bottom_desc_, bottom_data, this->top_desc_, top_data)); 18 | } 19 | 20 | template 21 | void CuDNNTanHLayer::Backward_gpu(const vector*>& top, 22 | const vector& propagate_down, 23 | vector*>* bottom) { 24 | if (!propagate_down[0]) { 25 | return; 26 | } 27 | 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* top_diff = top[0]->gpu_diff(); 30 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 31 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 32 | CUDNN_CHECK(cudnnActivationBackward(this->handle_, 33 | CUDNN_ACTIVATION_TANH, 34 | this->top_desc_, top_data, this->top_desc_, top_diff, 35 | this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); 36 | } 37 | 38 | INSTANTIATE_CLASS(CuDNNTanHLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/euclidean_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/io.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void EuclideanLossLayer::Reshape( 12 | const vector*>& bottom, vector*>* top) { 13 | LossLayer::Reshape(bottom, top); 14 | CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); 15 | CHECK_EQ(bottom[0]->height(), bottom[1]->height()); 16 | CHECK_EQ(bottom[0]->width(), bottom[1]->width()); 17 | diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 18 | bottom[0]->height(), bottom[0]->width()); 19 | } 20 | 21 | template 22 | void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, 23 | vector*>* top) { 24 | int count = bottom[0]->count(); 25 | caffe_sub( 26 | count, 27 | bottom[0]->cpu_data(), 28 | bottom[1]->cpu_data(), 29 | diff_.mutable_cpu_data()); 30 | Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); 31 | Dtype loss = dot / count / Dtype(2); 32 | 
(*top)[0]->mutable_cpu_data()[0] = loss; 33 | } 34 | 35 | template 36 | void EuclideanLossLayer::Backward_cpu(const vector*>& top, 37 | const vector& propagate_down, vector*>* bottom) { 38 | for (int i = 0; i < 2; ++i) { 39 | if (propagate_down[i]) { 40 | const Dtype sign = (i == 0) ? 1 : -1; 41 | const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->count(); 42 | caffe_cpu_axpby( 43 | (*bottom)[i]->count(), // count 44 | alpha, // alpha 45 | diff_.cpu_data(), // a 46 | Dtype(0), // beta 47 | (*bottom)[i]->mutable_cpu_diff()); // b 48 | } 49 | } 50 | } 51 | 52 | #ifdef CPU_ONLY 53 | STUB_GPU(EuclideanLossLayer); 54 | #endif 55 | 56 | INSTANTIATE_CLASS(EuclideanLossLayer); 57 | 58 | } // namespace caffe 59 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/euclidean_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/io.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | int count = bottom[0]->count(); 14 | caffe_gpu_sub( 15 | count, 16 | bottom[0]->gpu_data(), 17 | bottom[1]->gpu_data(), 18 | diff_.mutable_gpu_data()); 19 | Dtype dot; 20 | caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); 21 | Dtype loss = dot / count / Dtype(2); 22 | (*top)[0]->mutable_cpu_data()[0] = loss; 23 | } 24 | 25 | template 26 | void EuclideanLossLayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, vector*>* bottom) { 28 | for (int i = 0; i < 2; ++i) { 29 | if (propagate_down[i]) { 30 | const Dtype sign = (i == 0) ? 
1 : -1; 31 | const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->count(); 32 | caffe_gpu_axpby( 33 | (*bottom)[i]->count(), // count 34 | alpha, // alpha 35 | diff_.gpu_data(), // a 36 | Dtype(0), // beta 37 | (*bottom)[i]->mutable_gpu_diff()); // b 38 | } 39 | } 40 | } 41 | 42 | INSTANTIATE_CLASS(EuclideanLossLayer); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/flatten_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void FlattenLayer::Reshape(const vector*>& bottom, 11 | vector*>* top) { 12 | int channels_out = bottom[0]->channels() * bottom[0]->height() 13 | * bottom[0]->width(); 14 | (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1); 15 | count_ = bottom[0]->num() * channels_out; 16 | CHECK_EQ(count_, bottom[0]->count()); 17 | CHECK_EQ(count_, (*top)[0]->count()); 18 | } 19 | 20 | template 21 | void FlattenLayer::Forward_cpu(const vector*>& bottom, 22 | vector*>* top) { 23 | (*top)[0]->ShareData(*bottom[0]); 24 | } 25 | 26 | template 27 | void FlattenLayer::Backward_cpu(const vector*>& top, 28 | const vector& propagate_down, vector*>* bottom) { 29 | (*bottom)[0]->ShareDiff(*top[0]); 30 | } 31 | 32 | #ifdef CPU_ONLY 33 | STUB_GPU(FlattenLayer); 34 | #endif 35 | 36 | INSTANTIATE_CLASS(FlattenLayer); 37 | 38 | } // namespace caffe 39 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/flatten_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void FlattenLayer::Forward_gpu(const vector*>& bottom, 11 | vector*>* top) { 12 | (*top)[0]->ShareData(*bottom[0]); 13 | } 14 | 15 | template 16 | void FlattenLayer::Backward_gpu(const vector*>& top, 17 | const vector& propagate_down, vector*>* bottom) { 18 | (*bottom)[0]->ShareDiff(*top[0]); 19 | } 20 | 21 | INSTANTIATE_CLASS(FlattenLayer); 22 | 23 | } // namespace caffe 24 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/hdf5_data_layer.cu: -------------------------------------------------------------------------------- 1 | /* 2 | TODO: 3 | - only load parts of the file, in accordance with a prototxt param "max_mem" 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include "hdf5.h" 11 | #include "hdf5_hl.h" 12 | 13 | #include "caffe/layer.hpp" 14 | #include "caffe/util/io.hpp" 15 | #include "caffe/vision_layers.hpp" 16 | 17 | namespace caffe { 18 | 19 | template 20 | void HDF5DataLayer::Forward_gpu(const vector*>& bottom, 21 | vector*>* top) { 22 | const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); 23 | const int data_count = (*top)[0]->count() / (*top)[0]->num(); 24 | const int label_data_count = (*top)[1]->count() / (*top)[1]->num(); 25 | 26 | for (int i = 0; i < batch_size; ++i, ++current_row_) { 27 | if (current_row_ == data_blob_.num()) { 28 | if (num_files_ > 1) { 29 | current_file_ += 1; 30 | 31 | if (current_file_ == num_files_) { 32 | current_file_ = 0; 33 | LOG(INFO) << "looping around to first file"; 34 | } 35 | 36 | 
LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); 37 | } 38 | current_row_ = 0; 39 | } 40 | caffe_copy(data_count, 41 | &data_blob_.cpu_data()[current_row_ * data_count], 42 | &(*top)[0]->mutable_gpu_data()[i * data_count]); 43 | caffe_copy(label_data_count, 44 | &label_blob_.cpu_data()[current_row_ * label_data_count], 45 | &(*top)[1]->mutable_gpu_data()[i * label_data_count]); 46 | } 47 | } 48 | 49 | INSTANTIATE_CLASS(HDF5DataLayer); 50 | 51 | } // namespace caffe 52 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/hdf5_output_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "hdf5.h" 4 | #include "hdf5_hl.h" 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/common.hpp" 8 | #include "caffe/layer.hpp" 9 | #include "caffe/util/io.hpp" 10 | #include "caffe/vision_layers.hpp" 11 | 12 | namespace caffe { 13 | 14 | template 15 | void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, 16 | vector*>* top) { 17 | CHECK_GE(bottom.size(), 2); 18 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()); 19 | data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), 20 | bottom[0]->height(), bottom[0]->width()); 21 | label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), 22 | bottom[1]->height(), bottom[1]->width()); 23 | const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); 24 | const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); 25 | 26 | for (int i = 0; i < bottom[0]->num(); ++i) { 27 | caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim], 28 | &data_blob_.mutable_cpu_data()[i * data_datum_dim]); 29 | caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim], 30 | &label_blob_.mutable_cpu_data()[i * label_datum_dim]); 31 | } 32 | SaveBlobs(); 33 | } 34 | 35 | template 36 | void HDF5OutputLayer::Backward_gpu(const vector*>& top, 37 | const vector& propagate_down, vector*>* bottom) { 38 | return; 39 | } 40 | 41 | INSTANTIATE_CLASS(HDF5OutputLayer); 42 | 43 | } // namespace caffe 44 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/im2col_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/common.hpp" 4 | #include "caffe/layer.hpp" 5 | #include "caffe/util/im2col.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void Im2colLayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | const Dtype* bottom_data = bottom[0]->gpu_data(); 14 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 15 | for (int n = 0; n < bottom[0]->num(); ++n) { 16 | im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, 17 | width_, kernel_h_, kernel_w_, pad_h_, pad_w_, 18 | stride_h_, stride_w_, top_data + (*top)[0]->offset(n)); 19 | } 20 | } 21 | 22 | template 23 | void Im2colLayer::Backward_gpu(const vector*>& top, 24 | const vector& propagate_down, vector*>* bottom) { 25 | const Dtype* top_diff = top[0]->gpu_diff(); 26 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 27 | for (int n = 0; n < top[0]->num(); ++n) { 28 | col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, 29 | kernel_h_, kernel_w_, pad_h_, pad_w_, 30 | stride_h_, stride_w_, bottom_diff + (*bottom)[0]->offset(n)); 31 | } 32 | } 33 | 34 | 35 | INSTANTIATE_CLASS(Im2colLayer); 36 | 37 | } // namespace caffe 38 | 
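im2col_layer.cu above wraps the im2col/col2im utilities that make convolution a single matrix multiply: every kernel-sized receptive field is unrolled into one column of a buffer. A simplified reference version for intuition (hypothetical helper im2col_ref, CPU only, no dilation; not the repo's implementation):

// Reference CPU im2col. For a C x H x W input and a KxK kernel with pad P
// and stride S, col has (C*K*K) rows and Ho*Wo columns, so the convolution
// reduces to weight(M x C*K*K) * col = output(M x Ho*Wo).
void im2col_ref(const float* data, int C, int H, int W,
                int K, int P, int S, float* col) {
  const int Ho = (H + 2 * P - K) / S + 1;
  const int Wo = (W + 2 * P - K) / S + 1;
  // One row of col per (channel, kernel_y, kernel_x) triple, matching the
  // (c * K + ky) * K + kx ordering the GEMM expects.
  for (int c = 0; c < C * K * K; ++c) {
    const int kx = c % K;
    const int ky = (c / K) % K;
    const int ch = c / K / K;
    for (int y = 0; y < Ho; ++y) {
      for (int x = 0; x < Wo; ++x) {
        const int iy = y * S - P + ky;
        const int ix = x * S - P + kx;
        col[(c * Ho + y) * Wo + x] =
            (iy >= 0 && iy < H && ix >= 0 && ix < W)
                ? data[(ch * H + iy) * W + ix]
                : 0.f;  // out-of-image taps read as zero padding
      }
    }
  }
}

For example, a 3x5x5 input with a 3x3 kernel, stride 1, and no padding yields a 27 x 9 col matrix, and the convolution becomes weight(M x 27) * col(27 x 9).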
-------------------------------------------------------------------------------- /caffe/src/caffe/layers/inner_product_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/blob.hpp" 4 | #include "caffe/common.hpp" 5 | #include "caffe/filler.hpp" 6 | #include "caffe/layer.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | #include "caffe/vision_layers.hpp" 9 | 10 | namespace caffe { 11 | 12 | template 13 | void InnerProductLayer::Forward_gpu(const vector*>& bottom, 14 | vector*>* top) { 15 | const Dtype* bottom_data = bottom[0]->gpu_data(); 16 | Dtype* top_data = (*top)[0]->mutable_gpu_data(); 17 | const Dtype* weight = this->blobs_[0]->gpu_data(); 18 | caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., 19 | bottom_data, weight, (Dtype)0., top_data); 20 | if (bias_term_) { 21 | caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., 22 | bias_multiplier_.gpu_data(), 23 | this->blobs_[1]->gpu_data(), (Dtype)1., top_data); 24 | } 25 | } 26 | 27 | template 28 | void InnerProductLayer::Backward_gpu(const vector*>& top, 29 | const vector& propagate_down, 30 | vector*>* bottom) { 31 | if (this->param_propagate_down_[0]) { 32 | const Dtype* top_diff = top[0]->gpu_diff(); 33 | const Dtype* bottom_data = (*bottom)[0]->gpu_data(); 34 | // Gradient with respect to weight 35 | caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., 36 | top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); 37 | } 38 | if (bias_term_ && this->param_propagate_down_[1]) { 39 | const Dtype* top_diff = top[0]->gpu_diff(); 40 | // Gradient with respect to bias 41 | caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, 42 | bias_multiplier_.gpu_data(), (Dtype)0., 43 | this->blobs_[1]->mutable_gpu_diff()); 44 | } 45 | if (propagate_down[0]) { 46 | const Dtype* top_diff = top[0]->gpu_diff(); 47 | // Gradient with respect to bottom data 48 | caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., 49 | top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., 50 | (*bottom)[0]->mutable_gpu_diff()); 51 | } 52 | } 53 | 54 | INSTANTIATE_CLASS(InnerProductLayer); 55 | 56 | } // namespace caffe 57 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/l1_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/io.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void L1LossLayer::Reshape( 12 | const vector*>& bottom, vector*>* top) { 13 | LossLayer::Reshape(bottom, top); 14 | CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); 15 | CHECK_EQ(bottom[0]->height(), bottom[1]->height()); 16 | CHECK_EQ(bottom[0]->width(), bottom[1]->width()); 17 | diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), 18 | bottom[0]->height(), bottom[0]->width()); 19 | } 20 | 21 | template 22 | void L1LossLayer::Forward_cpu(const vector*>& bottom, 23 | vector*>* top) { 24 | int count = bottom[0]->count(); 25 | caffe_sub( 26 | count, 27 | bottom[0]->cpu_data(), 28 | bottom[1]->cpu_data(), 29 | diff_.mutable_cpu_data()); 30 | Dtype asum = caffe_cpu_asum(count, diff_.cpu_data()); 31 | Dtype loss = asum / count ; 32 | (*top)[0]->mutable_cpu_data()[0] = loss; 33 | } 34 | 35 | template 36 | void L1LossLayer::Backward_cpu(const vector*>& top, 37 | const vector& propagate_down, 
vector*>* bottom) { 38 | caffe_cpu_sign(diff_.count(), diff_.cpu_data(), diff_.mutable_cpu_data()); 39 | for (int i = 0; i < 2; ++i) { 40 | if (propagate_down[i]) { 41 | const Dtype sign = (i == 0) ? 1 : -1; 42 | const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->count(); 43 | caffe_cpu_axpby( 44 | (*bottom)[i]->count(), // count 45 | alpha, // alpha 46 | diff_.cpu_data(), // a 47 | Dtype(0), // beta 48 | (*bottom)[i]->mutable_cpu_diff()); // b 49 | } 50 | } 51 | } 52 | 53 | #ifdef CPU_ONLY 54 | STUB_GPU(L1LossLayer); 55 | #endif 56 | 57 | INSTANTIATE_CLASS(L1LossLayer); 58 | 59 | } // namespace caffe 60 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/l1_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/io.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void L1LossLayer::Forward_gpu(const vector*>& bottom, 12 | vector*>* top) { 13 | int count = bottom[0]->count(); 14 | caffe_gpu_sub( 15 | count, 16 | bottom[0]->gpu_data(), 17 | bottom[1]->gpu_data(), 18 | diff_.mutable_gpu_data()); 19 | Dtype asum; 20 | caffe_gpu_asum(count, diff_.gpu_data(), &asum); 21 | Dtype loss = asum / count; 22 | (*top)[0]->mutable_cpu_data()[0] = loss; 23 | } 24 | 25 | template 26 | void L1LossLayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, vector*>* bottom) { 28 | caffe_gpu_sign(diff_.count(), diff_.gpu_data(), diff_.mutable_gpu_data()); 29 | for (int i = 0; i < 2; ++i) { 30 | if (propagate_down[i]) { 31 | const Dtype sign = (i == 0) ? 1 : -1; 32 | const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->count(); 33 | caffe_gpu_axpby( 34 | (*bottom)[i]->count(), // count 35 | alpha, // alpha 36 | diff_.gpu_data(), // a 37 | Dtype(0), // beta 38 | (*bottom)[i]->mutable_gpu_diff()); // b 39 | } 40 | } 41 | } 42 | 43 | INSTANTIATE_CLASS(L1LossLayer); 44 | 45 | } // namespace caffe 46 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "caffe/layer.hpp" 7 | #include "caffe/util/io.hpp" 8 | #include "caffe/util/math_functions.hpp" 9 | #include "caffe/vision_layers.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void LossLayer::LayerSetUp( 15 | const vector*>& bottom, vector*>* top) { 16 | // LossLayers have a non-zero (1) loss by default. 
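// (Net reads this weight and writes it into the loss blob's diff, which is
// why every loss Backward in this directory scales its gradient by
// top[0]->cpu_diff()[0]: a prototxt line like "loss_weight: 0.5" halves the
// layer's contribution to the total objective.)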
17 | if (this->layer_param_.loss_weight_size() == 0) { 18 | this->layer_param_.add_loss_weight(Dtype(1)); 19 | } 20 | } 21 | 22 | template 23 | void LossLayer::Reshape( 24 | const vector*>& bottom, vector*>* top) { 25 | CHECK_EQ(bottom[0]->num(), bottom[1]->num()) 26 | << "The data and label should have the same number."; 27 | (*top)[0]->Reshape(1, 1, 1, 1); 28 | } 29 | 30 | INSTANTIATE_CLASS(LossLayer); 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/multi_softmax_loss.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | #include "caffe/vision_layers.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void MultiSoftmaxLossLayer::LayerSetUp( 13 | const vector*>& bottom, vector*>* top) { 14 | LossLayer::LayerSetUp(bottom, top); 15 | num_class_ = this->layer_param_.multi_softmax_loss_param().class_per_group(); 16 | } 17 | 18 | template 19 | void MultiSoftmaxLossLayer::Reshape( 20 | const vector*>& bottom, vector*>* top) { 21 | LossLayer::Reshape(bottom, top); 22 | CHECK_EQ(bottom[0]->count()/bottom[0]->num()%num_class_, 0) 23 | << "Input dimension must be multiple of num_class_!"; 24 | CHECK_EQ(bottom[0]->count()/num_class_, bottom[1]->count()) 25 | << "Inconsistent input and label dimensions!"; 26 | acc_.Reshape(bottom[0]->num(), bottom[0]->count()/bottom[0]->num()/num_class_, 1, 1); 27 | loss_.Reshape(bottom[0]->num(), bottom[0]->count()/bottom[0]->num()/num_class_, 1, 1); 28 | 29 | if (this->layer_param_.loss_weight_size() == 1) 30 | this->layer_param_.add_loss_weight(Dtype(0)); 31 | (*top)[1]->Reshape(1, 1, 1, 1); 32 | if (this->layer_param_.loss_weight_size() == 2) 33 | this->layer_param_.add_loss_weight(Dtype(0)); 34 | (*top)[2]->Reshape(bottom[0]->num(), num_class_, 1, bottom[0]->count()/bottom[0]->num()/num_class_); 35 | } 36 | 37 | template 38 | void MultiSoftmaxLossLayer::Forward_cpu( 39 | const vector*>& bottom, vector*>* top) { 40 | // The forward pass computes the softmax prob values. 
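// (Layout: the channel axis holds G independent groups of num_class_ scores,
// e.g. 8 channels with class_per_group: 4 means two softmaxes per sample.
// Only the GPU path implements this; the CPU entry points below just log an
// error instead of computing anything.)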
41 | LOG(ERROR) << "Forward_cpu not implemented for MultiSoftmaxLossLayer"; 42 | } 43 | 44 | template 45 | void MultiSoftmaxLossLayer::Backward_cpu(const vector*>& top, 46 | const vector& propagate_down, 47 | vector*>* bottom) { 48 | LOG(ERROR) << "Backward_cpu not implemented for MultiSoftmaxLossLayer"; 49 | } 50 | 51 | 52 | #ifdef CPU_ONLY 53 | STUB_GPU(MultiSoftmaxLossLayer); 54 | #endif 55 | 56 | INSTANTIATE_CLASS(MultiSoftmaxLossLayer); 57 | 58 | 59 | } // namespace caffe 60 | 61 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/multinomial_logistic_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "caffe/layer.hpp" 7 | #include "caffe/util/io.hpp" 8 | #include "caffe/util/math_functions.hpp" 9 | #include "caffe/vision_layers.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void MultinomialLogisticLossLayer::Reshape( 15 | const vector*>& bottom, vector*>* top) { 16 | LossLayer::Reshape(bottom, top); 17 | CHECK_EQ(bottom[1]->channels(), 1); 18 | CHECK_EQ(bottom[1]->height(), 1); 19 | CHECK_EQ(bottom[1]->width(), 1); 20 | } 21 | 22 | template 23 | void MultinomialLogisticLossLayer::Forward_cpu( 24 | const vector*>& bottom, vector*>* top) { 25 | const Dtype* bottom_data = bottom[0]->cpu_data(); 26 | const Dtype* bottom_label = bottom[1]->cpu_data(); 27 | int num = bottom[0]->num(); 28 | int dim = bottom[0]->count() / bottom[0]->num(); 29 | Dtype loss = 0; 30 | for (int i = 0; i < num; ++i) { 31 | int label = static_cast(bottom_label[i]); 32 | Dtype prob = std::max( 33 | bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); 34 | loss -= log(prob); 35 | } 36 | (*top)[0]->mutable_cpu_data()[0] = loss / num; 37 | } 38 | 39 | template 40 | void MultinomialLogisticLossLayer::Backward_cpu( 41 | const vector*>& top, const vector& propagate_down, 42 | vector*>* bottom) { 43 | if (propagate_down[1]) { 44 | LOG(FATAL) << this->type_name() 45 | << " Layer cannot backpropagate to label inputs."; 46 | } 47 | if (propagate_down[0]) { 48 | const Dtype* bottom_data = (*bottom)[0]->cpu_data(); 49 | const Dtype* bottom_label = (*bottom)[1]->cpu_data(); 50 | Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); 51 | int num = (*bottom)[0]->num(); 52 | int dim = (*bottom)[0]->count() / (*bottom)[0]->num(); 53 | caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff); 54 | const Dtype scale = - top[0]->cpu_diff()[0] / num; 55 | for (int i = 0; i < num; ++i) { 56 | int label = static_cast(bottom_label[i]); 57 | Dtype prob = std::max( 58 | bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); 59 | bottom_diff[i * dim + label] = scale / prob; 60 | } 61 | } 62 | } 63 | 64 | INSTANTIATE_CLASS(MultinomialLogisticLossLayer); 65 | 66 | } // namespace caffe 67 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/neuron_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/vision_layers.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void NeuronLayer::Reshape(const vector*>& bottom, 10 | vector*>* top) { 11 | (*top)[0]->ReshapeLike(*bottom[0]); 12 | } 13 | 14 | INSTANTIATE_CLASS(NeuronLayer); 15 | 16 | } // namespace caffe 17 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/relu_layer.cpp: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void ReLULayer::Forward_cpu(const vector*>& bottom, 11 | vector*>* top) { 12 | const Dtype* bottom_data = bottom[0]->cpu_data(); 13 | Dtype* top_data = (*top)[0]->mutable_cpu_data(); 14 | const int count = bottom[0]->count(); 15 | Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); 16 | bool pos = this->layer_param_.relu_param().positive_half(); 17 | if (pos) { 18 | for (int i = 0; i < count; ++i) { 19 | top_data[i] = std::max(bottom_data[i], Dtype(0)) 20 | + negative_slope * std::min(bottom_data[i], Dtype(0)); 21 | } 22 | } else { 23 | for (int i = 0; i < count; ++i) { 24 | top_data[i] = std::min(bottom_data[i], Dtype(1)) 25 | + negative_slope * std::max(bottom_data[i]-Dtype(1), Dtype(0)); 26 | } 27 | } 28 | 29 | } 30 | 31 | template 32 | void ReLULayer::Backward_cpu(const vector*>& top, 33 | const vector& propagate_down, 34 | vector*>* bottom) { 35 | if (propagate_down[0]) { 36 | const Dtype* bottom_data = (*bottom)[0]->cpu_data(); 37 | const Dtype* top_diff = top[0]->cpu_diff(); 38 | Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); 39 | const int count = (*bottom)[0]->count(); 40 | Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); 41 | bool pos = this->layer_param_.relu_param().positive_half(); 42 | if (pos) { 43 | for (int i = 0; i < count; ++i) { 44 | bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) 45 | + negative_slope * (bottom_data[i] <= 0)); 46 | } 47 | } else { 48 | for (int i = 0; i < count; ++i) { 49 | bottom_diff[i] = top_diff[i] * ((bottom_data[i] < 1) 50 | + negative_slope * (bottom_data[i] >= 1)); 51 | } 52 | } 53 | 54 | } 55 | } 56 | 57 | 58 | #ifdef CPU_ONLY 59 | STUB_GPU(ReLULayer); 60 | #endif 61 | 62 | INSTANTIATE_CLASS(ReLULayer); 63 | 64 | 65 | } // namespace caffe 66 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/resize_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | #include "caffe/vision_layers.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void ResizeLayer::Forward_gpu(const vector*>& bottom, 13 | vector*>* top) { 14 | Forward_cpu(bottom, top); 15 | } 16 | 17 | 18 | template 19 | void ResizeLayer::Backward_gpu(const vector*>& top, 20 | const vector& propagate_down, vector*>* bottom) { 21 | Backward_cpu(top, propagate_down, bottom); 22 | } 23 | 24 | 25 | INSTANTIATE_CLASS(ResizeLayer); 26 | 27 | 28 | } // namespace caffe 29 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | #include "caffe/vision_layers.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void SigmoidCrossEntropyLossLayer::Forward_gpu( 13 | const vector*>& bottom, vector*>* top) { 14 | // The forward pass computes the sigmoid outputs. 
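// (The loss loop below uses the overflow-safe form
//   x * (t - 1{x >= 0}) - log(1 + exp(x - 2 * x * 1{x >= 0})),
// which equals t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x)) while only
// ever exponentiating a non-positive argument. Note it reads cpu_data even in
// Forward_gpu; only the backward pass below actually runs on the GPU.)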
15 | sigmoid_bottom_vec_[0] = bottom[0]; 16 | sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_); 17 | // Compute the loss (negative log likelihood) 18 | const int count = bottom[0]->count(); 19 | const int num = bottom[0]->num(); 20 | // Stable version of loss computation from input data 21 | const Dtype* input_data = bottom[0]->cpu_data(); 22 | const Dtype* target = bottom[1]->cpu_data(); 23 | Dtype loss = 0; 24 | for (int i = 0; i < count; ++i) { 25 | loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - 26 | log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); 27 | } 28 | (*top)[0]->mutable_cpu_data()[0] = loss / num; 29 | } 30 | 31 | template 32 | void SigmoidCrossEntropyLossLayer::Backward_gpu( 33 | const vector*>& top, const vector& propagate_down, 34 | vector*>* bottom) { 35 | if (propagate_down[1]) { 36 | LOG(FATAL) << this->type_name() 37 | << " Layer cannot backpropagate to label inputs."; 38 | } 39 | if (propagate_down[0]) { 40 | // First, compute the diff 41 | const int count = (*bottom)[0]->count(); 42 | const int num = (*bottom)[0]->num(); 43 | const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); 44 | const Dtype* target = (*bottom)[1]->gpu_data(); 45 | Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); 46 | caffe_copy(count, sigmoid_output_data, bottom_diff); 47 | caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); 48 | // Scale down gradient 49 | const Dtype loss_weight = top[0]->cpu_diff()[0]; 50 | caffe_gpu_scal(count, loss_weight / num, bottom_diff); 51 | } 52 | } 53 | 54 | INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); 55 | 56 | 57 | } // namespace caffe 58 | -------------------------------------------------------------------------------- /caffe/src/caffe/layers/sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | inline Dtype sigmoid(Dtype x) { 12 | return 1. / (1. + exp(-x)); 13 | } 14 | 15 | template 16 | void SigmoidLayer::Forward_cpu(const vector*>& bottom, 17 | vector*>* top) { 18 | const Dtype* bottom_data = bottom[0]->cpu_data(); 19 | Dtype* top_data = (*top)[0]->mutable_cpu_data(); 20 | const int count = bottom[0]->count(); 21 | for (int i = 0; i < count; ++i) { 22 | top_data[i] = sigmoid(bottom_data[i]); 23 | } 24 | } 25 | 26 | template 27 | void SigmoidLayer::Backward_cpu(const vector*>& top, 28 | const vector& propagate_down, 29 | vector*>* bottom) { 30 | if (propagate_down[0]) { 31 | const Dtype* top_data = top[0]->cpu_data(); 32 | const Dtype* top_diff = top[0]->cpu_diff(); 33 | Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); 34 | const int count = (*bottom)[0]->count(); 35 | for (int i = 0; i < count; ++i) { 36 | const Dtype sigmoid_x = top_data[i]; 37 | bottom_diff[i] = top_diff[i] * sigmoid_x * (1. 
          - sigmoid_x);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SigmoidLayer);
#endif

INSTANTIATE_CLASS(SigmoidLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/sigmoid_layer.cu:
--------------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = 1. / (1. + exp(-in[index]));
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
  // << " count: " << count << " bottom_data: "
  //     << (unsigned long)bottom_data
  //     << " top_data: " << (unsigned long)top_data
  //     << " blocks: " << CAFFE_GET_BLOCKS(count)
  //     << " threads: " << CAFFE_CUDA_NUM_THREADS;
}

template <typename Dtype>
__global__ void SigmoidBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype sigmoid_x = out_data[index];
    out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
    const int count = (*bottom)[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_CLASS(SigmoidLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/silence_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  for (int i = 0; i < bottom->size(); ++i) {
    if (propagate_down[i]) {
      caffe_set((*bottom)[i]->count(), Dtype(0),
                (*bottom)[i]->mutable_cpu_data());
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SilenceLayer);
#endif

INSTANTIATE_CLASS(SilenceLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/silence_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // Do nothing.
}

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  for (int i = 0; i < bottom->size(); ++i) {
    if (propagate_down[i]) {
      caffe_gpu_set((*bottom)[i]->count(), Dtype(0),
                    (*bottom)[i]->mutable_gpu_data());
    }
  }
}

INSTANTIATE_CLASS(SilenceLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/softmax_cross_entropy_layer.cu:
--------------------------------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// Per-element contribution to the cross entropy: label * log(prob).
template <typename Dtype>
__global__ void kernel_entropy_loss(int num, const Dtype* data,
    const Dtype* label, Dtype* out) {
  CUDA_KERNEL_LOOP(index, num) {
    out[index] = label[index] * log(data[index]);
  }
}

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_);
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_entropy_loss<Dtype><<<CAFFE_GET_BLOCKS(prob_.count()),
      CAFFE_CUDA_NUM_THREADS>>>(prob_.count(), prob_.gpu_data(),
      bottom[1]->gpu_data(), entropy_.mutable_gpu_data());
  // asum sums absolute values, turning the nonpositive label * log(prob)
  // terms into the positive cross entropy.
  Dtype loss;
  caffe_gpu_asum(entropy_.count(), entropy_.gpu_data(), &loss);
  (*top)[0]->mutable_cpu_data()[0] = loss / entropy_.num();
  if (top->size() == 2) {
    (*top)[1]->ShareData(prob_);
  }
}

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << "SoftmaxCrossEntropyLossLayer cannot propagate to label.";
  }

  if (propagate_down[0]) {
    // The gradient of softmax followed by cross entropy is (prob - label).
    caffe_gpu_sub((*bottom)[0]->count(), prob_.gpu_data(),
        (*bottom)[1]->gpu_data(), (*bottom)[0]->mutable_gpu_diff());
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_gpu_scal((*bottom)[0]->count(), loss_weight / (*bottom)[0]->num(),
        (*bottom)[0]->mutable_gpu_diff());
  }
}

INSTANTIATE_CLASS(SoftmaxCrossEntropyLossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/softmax_cross_entropy_loss.cpp:
--------------------------------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  softmax_bottom_vec_.clear();
  softmax_bottom_vec_.push_back(bottom[0]);
  softmax_top_vec_.clear();
  softmax_top_vec_.push_back(&prob_);
  softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_);
}

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  softmax_layer_->Reshape(softmax_bottom_vec_, &softmax_top_vec_);
  entropy_.ReshapeLike(*bottom[0]);
  if (top->size() >= 2) {
    // softmax output
    (*top)[1]->ReshapeLike(*bottom[0]);
  }
}

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  LOG(ERROR) << "Forward_cpu not implemented.";
}

template <typename Dtype>
void SoftmaxCrossEntropyLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  LOG(ERROR) << "Backward_cpu not implemented.";
}

#ifdef CPU_ONLY
STUB_GPU(SoftmaxCrossEntropyLossLayer);
#endif

INSTANTIATE_CLASS(SoftmaxCrossEntropyLossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/softmax_loss_layer.cu:
--------------------------------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  Forward_cpu(bottom, top);
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  // TODO(Yangqing): implement the GPU version of softmax.
  Backward_cpu(top, propagate_down, bottom);
}

INSTANTIATE_CLASS(SoftmaxWithLossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/split_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  count_ = bottom[0]->count();
  for (int i = 0; i < top->size(); ++i) {
    // Do not allow in-place computation in the SplitLayer. Instead, share data
    // by reference in the forward pass, and keep separate diff allocations in
    // the backward pass. (Technically, it should be possible to share the diff
    // blob of the first split output with the input, but this seems to cause
    // some strange effects in practice...)
    CHECK_NE((*top)[i], bottom[0]) << this->type_name() << " Layer does not "
        "allow in-place computation.";
    (*top)[i]->Reshape(bottom[0]->num(), bottom[0]->channels(),
                       bottom[0]->height(), bottom[0]->width());
    CHECK_EQ(count_, (*top)[i]->count());
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  for (int i = 0; i < top->size(); ++i) {
    (*top)[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->cpu_diff(), (*bottom)[0]->mutable_cpu_diff());
    return;
  }
  caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(),
            (*bottom)[0]->mutable_cpu_diff());
  // Add remaining top blob diffs.
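  // (top[0] and top[1] were combined by caffe_add above; any further top
  // diffs are accumulated into the same bottom diff in place via axpy.)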
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(SplitLayer);
#endif

INSTANTIATE_CLASS(SplitLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/split_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  for (int i = 0; i < top->size(); ++i) {
    (*top)[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->gpu_diff(), (*bottom)[0]->mutable_gpu_diff());
    return;
  }
  caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
                (*bottom)[0]->mutable_gpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
    caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

INSTANTIATE_CLASS(SplitLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/spring_loss_layer.cu:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// GPU entry points delegate to the CPU implementation.
template <typename Dtype>
void SpringLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  Forward_cpu(bottom, top);
}

template <typename Dtype>
void SpringLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

INSTANTIATE_CLASS(SpringLossLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/tanh_layer.cpp:
--------------------------------------------------------------------------------
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  Dtype exp2x;
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    // tanh(x) = (exp(2x) - 1) / (exp(2x) + 1)
    exp2x = exp(2 * bottom_data[i]);
    top_data[i] = (exp2x - Dtype(1)) / (exp2x + Dtype(1));
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    const int count = (*bottom)[0]->count();
    Dtype tanhx;
    for (int i = 0; i < count; ++i) {
      // d/dx tanh(x) = 1 - tanh(x)^2, reusing the forward output.
      tanhx = top_data[i];
      bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(TanHLayer);
#endif

INSTANTIATE_CLASS(TanHLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/tanh_layer.cu:
--------------------------------------------------------------------------------
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype exp2x = exp(2 * in[index]);
    out[index] = (exp2x - Dtype(1)) / (exp2x + Dtype(1));
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype tanhx = out_data[index];
    out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
    const int count = (*bottom)[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_CLASS(TanHLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/threshold_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
  }
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(ThresholdLayer, Forward);
#endif

INSTANTIATE_CLASS(ThresholdLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/layers/threshold_layer.cu:
--------------------------------------------------------------------------------
#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > threshold ? 1 : 0;
  }
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_CLASS(ThresholdLayer);

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/proto/CMakeLists.txt:
--------------------------------------------------------------------------------
project( Proto )

# Google Protocol Buffers
find_package( Protobuf REQUIRED )

# As of Ubuntu 14.04, protoc is no longer part of the libprotobuf-dev package
# and should be installed separately: sudo apt-get install protobuf-compiler
if(PROTOBUF_PROTOC_EXECUTABLE)
    message(STATUS "Found PROTOBUF Compiler: ${PROTOBUF_PROTOC_EXECUTABLE}")
else()
    message(FATAL_ERROR "Could not find PROTOBUF Compiler")
endif()

include_directories(${PROTOBUF_INCLUDE_DIR})
file(GLOB ProtoFiles "${CMAKE_CURRENT_SOURCE_DIR}/*.proto")
PROTOBUF_GENERATE_CPP(ProtoSources ProtoHeaders ${ProtoFiles})

add_library(proto
    ${ProtoSources}
    ${ProtoHeaders}
)

target_link_libraries(proto ${PROTOBUF_LIBRARIES})

# Create proto include directory
file(MAKE_DIRECTORY ${CMAKE_SOURCE_DIR}/include/caffe/proto)

# Copy proto headers to include/caffe/proto/
foreach(header ${ProtoHeaders})

    ADD_CUSTOM_COMMAND(TARGET proto
                       COMMAND cmake -E copy ${header}
                               ${Caffe_INCLUDE_DIRS}/caffe/proto/
                       DEPENDS ${header}
    )

endforeach(header)
--------------------------------------------------------------------------------
/caffe/src/caffe/proto/caffe_pretty_print.proto:
--------------------------------------------------------------------------------
syntax = "proto2";

package caffe;

import "caffe.proto";

// A near-duplicate of NetParameter with fields
// re-numbered to beautify automatic prototext dumps. The main practical
// purpose is to print inputs before layers, because having inputs at the end
// looks weird.
// NetParameterPrettyPrint should never be used in code except for conversion
// FROM NetParameter and subsequent dumping to a proto text file.
message NetParameterPrettyPrint {
  optional string name = 1;
  optional bool force_backward = 2 [default = false];
  repeated string input = 3;
  repeated int32 input_dim = 4;
  repeated LayerParameter layers = 5;
}
--------------------------------------------------------------------------------
/caffe/src/caffe/test/cmake_test_defines.hpp.in:
--------------------------------------------------------------------------------
#define CUDA_TEST_DEVICE @CUDA_TEST_DEVICE@
#define CMAKE_SOURCE_DIR "@CMAKE_SOURCE_DIR@/src/"
#define EXAMPLES_SOURCE_DIR "@CMAKE_SOURCE_DIR@/examples/"
#define CMAKE_EXT ".gen.cmake"
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_blob.cpp:
--------------------------------------------------------------------------------
#include <cstring>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

template <typename Dtype>
class BlobSimpleTest : public ::testing::Test {
 protected:
  BlobSimpleTest()
      : blob_(new Blob<Dtype>()),
        blob_preshaped_(new Blob<Dtype>(2, 3, 4, 5)) {}
  virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; }
  Blob<Dtype>* const blob_;
  Blob<Dtype>* const blob_preshaped_;
};

TYPED_TEST_CASE(BlobSimpleTest, TestDtypes);

TYPED_TEST(BlobSimpleTest, TestInitialization) {
  EXPECT_TRUE(this->blob_);
  EXPECT_TRUE(this->blob_preshaped_);
  EXPECT_EQ(this->blob_preshaped_->num(), 2);
  EXPECT_EQ(this->blob_preshaped_->channels(), 3);
  EXPECT_EQ(this->blob_preshaped_->height(), 4);
  EXPECT_EQ(this->blob_preshaped_->width(), 5);
  EXPECT_EQ(this->blob_preshaped_->count(), 120);
  EXPECT_EQ(this->blob_->num(), 0);
  EXPECT_EQ(this->blob_->channels(), 0);
  EXPECT_EQ(this->blob_->height(), 0);
  EXPECT_EQ(this->blob_->width(), 0);
  EXPECT_EQ(this->blob_->count(), 0);
}

TYPED_TEST(BlobSimpleTest, TestPointersCPUGPU) {
  EXPECT_TRUE(this->blob_preshaped_->gpu_data());
  EXPECT_TRUE(this->blob_preshaped_->cpu_data());
  EXPECT_TRUE(this->blob_preshaped_->mutable_gpu_data());
  EXPECT_TRUE(this->blob_preshaped_->mutable_cpu_data());
}

TYPED_TEST(BlobSimpleTest, TestReshape) {
  this->blob_->Reshape(2, 3, 4, 5);
  EXPECT_EQ(this->blob_->num(), 2);
  EXPECT_EQ(this->blob_->channels(), 3);
  EXPECT_EQ(this->blob_->height(), 4);
  EXPECT_EQ(this->blob_->width(), 5);
  EXPECT_EQ(this->blob_->count(), 120);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_caffe_main.cpp:
--------------------------------------------------------------------------------
// The main caffe test code. Your test cpp code should include this hpp
// to allow a main function to be compiled into the binary.

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
#ifndef CPU_ONLY
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
#endif
}

#ifndef CPU_ONLY
using caffe::CAFFE_TEST_CUDA_PROP;
#endif

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::google::InitGoogleLogging(argv[0]);
#ifndef CPU_ONLY
  // Before starting testing, let's first print out a little CUDA device info.
  int device;
  cudaGetDeviceCount(&device);
  cout << "Cuda number of devices: " << device << endl;
  if (argc > 1) {
    // Use the given device
    device = atoi(argv[1]);
    cudaSetDevice(device);
    cout << "Setting to use device " << device << endl;
  } else if (CUDA_TEST_DEVICE >= 0) {
    // Use the device assigned in the build configuration, with lower priority
    device = CUDA_TEST_DEVICE;
  }
  cudaGetDevice(&device);
  cout << "Current device id: " << device << endl;
  cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device);
#endif
  // invoke the test.
  return RUN_ALL_TESTS();
}
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_common.cpp:
--------------------------------------------------------------------------------
#include <cstring>

#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class CommonTest : public ::testing::Test {};

#ifndef CPU_ONLY  // GPU Caffe singleton test.

TEST_F(CommonTest, TestCublasHandlerGPU) {
  int cuda_device_id;
  CUDA_CHECK(cudaGetDevice(&cuda_device_id));
  EXPECT_TRUE(Caffe::cublas_handle());
}

#endif

TEST_F(CommonTest, TestBrewMode) {
  Caffe::set_mode(Caffe::CPU);
  EXPECT_EQ(Caffe::mode(), Caffe::CPU);
  Caffe::set_mode(Caffe::GPU);
  EXPECT_EQ(Caffe::mode(), Caffe::GPU);
}

TEST_F(CommonTest, TestPhase) {
  Caffe::set_phase(Caffe::TRAIN);
  EXPECT_EQ(Caffe::phase(), Caffe::TRAIN);
  Caffe::set_phase(Caffe::TEST);
  EXPECT_EQ(Caffe::phase(), Caffe::TEST);
}

TEST_F(CommonTest, TestRandSeedCPU) {
  SyncedMemory data_a(10 * sizeof(int));
  SyncedMemory data_b(10 * sizeof(int));
  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_a.mutable_cpu_data()));

  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_b.mutable_cpu_data()));

  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(static_cast<const int*>(data_a.cpu_data())[i],
              static_cast<const int*>(data_b.cpu_data())[i]);
  }
}

#ifndef CPU_ONLY  // GPU Caffe singleton test.
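// Re-seeding cuRAND with the same seed must reproduce the identical random
// stream; this mirrors the CPU determinism test above.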

TEST_F(CommonTest, TestRandSeedGPU) {
  SyncedMemory data_a(10 * sizeof(unsigned int));
  SyncedMemory data_b(10 * sizeof(unsigned int));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
              ((const unsigned int*)(data_b.cpu_data()))[i]);
  }
}

#endif

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/generate_sample_data.py:
--------------------------------------------------------------------------------
"""
Generate data used in the HDF5DataLayer test.
"""
import os
import numpy as np
import h5py

num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width

data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')

# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')

print data
print label

with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f:
    f['data'] = data
    f['label'] = label

with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f:
    f.create_dataset(
        'data', data=data + total_size,
        compression='gzip', compression_opts=1
    )
    f.create_dataset(
        'label', data=label,
        compression='gzip', compression_opts=1
    )

with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f:
    f.write(os.path.dirname(__file__) + '/sample_data.h5\n')
    f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n')
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/src/caffe/test/test_data/sample_data.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data_list.txt:
--------------------------------------------------------------------------------
src/caffe/test/test_data/sample_data.h5
src/caffe/test/test_data/sample_data_2_gzip.h5
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_data/sample_data_list.txt.in:
--------------------------------------------------------------------------------
@CMAKE_SOURCE_DIR@/src/caffe/test/test_data/sample_data.h5
@CMAKE_SOURCE_DIR@/src/caffe/test/test_data/sample_data_2_gzip.h5
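--------------------------------------------------------------------------------
The list files above are what an HDF5DataLayer "source" parameter points at:
one HDF5 path per line, each file holding a "data" and a "label" dataset. A
minimal sketch of inspecting the generated fixtures (assumes h5py is
installed and that it is run from the caffe/ directory, since the checked-in
list uses paths relative to the repo root; shapes follow
generate_sample_data.py):

import h5py

for path in open('src/caffe/test/test_data/sample_data_list.txt'):
    with h5py.File(path.strip(), 'r') as f:
        # data: (10, 8, 6, 5) float32, label: (10, 1) float32
        print path.strip(), f['data'].shape, f['label'].shape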
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_internal_thread.cpp:
--------------------------------------------------------------------------------
#include "glog/logging.h"
#include "gtest/gtest.h"

#include "caffe/internal_thread.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {


class InternalThreadTest : public ::testing::Test {};

TEST_F(InternalThreadTest, TestStartAndExit) {
  InternalThread thread;
  EXPECT_FALSE(thread.is_started());
  EXPECT_TRUE(thread.StartInternalThread());
  EXPECT_TRUE(thread.is_started());
  EXPECT_TRUE(thread.WaitForInternalThreadToExit());
  EXPECT_FALSE(thread.is_started());
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_multinomial_logistic_loss_layer.cpp:
--------------------------------------------------------------------------------
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename Dtype>
class MultinomialLogisticLossLayerTest : public ::testing::Test {
 protected:
  MultinomialLogisticLossLayerTest()
      : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)),
        blob_top_loss_(new Blob<Dtype>()) {
    Caffe::set_random_seed(1701);
    // fill the values
    FillerParameter filler_param;
    PositiveUnitballFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_data_);
    for (int i = 0; i < blob_bottom_label_->count(); ++i) {
      blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
    }
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_loss_);
  }
  virtual ~MultinomialLogisticLossLayerTest() {
    delete blob_bottom_data_;
    delete blob_bottom_label_;
    delete blob_top_loss_;
  }
  Blob<Dtype>* const blob_bottom_data_;
  Blob<Dtype>* const blob_bottom_label_;
  Blob<Dtype>* const blob_top_loss_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes);


TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) {
  LayerParameter layer_param;
  Caffe::set_mode(Caffe::CPU);
  MultinomialLogisticLossLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  GradientChecker<TypeParam> checker(1e-2, 2*1e-2, 1701, 0, 0.05);
  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
      &(this->blob_top_vec_), 0);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_platform.cpp:
--------------------------------------------------------------------------------
#ifndef CPU_ONLY

#include <cstdio>
#include <cstring>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;

class PlatformTest : public ::testing::Test {};

TEST_F(PlatformTest, TestInitialization) {
  printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major);
  printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor);
  printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name);
  printf("Total global memory: %lu\n",
         CAFFE_TEST_CUDA_PROP.totalGlobalMem);
  printf("Total shared memory per block: %lu\n",
         CAFFE_TEST_CUDA_PROP.sharedMemPerBlock);
  printf("Total registers per block: %d\n",
         CAFFE_TEST_CUDA_PROP.regsPerBlock);
  printf("Warp size: %d\n",
         CAFFE_TEST_CUDA_PROP.warpSize);
  printf("Maximum memory pitch: %lu\n",
         CAFFE_TEST_CUDA_PROP.memPitch);
  printf("Maximum threads per block: %d\n",
         CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock);
  for (int i = 0; i < 3; ++i)
    printf("Maximum dimension %d of block: %d\n", i,
           CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]);
  for (int i = 0; i < 3; ++i)
    printf("Maximum dimension %d of grid: %d\n", i,
           CAFFE_TEST_CUDA_PROP.maxGridSize[i]);
  printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate);
  printf("Total constant memory: %lu\n",
         CAFFE_TEST_CUDA_PROP.totalConstMem);
  printf("Texture alignment: %lu\n",
         CAFFE_TEST_CUDA_PROP.textureAlignment);
  printf("Concurrent copy and execution: %s\n",
         (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No"));
  printf("Number of multiprocessors: %d\n",
         CAFFE_TEST_CUDA_PROP.multiProcessorCount);
  printf("Kernel execution timeout: %s\n",
         (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No"));
  printf("Unified virtual addressing: %s\n",
         (CAFFE_TEST_CUDA_PROP.unifiedAddressing ? "Yes" : "No"));
  EXPECT_TRUE(true);
}

}  // namespace caffe

#endif  // CPU_ONLY
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_protobuf.cpp:
--------------------------------------------------------------------------------
// This is simply a script that tries serializing a protocol buffer in text
// format. Nothing special here and no actual code is being tested.
#include <string>

#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"

#include "caffe/proto/caffe.pb.h"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class ProtoTest : public ::testing::Test {};

TEST_F(ProtoTest, TestSerialization) {
  LayerParameter param;
  param.set_name("test");
  param.set_type(LayerParameter_LayerType_NONE);
  std::cout << "Printing in binary format." << std::endl;
  std::cout << param.SerializeAsString() << std::endl;
  std::cout << "Printing in text format."
            << std::endl;
  std::string str;
  google::protobuf::TextFormat::PrintToString(param, &str);
  std::cout << str << std::endl;
  EXPECT_TRUE(true);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/test/test_softmax_with_loss_layer.cpp:
--------------------------------------------------------------------------------
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename TypeParam>
class SoftmaxWithLossLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  SoftmaxWithLossLayerTest()
      : blob_bottom_data_(new Blob<Dtype>(10, 5, 2, 3)),
        blob_bottom_label_(new Blob<Dtype>(10, 1, 2, 3)),
        blob_top_loss_(new Blob<Dtype>()) {
    // fill the values
    FillerParameter filler_param;
    filler_param.set_std(10);
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_data_);
    for (int i = 0; i < blob_bottom_label_->count(); ++i) {
      blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
    }
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_loss_);
  }
  virtual ~SoftmaxWithLossLayerTest() {
    delete blob_bottom_data_;
    delete blob_bottom_label_;
    delete blob_top_loss_;
  }
  Blob<Dtype>* const blob_bottom_data_;
  Blob<Dtype>* const blob_bottom_label_;
  Blob<Dtype>* const blob_top_loss_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(SoftmaxWithLossLayerTest, TestDtypesAndDevices);


TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  layer_param.add_loss_weight(3);
  SoftmaxWithLossLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
      &(this->blob_top_vec_), 0);
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/caffe/util/benchmark.cpp:
--------------------------------------------------------------------------------
#include <boost/date_time/posix_time/posix_time.hpp>

#include "caffe/common.hpp"
#include "caffe/util/benchmark.hpp"

namespace caffe {

Timer::Timer()
    : initted_(false),
      running_(false),
      has_run_at_least_once_(false) {
  Init();
}

Timer::~Timer() {
  if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
    CUDA_CHECK(cudaEventDestroy(start_gpu_));
    CUDA_CHECK(cudaEventDestroy(stop_gpu_));
#else
    NO_GPU;
#endif
  }
}

void Timer::Start() {
  if (!running()) {
    if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
      CUDA_CHECK(cudaEventRecord(start_gpu_, 0));
#else
      NO_GPU;
#endif
    } else {
      start_cpu_ = boost::posix_time::microsec_clock::local_time();
    }
    running_ = true;
    has_run_at_least_once_ = true;
  }
}

void Timer::Stop() {
  if (running()) {
    if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
      CUDA_CHECK(cudaEventRecord(stop_gpu_, 0));
      CUDA_CHECK(cudaEventSynchronize(stop_gpu_));
#else
      NO_GPU;
#endif
    } else {
      stop_cpu_ = boost::posix_time::microsec_clock::local_time();
    }
    running_ = false;
  }
}

float Timer::MilliSeconds() {
  if (!has_run_at_least_once()) {
    LOG(WARNING) << "Timer has never been run before reading time.";
    return 0;
  }
  if (running()) {
    Stop();
  }
  if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
    CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_,
                                    stop_gpu_));
#else
    NO_GPU;
#endif
  } else {
    elapsed_milliseconds_ = (stop_cpu_ - start_cpu_).total_milliseconds();
  }
  return elapsed_milliseconds_;
}

float Timer::Seconds() {
  return MilliSeconds() / 1000.;
}

void Timer::Init() {
  if (!initted()) {
    if (Caffe::mode() == Caffe::GPU) {
#ifndef CPU_ONLY
      CUDA_CHECK(cudaEventCreate(&start_gpu_));
      CUDA_CHECK(cudaEventCreate(&stop_gpu_));
#else
      NO_GPU;
#endif
    }
    initted_ = true;
  }
}

}  // namespace caffe
--------------------------------------------------------------------------------
/caffe/src/gtest/CMakeLists.txt:
--------------------------------------------------------------------------------
project(gtest CXX C)
cmake_minimum_required(VERSION 2.6.2)

add_library(gtest gtest-all.cpp)
add_library(gtest_main gtest_main.cc)
target_link_libraries(gtest_main gtest)
--------------------------------------------------------------------------------
/caffe/src/gtest/gtest_main.cc:
--------------------------------------------------------------------------------
// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <iostream>

#include "gtest/gtest.h"

GTEST_API_ int main(int argc, char **argv) {
  std::cout << "Running main() from gtest_main.cc\n";

  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
--------------------------------------------------------------------------------
/caffe/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
project( Tools )

# Find all source files
file(GLOB_RECURSE TOOLS_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

# Build each source file independently
foreach(source ${TOOLS_SOURCES})
    get_filename_component(name ${source} NAME_WE)
    add_executable(${name}.bin ${source})
    set_target_properties(${name}.bin PROPERTIES OUTPUT_NAME ${name})
    target_link_libraries(${name}.bin caffe)

    ### Install #################################################################################

    install(TARGETS ${name}.bin DESTINATION tools)

endforeach(source)
--------------------------------------------------------------------------------
/caffe/tools/device_query.cpp:
--------------------------------------------------------------------------------
#include "caffe/common.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe device_query "
                "[--device_id=0] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/extra/extract_seconds.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import datetime
import os
import sys

def extract_datetime_from_line(line, year):
    # Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565
    line = line.strip().split()
    month = int(line[0][1:3])
    day = int(line[0][3:])
    timestamp = line[1]
    pos = timestamp.rfind('.')
    ts = [int(x) for x in timestamp[:pos].split(':')]
    hour = ts[0]
    minute = ts[1]
    second = ts[2]
    microsecond = int(timestamp[pos + 1:])
    dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)
    return dt

def extract_seconds(input_file, output_file):
    with open(input_file, 'r') as f:
        lines = f.readlines()
    log_created_time = os.path.getctime(input_file)
    log_created_year = datetime.datetime.fromtimestamp(log_created_time).year
    start_time_found = False
    out = open(output_file, 'w')
    for line in lines:
        line = line.strip()
        if not start_time_found and line.find('Solving') != -1:
            start_time_found = True
            start_datetime = extract_datetime_from_line(line,
                                                        log_created_year)
        if line.find('Iteration') != -1:
            dt = extract_datetime_from_line(line, log_created_year)
            elapsed_seconds = (dt - start_datetime).total_seconds()
            out.write('%f\n' % elapsed_seconds)
    out.close()

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Usage: ./extract_seconds input_file output_file')
        exit(1)
    extract_seconds(sys.argv[1], sys.argv[2])
--------------------------------------------------------------------------------
/caffe/tools/extra/launch_resize_and_crop_images.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer

# If you encounter the error that
# the address is already in use, kill the process.
# 11235 is the port of the server process
# https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py
#   sudo netstat -ap | grep 11235
# The last column of the output is PID/Program name
#   kill -9 PID
# Second solution:
#   nmap localhost
#   fuser -k 11235/tcp
# Or just wait a few seconds.

## Launch your Mapreduce locally
# num_clients: number of processes
# image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV.
# input: the file containing one image path relative to input_folder each line
# input_folder: where the original images are
# output_folder: where to save the resized and cropped images
./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/

## Launch your Mapreduce with MPI
# mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/
--------------------------------------------------------------------------------
/caffe/tools/extra/parse_log.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Usage parse_log.sh caffe.log
# It creates the following two text files, each containing a table:
#     caffe.log.test (columns: '#Iters Seconds TestAccuracy TestLoss')
#     caffe.log.train (columns: '#Iters Seconds TrainingLoss LearningRate')


# get the dirname of the script
DIR="$( cd "$(dirname "$0")" ; pwd -P )"

if [ "$#" -lt 1 ]
then
    echo "Usage parse_log.sh /path/to/your.log"
    exit
fi
LOG=`basename $1`
grep -B 1 'Test ' $1 > aux.txt
grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt
grep 'Test net output #0' aux.txt | awk '{print $11}' > aux1.txt
grep 'Test net output #1' aux.txt | awk '{print $11}' > aux2.txt

# Extracting elapsed seconds
# For extraction of time since this line contains the start time
grep '] Solving ' $1 > aux3.txt
grep 'Testing net' $1 >> aux3.txt
$DIR/extract_seconds.py aux3.txt aux4.txt

# Generating
echo '#Iters Seconds TestAccuracy TestLoss' > $LOG.test
paste aux0.txt aux4.txt aux1.txt aux2.txt | column -t >> $LOG.test
rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt

# For extraction of time since this line contains the start time
grep '] Solving ' $1 > aux.txt
grep ', loss = ' $1 >> aux.txt
grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt
grep ', loss = ' $1 | awk '{print $9}' > aux1.txt
grep ', lr = ' $1 | awk '{print $9}' > aux2.txt

# Extracting elapsed seconds
$DIR/extract_seconds.py aux.txt aux3.txt

# Generating
echo '#Iters Seconds TrainingLoss LearningRate' > $LOG.train
paste aux0.txt aux3.txt aux1.txt aux2.txt | column -t >> $LOG.train
rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt
--------------------------------------------------------------------------------
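The two tables parse_log.sh writes are whitespace-delimited with a one-line
header, so they load straight into numpy. A minimal plotting sketch (assumes
numpy and matplotlib are installed; the file name caffe.log.train is what the
script produces when run on a log named caffe.log):

import numpy as np
import matplotlib.pyplot as plt

# Columns: #Iters Seconds TrainingLoss LearningRate
iters, seconds, loss, lr = np.loadtxt('caffe.log.train', skiprows=1,
                                      unpack=True)
plt.plot(iters, loss)
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.savefig('loss.png')
--------------------------------------------------------------------------------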
/caffe/tools/finetune_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe train --solver=... "
                "[--weights=...] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/net_speed_benchmark.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe time --model=... "
                "[--iterations=50] [--gpu] [--device_id=0]";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/test_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe test --model=... "
                "--weights=... instead.";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/train_net.cpp:
--------------------------------------------------------------------------------
#include "caffe/caffe.hpp"

int main(int argc, char** argv) {
  LOG(FATAL) << "Deprecated. Use caffe train --solver=... "
                "[--snapshot=...] instead.";
  return 0;
}
--------------------------------------------------------------------------------
/caffe/tools/upgrade_net_proto_binary.cpp:
--------------------------------------------------------------------------------
// This is a script to upgrade "V0" network prototxts to the new format.
// Usage:
//    upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out

#include <string>
#include <fstream>  // NOLINT(readability/streams)
#include <iostream>  // NOLINT(readability/streams)

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"

using std::ofstream;

using namespace caffe;  // NOLINT(build/namespaces)

int main(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: "
        << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out";
    return 1;
  }

  NetParameter net_param;
  if (!ReadProtoFromBinaryFile(argv[1], &net_param)) {
    LOG(ERROR) << "Failed to parse input binary file as NetParameter: "
               << argv[1];
    return 2;
  }
  bool need_upgrade = NetNeedsUpgrade(net_param);
  bool success = true;
  if (need_upgrade) {
    NetParameter v0_net_param(net_param);
    success = UpgradeV0Net(v0_net_param, &net_param);
  } else {
    LOG(ERROR) << "File already in V1 proto format: " << argv[1];
  }

  WriteProtoToBinaryFile(net_param, argv[2]);

  LOG(ERROR) << "Wrote upgraded NetParameter binary proto to " << argv[2];
  return !success;
}
--------------------------------------------------------------------------------
/caffe/tools/upgrade_net_proto_text.cpp:
--------------------------------------------------------------------------------
// This is a script to upgrade "V0" network prototxts to the new format.
// Usage:
//    upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out

#include <string>
#include <fstream>  // NOLINT(readability/streams)
#include <iostream>  // NOLINT(readability/streams)

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"

using std::ofstream;

using namespace caffe;  // NOLINT(build/namespaces)

int main(int argc, char** argv) {
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: "
        << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out";
    return 1;
  }

  NetParameter net_param;
  if (!ReadProtoFromTextFile(argv[1], &net_param)) {
    LOG(ERROR) << "Failed to parse input text file as NetParameter: "
               << argv[1];
    return 2;
  }
  bool need_upgrade = NetNeedsUpgrade(net_param);
  bool need_data_upgrade = NetNeedsDataUpgrade(net_param);
  bool success = true;
  if (need_upgrade) {
    NetParameter v0_net_param(net_param);
    success = UpgradeV0Net(v0_net_param, &net_param);
  } else {
    LOG(ERROR) << "File already in V1 proto format: " << argv[1];
  }

  if (need_data_upgrade) {
    UpgradeNetDataTransformation(&net_param);
  }

  // Convert to a NetParameterPrettyPrint to print fields in desired
  // order.
  NetParameterPrettyPrint net_param_pretty;
  NetParameterToPrettyPrint(net_param, &net_param_pretty);

  // Save new format prototxt.
  WriteProtoToTextFile(net_param_pretty, argv[2]);

  LOG(ERROR) << "Wrote upgraded NetParameter text proto to " << argv[2];
  return !success;
}
--------------------------------------------------------------------------------
/dec/exp/mnist/save_iter_100000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/dec/exp/mnist/save_iter_100000.caffemodel
--------------------------------------------------------------------------------
/dec/exp/reutersidf/save_iter_100000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/dec/exp/reutersidf/save_iter_100000.caffemodel
--------------------------------------------------------------------------------
/dec/exp/reutersidf10k/save_iter_100000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/dec/exp/reutersidf10k/save_iter_100000.caffemodel
--------------------------------------------------------------------------------
/dec/exp/stl/save_iter_100000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/dec/exp/stl/save_iter_100000.caffemodel
--------------------------------------------------------------------------------
/dec/exp/test/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/piiswrong/dec/e551e01f90a3d67d2ef9c90e968c8faf5d8f857d/dec/exp/test/.gitignore
--------------------------------------------------------------------------------
/dec/make_mnist_data.py:
--------------------------------------------------------------------------------
import dec
print 'Preparing mnist data. This could take a while...'
dec.make_mnist_data()
--------------------------------------------------------------------------------
/dec/make_reuters_data.py:
--------------------------------------------------------------------------------
import dec
print 'Preparing reuters data. This could take a while...'
dec.make_reuters_data()
--------------------------------------------------------------------------------
/dec/make_stl_data.py:
--------------------------------------------------------------------------------
import os
import dec
print 'Building HOG feature extractor...'
os.system('python setup_features.py build')
os.system('python setup_features.py install')

print 'Preparing stl data. This could take a while...'
dec.make_stl_data()
--------------------------------------------------------------------------------
/dec/setup_features.py:
--------------------------------------------------------------------------------
from distutils.core import setup
from Cython.Build import cythonize
import numpy as np

setup(
    name = "features",
    ext_modules = cythonize("features.pyx"),
    include_dirs = [np.get_include()]
)
--------------------------------------------------------------------------------
/mnist/get_data.sh:
--------------------------------------------------------------------------------
#!/bin/sh
wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
gunzip -f train-images-idx3-ubyte.gz
wget http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
gunzip -f train-labels-idx1-ubyte.gz
wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
gunzip -f t10k-images-idx3-ubyte.gz
wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
gunzip -f t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/reuters/get_data.sh:
--------------------------------------------------------------------------------
#!/bin/sh
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a12-token-files/lyrl2004_tokens_test_pt0.dat.gz
gunzip lyrl2004_tokens_test_pt0.dat.gz
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a12-token-files/lyrl2004_tokens_test_pt1.dat.gz
gunzip lyrl2004_tokens_test_pt1.dat.gz
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a12-token-files/lyrl2004_tokens_test_pt2.dat.gz
gunzip lyrl2004_tokens_test_pt2.dat.gz
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a12-token-files/lyrl2004_tokens_test_pt3.dat.gz
gunzip lyrl2004_tokens_test_pt3.dat.gz
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a12-token-files/lyrl2004_tokens_train.dat.gz
gunzip lyrl2004_tokens_train.dat.gz
wget http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a08-topic-qrels/rcv1-v2.topics.qrels.gz
gunzip rcv1-v2.topics.qrels.gz
--------------------------------------------------------------------------------
/stl/get_data.sh:
--------------------------------------------------------------------------------
#!/bin/sh
wget http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz
tar -xzf stl10_binary.tar.gz --strip-components=1
--------------------------------------------------------------------------------
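Taken together, the data scripts above define the expected preparation flow:
fetch the raw archives with each dataset's get_data.sh, then build the
training inputs with the matching make_*_data.py driver under dec/. A minimal
end-to-end sketch for MNIST (assumes it is run from the repository root and
that the dec package and its bundled Caffe build are importable; the other
datasets follow the same pattern with their own directories and drivers):

import os

# Download and unpack the raw MNIST IDX files (see mnist/get_data.sh).
os.system('cd mnist && sh get_data.sh')

# Convert them into the inputs the DEC experiments expect
# (equivalent to running dec/make_mnist_data.py from the dec/ directory).
os.chdir('dec')
import dec
dec.make_mnist_data()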