├── .Doxyfile
├── .gitignore
├── .travis.yml
├── CMakeLists.txt
├── CONTRIBUTORS.md
├── INSTALL.md
├── LICENSE
├── Makefile
├── Makefile.config.example
├── README.md
├── caffe.cloc
├── cmake
│   ├── ConfigGen.cmake
│   ├── Cuda.cmake
│   ├── Dependencies.cmake
│   ├── External
│   │   ├── gflags.cmake
│   │   └── glog.cmake
│   ├── Misc.cmake
│   ├── Modules
│   │   ├── FindAtlas.cmake
│   │   ├── FindGFlags.cmake
│   │   ├── FindGlog.cmake
│   │   ├── FindLAPACK.cmake
│   │   ├── FindLMDB.cmake
│   │   ├── FindLevelDB.cmake
│   │   ├── FindMKL.cmake
│   │   ├── FindMatlabMex.cmake
│   │   ├── FindNumPy.cmake
│   │   ├── FindOpenBLAS.cmake
│   │   ├── FindSnappy.cmake
│   │   └── FindvecLib.cmake
│   ├── ProtoBuf.cmake
│   ├── Summary.cmake
│   ├── Targets.cmake
│   ├── Templates
│   │   ├── CaffeConfig.cmake.in
│   │   ├── CaffeConfigVersion.cmake.in
│   │   └── caffe_config.h.in
│   ├── Utils.cmake
│   └── lint.cmake
├── data
│   ├── cifar10
│   │   └── get_cifar10.sh
│   ├── ilsvrc12
│   │   └── get_ilsvrc_aux.sh
│   └── mnist
│       └── get_mnist.sh
├── docs
│   ├── CMakeLists.txt
│   ├── CNAME
│   ├── README.md
│   ├── _config.yml
│   ├── _layouts
│   │   └── default.html
│   ├── development.md
│   ├── images
│   │   ├── GitHub-Mark-64px.png
│   │   └── caffeine-icon.png
│   ├── index.md
│   ├── install_apt.md
│   ├── install_osx.md
│   ├── install_yum.md
│   ├── installation.md
│   ├── model_zoo.md
│   ├── performance_hardware.md
│   ├── stylesheets
│   │   ├── pygment_trac.css
│   │   ├── reset.css
│   │   └── styles.css
│   └── tutorial
│       ├── convolution.md
│       ├── data.md
│       ├── fig
│       │   ├── .gitignore
│       │   ├── backward.jpg
│       │   ├── forward.jpg
│       │   ├── forward_backward.png
│       │   ├── layer.jpg
│       │   └── logreg.jpg
│       ├── forward_backward.md
│       ├── index.md
│       ├── interfaces.md
│       ├── layers.md
│       ├── loss.md
│       ├── net_layer_blob.md
│       └── solver.md
├── examples
│   ├── 00-classification.ipynb
│   ├── 01-learning-lenet.ipynb
│   ├── 02-brewing-logreg.ipynb
│   ├── 03-fine-tuning.ipynb
│   ├── CMakeLists.txt
│   ├── cifar10
│   │   ├── cifar10_full.prototxt
│   │   ├── cifar10_full_solver.prototxt
│   │   ├── cifar10_full_solver_lr1.prototxt
│   │   ├── cifar10_full_solver_lr2.prototxt
│   │   ├── cifar10_full_train_test.prototxt
│   │   ├── cifar10_quick.prototxt
│   │   ├── cifar10_quick_solver.prototxt
│   │   ├── cifar10_quick_solver_lr1.prototxt
│   │   ├── cifar10_quick_train_test.prototxt
│   │   ├── convert_cifar_data.cpp
│   │   ├── create_cifar10.sh
│   │   ├── readme.md
│   │   ├── train_full.sh
│   │   └── train_quick.sh
│   ├── cpp_classification
│   │   ├── classification.cpp
│   │   └── readme.md
│   ├── detection.ipynb
│   ├── feature_extraction
│   │   ├── imagenet_val.prototxt
│   │   └── readme.md
│   ├── finetune_flickr_style
│   │   ├── assemble_data.py
│   │   ├── flickr_style.csv.gz
│   │   ├── readme.md
│   │   └── style_names.txt
│   ├── finetune_pascal_detection
│   │   ├── pascal_finetune_solver.prototxt
│   │   └── pascal_finetune_trainval_test.prototxt
│   ├── hdf5_classification
│   │   ├── nonlinear_auto_test.prototxt
│   │   ├── nonlinear_auto_train.prototxt
│   │   ├── nonlinear_solver.prototxt
│   │   ├── nonlinear_train_val.prototxt
│   │   ├── solver.prototxt
│   │   └── train_val.prototxt
│   ├── imagenet
│   │   ├── create_imagenet.sh
│   │   ├── make_imagenet_mean.sh
│   │   ├── readme.md
│   │   ├── resume_training.sh
│   │   └── train_caffenet.sh
│   ├── images
│   │   ├── cat.jpg
│   │   ├── cat_gray.jpg
│   │   └── fish-bike.jpg
│   ├── mnist
│   │   ├── convert_mnist_data.cpp
│   │   ├── create_mnist.sh
│   │   ├── lenet.prototxt
│   │   ├── lenet_auto_solver.prototxt
│   │   ├── lenet_consolidated_solver.prototxt
│   │   ├── lenet_multistep_solver.prototxt
│   │   ├── lenet_solver.prototxt
│   │   ├── lenet_stepearly_solver.prototxt
│   │   ├── lenet_train_test.prototxt
│   │   ├── mnist_autoencoder.prototxt
│   │   ├── mnist_autoencoder_solver.prototxt
│   │   ├── mnist_autoencoder_solver_adagrad.prototxt
│   │   ├── mnist_autoencoder_solver_nesterov.prototxt
│   │   ├── readme.md
│   │   ├── train_lenet.sh
│   │   ├── train_lenet_consolidated.sh
│   │   ├── train_mnist_autoencoder.sh
│   │   ├── train_mnist_autoencoder_adagrad.sh
│   │   └── train_mnist_autoencoder_nesterov.sh
│   ├── net_surgery.ipynb
│   ├── net_surgery
│   │   ├── bvlc_caffenet_full_conv.prototxt
│   │   └── conv.prototxt
│   ├── pycaffe
│   │   ├── caffenet.py
│   │   ├── layers
│   │   │   └── pyloss.py
│   │   └── linreg.prototxt
│   ├── siamese
│   │   ├── convert_mnist_siamese_data.cpp
│   │   ├── create_mnist_siamese.sh
│   │   ├── mnist_siamese.ipynb
│   │   ├── mnist_siamese.prototxt
│   │   ├── mnist_siamese_solver.prototxt
│   │   ├── mnist_siamese_train_test.prototxt
│   │   ├── readme.md
│   │   └── train_mnist_siamese.sh
│   └── web_demo
│       ├── app.py
│       ├── exifutil.py
│       ├── readme.md
│       ├── requirements.txt
│       └── templates
│           └── index.html
├── include
│   └── caffe
│       ├── blob.hpp
│       ├── caffe.hpp
│       ├── common.hpp
│       ├── common_layers.hpp
│       ├── data_layers.hpp
│       ├── data_transformer.hpp
│       ├── filler.hpp
│       ├── internal_thread.hpp
│       ├── layer.hpp
│       ├── layer_factory.hpp
│       ├── loss_layers.hpp
│       ├── net.hpp
│       ├── neuron_layers.hpp
│       ├── python_layer.hpp
│       ├── solver.hpp
│       ├── syncedmem.hpp
│       ├── test
│       │   ├── test_caffe_main.hpp
│       │   └── test_gradient_check_util.hpp
│       ├── util
│       │   ├── benchmark.hpp
│       │   ├── cudnn.hpp
│       │   ├── db.hpp
│       │   ├── db_leveldb.hpp
│       │   ├── db_lmdb.hpp
│       │   ├── device_alternate.hpp
│       │   ├── im2col.hpp
│       │   ├── insert_splits.hpp
│       │   ├── io.hpp
│       │   ├── math_functions.hpp
│       │   ├── mkl_alternate.hpp
│       │   ├── rng.hpp
│       │   └── upgrade_proto.hpp
│       └── vision_layers.hpp
├── matlab
│   ├── +caffe
│   │   ├── +test
│   │   │   ├── test_net.m
│   │   │   └── test_solver.m
│   │   ├── Blob.m
│   │   ├── Layer.m
│   │   ├── Net.m
│   │   ├── Solver.m
│   │   ├── get_net.m
│   │   ├── get_solver.m
│   │   ├── imagenet
│   │   │   └── ilsvrc_2012_mean.mat
│   │   ├── io.m
│   │   ├── private
│   │   │   ├── CHECK.m
│   │   │   ├── CHECK_FILE_EXIST.m
│   │   │   ├── caffe_.cpp
│   │   │   └── is_valid_handle.m
│   │   ├── reset_all.m
│   │   ├── run_tests.m
│   │   ├── set_device.m
│   │   ├── set_mode_cpu.m
│   │   └── set_mode_gpu.m
│   ├── CMakeLists.txt
│   ├── demo
│   │   └── classification_demo.m
│   └── hdf5creation
│       ├── .gitignore
│       ├── demo.m
│       └── store2hdf5.m
├── models
│   ├── bvlc_alexnet
│   │   ├── deploy.prototxt
│   │   ├── readme.md
│   │   ├── solver.prototxt
│   │   └── train_val.prototxt
│   ├── bvlc_googlenet
│   │   ├── deploy.prototxt
│   │   ├── quick_solver.prototxt
│   │   ├── readme.md
│   │   ├── solver.prototxt
│   │   └── train_val.prototxt
│   ├── bvlc_reference_caffenet
│   │   ├── deploy.prototxt
│   │   ├── readme.md
│   │   ├── solver.prototxt
│   │   └── train_val.prototxt
│   ├── bvlc_reference_rcnn_ilsvrc13
│   │   ├── deploy.prototxt
│   │   └── readme.md
│   └── finetune_flickr_style
│       ├── deploy.prototxt
│       ├── readme.md
│       ├── solver.prototxt
│       └── train_val.prototxt
├── python
│   ├── CMakeLists.txt
│   ├── caffe
│   │   ├── __init__.py
│   │   ├── _caffe.cpp
│   │   ├── classifier.py
│   │   ├── detector.py
│   │   ├── draw.py
│   │   ├── imagenet
│   │   │   └── ilsvrc_2012_mean.npy
│   │   ├── io.py
│   │   ├── net_spec.py
│   │   ├── pycaffe.py
│   │   └── test
│   │       ├── test_net.py
│   │       ├── test_net_spec.py
│   │       ├── test_python_layer.py
│   │       └── test_solver.py
│   ├── classify.py
│   ├── detect.py
│   ├── draw_net.py
│   └── requirements.txt
├── scripts
│   ├── build_docs.sh
│   ├── copy_notebook.py
│   ├── cpp_lint.py
│   ├── deploy_docs.sh
│   ├── download_model_binary.py
│   ├── download_model_from_gist.sh
│   ├── gather_examples.sh
│   ├── travis
│   │   ├── travis_build_and_test.sh
│   │   ├── travis_install.sh
│   │   └── travis_setup_makefile_config.sh
│   └── upload_model_to_gist.sh
├── src
│   ├── caffe
│   │   ├── CMakeLists.txt
│   │   ├── blob.cpp
│   │   ├── common.cpp
│   │   ├── data_transformer.cpp
│   │   ├── internal_thread.cpp
│   │   ├── layer_factory.cpp
│   │   ├── layers
│   │   │   ├── absval_layer.cpp
│   │   │   ├── absval_layer.cu
│   │   │   ├── accuracy_layer.cpp
│   │   │   ├── argmax_layer.cpp
│   │   │   ├── base_conv_layer.cpp
│   │   │   ├── base_data_layer.cpp
│   │   │   ├── base_data_layer.cu
│   │   │   ├── bnll_layer.cpp
│   │   │   ├── bnll_layer.cu
│   │   │   ├── concat_layer.cpp
│   │   │   ├── concat_layer.cu
│   │   │   ├── contrastive_loss_layer.cpp
│   │   │   ├── contrastive_loss_layer.cu
│   │   │   ├── conv_layer.cpp
│   │   │   ├── conv_layer.cu
│   │   │   ├── cudnn_conv_layer.cpp
│   │   │   ├── cudnn_conv_layer.cu
│   │   │   ├── cudnn_pooling_layer.cpp
│   │   │   ├── cudnn_pooling_layer.cu
│   │   │   ├── cudnn_relu_layer.cpp
│   │   │   ├── cudnn_relu_layer.cu
│   │   │   ├── cudnn_sigmoid_layer.cpp
│   │   │   ├── cudnn_sigmoid_layer.cu
│   │   │   ├── cudnn_softmax_layer.cpp
│   │   │   ├── cudnn_softmax_layer.cu
│   │   │   ├── cudnn_tanh_layer.cpp
│   │   │   ├── cudnn_tanh_layer.cu
│   │   │   ├── data_layer.cpp
│   │   │   ├── deconv_layer.cpp
│   │   │   ├── deconv_layer.cu
│   │   │   ├── dropout_layer.cpp
│   │   │   ├── dropout_layer.cu
│   │   │   ├── dummy_data_layer.cpp
│   │   │   ├── eltwise_layer.cpp
│   │   │   ├── eltwise_layer.cu
│   │   │   ├── euclidean_loss_layer.cpp
│   │   │   ├── euclidean_loss_layer.cu
│   │   │   ├── exp_layer.cpp
│   │   │   ├── exp_layer.cu
│   │   │   ├── filter_layer.cpp
│   │   │   ├── filter_layer.cu
│   │   │   ├── flatten_layer.cpp
│   │   │   ├── hdf5_data_layer.cpp
│   │   │   ├── hdf5_data_layer.cu
│   │   │   ├── hdf5_output_layer.cpp
│   │   │   ├── hdf5_output_layer.cu
│   │   │   ├── hinge_loss_layer.cpp
│   │   │   ├── im2col_layer.cpp
│   │   │   ├── im2col_layer.cu
│   │   │   ├── image_data_layer.cpp
│   │   │   ├── infogain_loss_layer.cpp
│   │   │   ├── inner_product_layer.cpp
│   │   │   ├── inner_product_layer.cu
│   │   │   ├── log_layer.cpp
│   │   │   ├── log_layer.cu
│   │   │   ├── loss_layer.cpp
│   │   │   ├── lrn_layer.cpp
│   │   │   ├── lrn_layer.cu
│   │   │   ├── memory_data_layer.cpp
│   │   │   ├── multinomial_logistic_loss_layer.cpp
│   │   │   ├── mvn_layer.cpp
│   │   │   ├── mvn_layer.cu
│   │   │   ├── neuron_layer.cpp
│   │   │   ├── pooling_layer.cpp
│   │   │   ├── pooling_layer.cu
│   │   │   ├── power_layer.cpp
│   │   │   ├── power_layer.cu
│   │   │   ├── prelu_layer.cpp
│   │   │   ├── prelu_layer.cu
│   │   │   ├── reduction_layer.cpp
│   │   │   ├── reduction_layer.cu
│   │   │   ├── relu_layer.cpp
│   │   │   ├── relu_layer.cu
│   │   │   ├── reshape_layer.cpp
│   │   │   ├── sigmoid_cross_entropy_loss_layer.cpp
│   │   │   ├── sigmoid_cross_entropy_loss_layer.cu
│   │   │   ├── sigmoid_layer.cpp
│   │   │   ├── sigmoid_layer.cu
│   │   │   ├── silence_layer.cpp
│   │   │   ├── silence_layer.cu
│   │   │   ├── slice_layer.cpp
│   │   │   ├── slice_layer.cu
│   │   │   ├── softmax_layer.cpp
│   │   │   ├── softmax_layer.cu
│   │   │   ├── softmax_loss_layer.cpp
│   │   │   ├── softmax_loss_layer.cu
│   │   │   ├── split_layer.cpp
│   │   │   ├── split_layer.cu
│   │   │   ├── spp_layer.cpp
│   │   │   ├── tanh_layer.cpp
│   │   │   ├── tanh_layer.cu
│   │   │   ├── threshold_layer.cpp
│   │   │   ├── threshold_layer.cu
│   │   │   └── window_data_layer.cpp
│   │   ├── net.cpp
│   │   ├── proto
│   │   │   └── caffe.proto
│   │   ├── solver.cpp
│   │   ├── syncedmem.cpp
│   │   ├── test
│   │   │   ├── CMakeLists.txt
│   │   │   ├── test_accuracy_layer.cpp
│   │   │   ├── test_argmax_layer.cpp
│   │   │   ├── test_benchmark.cpp
│   │   │   ├── test_blob.cpp
│   │   │   ├── test_caffe_main.cpp
│   │   │   ├── test_common.cpp
│   │   │   ├── test_concat_layer.cpp
│   │   │   ├── test_contrastive_loss_layer.cpp
│   │   │   ├── test_convolution_layer.cpp
│   │   │   ├── test_data
│   │   │   │   ├── generate_sample_data.py
│   │   │   │   ├── sample_data.h5
│   │   │   │   ├── sample_data_2_gzip.h5
│   │   │   │   └── sample_data_list.txt
│   │   │   ├── test_data_layer.cpp
│   │   │   ├── test_data_transformer.cpp
│   │   │   ├── test_db.cpp
│   │   │   ├── test_deconvolution_layer.cpp
│   │   │   ├── test_dummy_data_layer.cpp
│   │   │   ├── test_eltwise_layer.cpp
│   │   │   ├── test_euclidean_loss_layer.cpp
│   │   │   ├── test_filler.cpp
│   │   │   ├── test_filter_layer.cpp
│   │   │   ├── test_flatten_layer.cpp
│   │   │   ├── test_gradient_based_solver.cpp
│   │   │   ├── test_hdf5_output_layer.cpp
│   │   │   ├── test_hdf5data_layer.cpp
│   │   │   ├── test_hinge_loss_layer.cpp
│   │   │   ├── test_im2col_kernel.cu
│   │   │   ├── test_im2col_layer.cpp
│   │   │   ├── test_image_data_layer.cpp
│   │   │   ├── test_infogain_loss_layer.cpp
│   │   │   ├── test_inner_product_layer.cpp
│   │   │   ├── test_internal_thread.cpp
│   │   │   ├── test_io.cpp
│   │   │   ├── test_layer_factory.cpp
│   │   │   ├── test_lrn_layer.cpp
│   │   │   ├── test_math_functions.cpp
│   │   │   ├── test_maxpool_dropout_layers.cpp
│   │   │   ├── test_memory_data_layer.cpp
│   │   │   ├── test_multinomial_logistic_loss_layer.cpp
│   │   │   ├── test_mvn_layer.cpp
│   │   │   ├── test_net.cpp
│   │   │   ├── test_neuron_layer.cpp
│   │   │   ├── test_platform.cpp
│   │   │   ├── test_pooling_layer.cpp
│   │   │   ├── test_power_layer.cpp
│   │   │   ├── test_protobuf.cpp
│   │   │   ├── test_random_number_generator.cpp
│   │   │   ├── test_reduction_layer.cpp
│   │   │   ├── test_reshape_layer.cpp
│   │   │   ├── test_sigmoid_cross_entropy_loss_layer.cpp
│   │   │   ├── test_slice_layer.cpp
│   │   │   ├── test_softmax_layer.cpp
│   │   │   ├── test_softmax_with_loss_layer.cpp
│   │   │   ├── test_solver.cpp
│   │   │   ├── test_split_layer.cpp
│   │   │   ├── test_spp_layer.cpp
│   │   │   ├── test_stochastic_pooling.cpp
│   │   │   ├── test_syncedmem.cpp
│   │   │   ├── test_tanh_layer.cpp
│   │   │   ├── test_threshold_layer.cpp
│   │   │   ├── test_upgrade_proto.cpp
│   │   │   └── test_util_blas.cpp
│   │   └── util
│   │       ├── benchmark.cpp
│   │       ├── cudnn.cpp
│   │       ├── db.cpp
│   │       ├── db_leveldb.cpp
│   │       ├── db_lmdb.cpp
│   │       ├── im2col.cpp
│   │       ├── im2col.cu
│   │       ├── insert_splits.cpp
│   │       ├── io.cpp
│   │       ├── math_functions.cpp
│   │       ├── math_functions.cu
│   │       └── upgrade_proto.cpp
│   └── gtest
│       ├── CMakeLists.txt
│       ├── gtest-all.cpp
│       ├── gtest.h
│       └── gtest_main.cc
└── tools
    ├── CMakeLists.txt
    ├── caffe.cpp
    ├── compute_image_mean.cpp
    ├── convert_imageset.cpp
    ├── device_query.cpp
    ├── extra
    │   ├── extract_seconds.py
    │   ├── launch_resize_and_crop_images.sh
    │   ├── parse_log.py
    │   ├── parse_log.sh
    │   ├── plot_log.gnuplot.example
    │   ├── plot_training_log.py.example
    │   └── resize_and_crop_images.py
    ├── extract_features.cpp
    ├── finetune_net.cpp
    ├── net_speed_benchmark.cpp
    ├── test_net.cpp
    ├── train_net.cpp
    ├── upgrade_net_proto_binary.cpp
    └── upgrade_net_proto_text.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | ## General
2 |
3 | # Compiled Object files
4 | *.slo
5 | *.lo
6 | *.o
7 | *.cuo
8 |
9 | # Compiled Dynamic libraries
10 | *.so
11 | *.dylib
12 |
13 | # Compiled Static libraries
14 | *.lai
15 | *.la
16 | *.a
17 |
18 | # Compiled protocol buffers
19 | *.pb.h
20 | *.pb.cc
21 | *_pb2.py
22 |
23 | # Compiled python
24 | *.pyc
25 |
26 | # Compiled MATLAB
27 | *.mex*
28 |
29 | # IPython notebook checkpoints
30 | .ipynb_checkpoints
31 |
32 | # Editor temporaries
33 | *.swp
34 | *~
35 |
36 | # Sublime Text settings
37 | *.sublime-workspace
38 | *.sublime-project
39 |
40 | # Eclipse Project settings
41 | *.*project
42 | .settings
43 |
44 | # QtCreator files
45 | *.user
46 |
47 | # PyCharm files
48 | .idea
49 |
50 | # OSX dir files
51 | .DS_Store
52 |
53 | ## Caffe
54 |
55 | # User's build configuration
56 | Makefile.config
57 |
58 | # Data and models are either
59 | # 1. reference, and not casually committed
60 | # 2. custom, and live on their own unless they're deliberately contributed
61 | data/*
62 | models/*
63 | *.caffemodel
64 | *.solverstate
65 | *.binaryproto
66 | *leveldb
67 | *lmdb
68 |
69 | # build, distribute, and bins (+ python proto bindings)
70 | build
71 | .build_debug/*
72 | .build_release/*
73 | distribute/*
74 | *.testbin
75 | *.bin
76 | python/caffe/proto/
77 | cmake_build
78 | .cmake_build
79 |
80 | # Generated documentation
81 | docs/_site
82 | docs/gathered
83 | _site
84 | doxygen
85 | docs/dev
86 |
87 | # LevelDB files
88 | *.sst
89 | *.ldb
90 | LOCK
91 | LOG*
92 | CURRENT
93 | MANIFEST-*
94 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # Use a build matrix to run multiple builds in parallel:
2 | # some using CMake, and some using make, with and without CUDA.
3 | env:
4 | matrix:
5 | - WITH_CUDA=false WITH_CMAKE=false
6 | - WITH_CUDA=false WITH_CMAKE=true
7 | - WITH_CUDA=true WITH_CMAKE=false
8 | - WITH_CUDA=true WITH_CMAKE=true
9 | - WITH_CUDA=false WITH_CMAKE=true PYTHON_VERSION=3
10 |
11 | language: cpp
12 |
13 | # Cache Ubuntu apt packages.
14 | cache:
15 | apt: true
16 | directories:
17 | - /home/travis/miniconda
18 | - /home/travis/miniconda2
19 | - /home/travis/miniconda3
20 |
21 | compiler: gcc
22 |
23 | before_install:
24 | - export NUM_THREADS=4
25 | - export SCRIPTS=./scripts/travis
26 | - export CONDA_DIR="/home/travis/miniconda$PYTHON_VERSION"
27 |
28 | install:
29 | - sudo -E $SCRIPTS/travis_install.sh
30 |
31 | before_script:
32 | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64:$CONDA_DIR/lib
33 | - export PATH=$CONDA_DIR/bin:$PATH
34 | - if ! $WITH_CMAKE; then $SCRIPTS/travis_setup_makefile_config.sh; fi
35 |
36 | script: $SCRIPTS/travis_build_and_test.sh
37 |
38 | notifications:
39 | # Emails are sent to the committer's git-configured email address by default,
40 | # but only if they have access to the repository. To enable Travis on your
41 | # public fork of Caffe, just go to travis-ci.org and flip the switch on for
42 | # your Caffe fork. To configure your git email address, use:
43 | # git config --global user.email me@example.com
44 | email:
45 | on_success: always
46 | on_failure: always
47 |
48 | # IRC notifications disabled by default.
49 | # Uncomment next 5 lines to send notifications to chat.freenode.net#caffe
50 | # irc:
51 | # channels:
52 | # - "chat.freenode.net#caffe"
53 | # template:
54 | # - "%{repository}/%{branch} (%{commit} - %{author}): %{message}"
55 |
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 |
3 | Caffe is developed by a core set of BVLC members and the open-source community.
4 |
5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)!
6 |
7 | **For the detailed history of contributions** of a given file, try
8 |
9 | git blame file
10 |
11 | to see line-by-line credits and
12 |
13 | git log --follow file
14 |
15 | to see the change log even across renames and rewrites.
16 |
17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details.
18 |
19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE.
20 |
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | See http://caffe.berkeleyvision.org/installation.html for the latest
4 | installation instructions.
5 |
6 | Check the issue tracker in case you need help:
7 | https://github.com/BVLC/caffe/issues
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Caffe-Data-Augmentation
2 |
3 | The original repository for Caffe, developed by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu)) and community contributors, is at [BVLC/caffe](https://github.com/BVLC/caffe/tree/rc2).
4 |
5 | This project adds a data augmentation feature to Caffe, augmenting the data in several ways.
6 |
7 | The ways in which the data is augmented are explained here:
8 |
9 | - Image Translation - a random shift of the entire image along the x and y axes. The shift is drawn uniformly between -7 and 7 pixels.
10 | - Image Rescaling - shrinking or enlarging the image (before cropping) by a random uniform factor between 0.8 and 1.2.
11 | - Horizontal Flipping - flipping the image along the horizontal axis.
12 | - Vertical Flipping - flipping the image along the vertical axis.
13 | - Elastic Deformation with Random Interpolation - dislocating pixels and interpolating with a randomly chosen OpenCV interpolation method.
14 | - Color Noising - adding small independent noise to each color channel of the image.
15 | - Brightness Noising - adding small noise to the brightness of each pixel.
16 | - Small Blurring - convolving the image with a small, randomly sized blurring kernel.
17 | - Single Random Transformation - choosing one transformation at random.
18 | - Multiple Random Transformations - choosing each transformation with probability 1/7, so that on average one transformation is applied per image.
19 |
20 | The desired transformation(s) is selected by the `transform_type` parameter within the prototxt file for the data layer. The value of the parameter corresponds to one of the transformation schemes described above.
21 | For example, `transform_type=4` uses vertical flipping as its transformation.
22 |
23 |
24 | This project was developed by Shani Rehana, Baruch Epstein and Shahar Katz.
25 |
26 | The latest version for this project is rc2.
27 |
--------------------------------------------------------------------------------
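
For illustration, a data layer selecting one of these schemes might be written as the sketch below. This is a minimal sketch only: the exact name and placement of the `transform_type` field are defined by this fork's caffe.proto (treat the field position as an assumption), and the LMDB source path is a placeholder.

    layer {
      name: "data"
      type: "Data"
      top: "data"
      top: "label"
      data_param {
        source: "examples/cifar10/cifar10_train_lmdb"  # placeholder dataset path
        backend: LMDB
        batch_size: 100
      }
      # assumed placement: value 4 selects vertical flipping (scheme 4 above)
      transform_type: 4
    }
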
/caffe.cloc:
--------------------------------------------------------------------------------
1 | Bourne Shell
2 | filter remove_matches ^\s*#
3 | filter remove_inline #.*$
4 | extension sh
5 | script_exe sh
6 | C
7 | filter remove_matches ^\s*//
8 | filter call_regexp_common C
9 | filter remove_inline //.*$
10 | extension c
11 | extension ec
12 | extension pgc
13 | C++
14 | filter remove_matches ^\s*//
15 | filter remove_inline //.*$
16 | filter call_regexp_common C
17 | extension C
18 | extension cc
19 | extension cpp
20 | extension cxx
21 | extension pcc
22 | C/C++ Header
23 | filter remove_matches ^\s*//
24 | filter call_regexp_common C
25 | filter remove_inline //.*$
26 | extension H
27 | extension h
28 | extension hh
29 | extension hpp
30 | CUDA
31 | filter remove_matches ^\s*//
32 | filter remove_inline //.*$
33 | filter call_regexp_common C
34 | extension cu
35 | Python
36 | filter remove_matches ^\s*#
37 | filter docstring_to_C
38 | filter call_regexp_common C
39 | filter remove_inline #.*$
40 | extension py
41 | make
42 | filter remove_matches ^\s*#
43 | filter remove_inline #.*$
44 | extension Gnumakefile
45 | extension Makefile
46 | extension am
47 | extension gnumakefile
48 | extension makefile
49 | filename Gnumakefile
50 | filename Makefile
51 | filename gnumakefile
52 | filename makefile
53 | script_exe make
54 |
--------------------------------------------------------------------------------
/cmake/External/glog.cmake:
--------------------------------------------------------------------------------
1 | # glog depends on gflags
2 | include("cmake/External/gflags.cmake")
3 |
4 | if (NOT __GLOG_INCLUDED)
5 | set(__GLOG_INCLUDED TRUE)
6 |
7 | # try the system-wide glog first
8 | find_package(Glog)
9 | if (GLOG_FOUND)
10 | set(GLOG_EXTERNAL FALSE)
11 | else()
12 | # fetch and build glog from github
13 |
14 | # build directory
15 | set(glog_PREFIX ${CMAKE_BINARY_DIR}/external/glog-prefix)
16 | # install directory
17 | set(glog_INSTALL ${CMAKE_BINARY_DIR}/external/glog-install)
18 |
19 | # we build glog statically, but want to link it into the caffe shared library
20 | # this requires position-independent code
21 | if (UNIX)
22 | set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC")
23 | endif()
24 |
25 | set(GLOG_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})
26 | set(GLOG_C_FLAGS ${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})
27 |
28 | # depend on gflags if we're also building it
29 | if (GFLAGS_EXTERNAL)
30 | set(GLOG_DEPENDS gflags)
31 | endif()
32 |
33 | ExternalProject_Add(glog
34 | DEPENDS ${GLOG_DEPENDS}
35 | PREFIX ${glog_PREFIX}
36 | GIT_REPOSITORY "https://github.com/google/glog"
37 | GIT_TAG "v0.3.4"
38 | UPDATE_COMMAND ""
39 | INSTALL_DIR ${glog_INSTALL}
40 | CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/..
41 | LOG_DOWNLOAD 1
42 | LOG_CONFIGURE 1
43 | LOG_INSTALL 1
44 | )
45 |
46 | set(GLOG_FOUND TRUE)
47 | set(GLOG_INCLUDE_DIRS ${glog_INSTALL}/include)
48 | set(GLOG_LIBRARIES ${GFLAGS_LIBRARIES} ${glog_INSTALL}/lib/libglog.a)
49 | set(GLOG_LIBRARY_DIRS ${glog_INSTALL}/lib)
50 | set(GLOG_EXTERNAL TRUE)
51 |
52 | list(APPEND external_project_dependencies glog)
53 | endif()
54 |
55 | endif()
56 |
57 |
--------------------------------------------------------------------------------
/cmake/Misc.cmake:
--------------------------------------------------------------------------------
1 | # ---[ Configuration types
2 | set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE)
3 | mark_as_advanced(CMAKE_CONFIGURATION_TYPES)
4 |
5 | if(DEFINED CMAKE_BUILD_TYPE)
6 | set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES})
7 | endif()
8 |
9 | # --[ If user doesn't specify build type then assume release
10 | if("${CMAKE_BUILD_TYPE}" STREQUAL "")
11 | set(CMAKE_BUILD_TYPE Release)
12 | endif()
13 |
14 | if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
15 | set(CMAKE_COMPILER_IS_CLANGXX TRUE)
16 | endif()
17 |
18 | # ---[ Solution folders
19 | caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) )
20 |
21 | if(USE_PROJECT_FOLDERS)
22 | set_property(GLOBAL PROPERTY USE_FOLDERS ON)
23 | set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets")
24 | endif()
25 |
26 | # ---[ Install options
27 | if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
28 | set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE)
29 | endif()
30 |
31 | # ---[ RPATH settings
32 | set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for shared library rpath")
33 | set(CMAKE_MACOSX_RPATH TRUE)
34 |
35 | list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_system_dir)
36 | if(${__is_system_dir} STREQUAL -1)
37 | set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib)
38 | endif()
39 |
40 | # ---[ Funny target
41 | if(UNIX OR APPLE)
42 | add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build"
43 | COMMENT "Adding symlink: /build -> ${PROJECT_BINARY_DIR}" )
44 | endif()
45 |
46 | # ---[ Set debug postfix
47 | set(Caffe_DEBUG_POSTFIX "-d")
48 |
49 | set(CAffe_POSTFIX "")
50 | if(CMAKE_BUILD_TYPE MATCHES "Debug")
51 | set(CAffe_POSTFIX ${Caffe_DEBUG_POSTFIX})
52 | endif()
53 |
--------------------------------------------------------------------------------
/cmake/Modules/FindAtlas.cmake:
--------------------------------------------------------------------------------
1 | # Find the Atlas (and Lapack) libraries
2 | #
3 | # The following variables are optionally searched for defaults
4 | # Atlas_ROOT_DIR: Base directory where all Atlas components are found
5 | #
6 | # The following are set after configuration is done:
7 | # Atlas_FOUND
8 | # Atlas_INCLUDE_DIRS
9 | # Atlas_LIBRARIES
10 | # Atlas_LIBRARY_DIRS
11 |
12 | set(Atlas_INCLUDE_SEARCH_PATHS
13 | /usr/include/atlas
14 | /usr/include/atlas-base
15 | $ENV{Atlas_ROOT_DIR}
16 | $ENV{Atlas_ROOT_DIR}/include
17 | )
18 |
19 | set(Atlas_LIB_SEARCH_PATHS
20 | /usr/lib/atlas
21 | /usr/lib/atlas-base
22 | $ENV{Atlas_ROOT_DIR}
23 | $ENV{Atlas_ROOT_DIR}/lib
24 | )
25 |
26 | find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
27 | find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
28 |
29 | find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS})
30 | find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
31 | find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
32 |
33 | set(LOOKED_FOR
34 | Atlas_CBLAS_INCLUDE_DIR
35 | Atlas_CLAPACK_INCLUDE_DIR
36 |
37 | Atlas_CBLAS_LIBRARY
38 | Atlas_BLAS_LIBRARY
39 | Atlas_LAPACK_LIBRARY
40 | )
41 |
42 | include(FindPackageHandleStandardArgs)
43 | find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR})
44 |
45 | if(ATLAS_FOUND)
46 | set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR})
47 | set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY})
48 | mark_as_advanced(${LOOKED_FOR})
49 |
50 | message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})")
51 | endif(ATLAS_FOUND)
52 |
53 |
--------------------------------------------------------------------------------
/cmake/Modules/FindGFlags.cmake:
--------------------------------------------------------------------------------
1 | # - Try to find GFLAGS
2 | #
3 | # The following variables are optionally searched for defaults
4 | # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found
5 | #
6 | # The following are set after configuration is done:
7 | # GFLAGS_FOUND
8 | # GFLAGS_INCLUDE_DIRS
9 | # GFLAGS_LIBRARIES
10 | # GFLAGS_LIBRARY_DIRS
11 |
12 | include(FindPackageHandleStandardArgs)
13 |
14 | set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder containing Gflags")
15 |
16 | # We are testing only a couple of files in the include directories
17 | if(WIN32)
18 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
19 | PATHS ${GFLAGS_ROOT_DIR}/src/windows)
20 | else()
21 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
22 | PATHS ${GFLAGS_ROOT_DIR})
23 | endif()
24 |
25 | if(MSVC)
26 | find_library(GFLAGS_LIBRARY_RELEASE
27 | NAMES libgflags
28 | PATHS ${GFLAGS_ROOT_DIR}
29 | PATH_SUFFIXES Release)
30 |
31 | find_library(GFLAGS_LIBRARY_DEBUG
32 | NAMES libgflags-debug
33 | PATHS ${GFLAGS_ROOT_DIR}
34 | PATH_SUFFIXES Debug)
35 |
36 | set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
37 | else()
38 | find_library(GFLAGS_LIBRARY gflags)
39 | endif()
40 |
41 | find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)
42 |
43 |
44 | if(GFLAGS_FOUND)
45 | set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
46 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
47 | message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})")
48 | mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE
49 | GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR)
50 | endif()
51 |
--------------------------------------------------------------------------------
/cmake/Modules/FindGlog.cmake:
--------------------------------------------------------------------------------
1 | # - Try to find Glog
2 | #
3 | # The following variables are optionally searched for defaults
4 | # GLOG_ROOT_DIR: Base directory where all GLOG components are found
5 | #
6 | # The following are set after configuration is done:
7 | # GLOG_FOUND
8 | # GLOG_INCLUDE_DIRS
9 | # GLOG_LIBRARIES
10 | # GLOG_LIBRARY_DIRS
11 |
12 | include(FindPackageHandleStandardArgs)
13 |
14 | set(GLOG_ROOT_DIR "" CACHE PATH "Folder containing Google glog")
15 |
16 | if(WIN32)
17 | find_path(GLOG_INCLUDE_DIR glog/logging.h
18 | PATHS ${GLOG_ROOT_DIR}/src/windows)
19 | else()
20 | find_path(GLOG_INCLUDE_DIR glog/logging.h
21 | PATHS ${GLOG_ROOT_DIR})
22 | endif()
23 |
24 | if(MSVC)
25 | find_library(GLOG_LIBRARY_RELEASE libglog_static
26 | PATHS ${GLOG_ROOT_DIR}
27 | PATH_SUFFIXES Release)
28 |
29 | find_library(GLOG_LIBRARY_DEBUG libglog_static
30 | PATHS ${GLOG_ROOT_DIR}
31 | PATH_SUFFIXES Debug)
32 |
33 | set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG})
34 | else()
35 | find_library(GLOG_LIBRARY glog
36 | PATHS ${GLOG_ROOT_DIR}
37 | PATH_SUFFIXES lib lib64)
38 | endif()
39 |
40 | find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)
41 |
42 | if(GLOG_FOUND)
43 | set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR})
44 | set(GLOG_LIBRARIES ${GLOG_LIBRARY})
45 | message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})")
46 | mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG
47 | GLOG_LIBRARY GLOG_INCLUDE_DIR)
48 | endif()
49 |
--------------------------------------------------------------------------------
/cmake/Modules/FindLMDB.cmake:
--------------------------------------------------------------------------------
1 | # Try to find the LMDB libraries and headers
2 | # LMDB_FOUND - system has LMDB lib
3 | # LMDB_INCLUDE_DIR - the LMDB include directory
4 | # LMDB_LIBRARIES - Libraries needed to use LMDB
5 |
6 | # FindCWD based on FindGMP by:
7 | # Copyright (c) 2006, Laurent Montel,
8 | #
9 | # Redistribution and use is allowed according to the terms of the BSD license.
10 |
11 | # Adapted from FindCWD by:
12 | # Copyright 2013 Conrad Steenberg
13 | # Aug 31, 2013
14 |
15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include")
16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" )
17 |
18 | include(FindPackageHandleStandardArgs)
19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES)
20 |
21 | if(LMDB_FOUND)
22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})")
23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES)
24 |
25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h
26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH)
27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}")
28 | endif()
29 |
--------------------------------------------------------------------------------
/cmake/Modules/FindLevelDB.cmake:
--------------------------------------------------------------------------------
1 | # - Find LevelDB
2 | #
3 | # LevelDB_INCLUDES - List of LevelDB includes
4 | # LevelDB_LIBRARIES - List of libraries when using LevelDB.
5 | # LevelDB_FOUND - True if LevelDB found.
6 |
7 | # Look for the header file.
8 | find_path(LevelDB_INCLUDE NAMES leveldb/db.h
9 | PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include
10 | DOC "Path in which the file leveldb/db.h is located." )
11 |
12 | # Look for the library.
13 | find_library(LevelDB_LIBRARY NAMES leveldb
14 | PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib
15 | DOC "Path to leveldb library." )
16 |
17 | include(FindPackageHandleStandardArgs)
18 | find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY)
19 |
20 | if(LEVELDB_FOUND)
21 | message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})")
22 | set(LevelDB_INCLUDES ${LevelDB_INCLUDE})
23 | set(LevelDB_LIBRARIES ${LevelDB_LIBRARY})
24 | mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY)
25 |
26 | if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h")
27 | file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines
28 | REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;")
29 |
30 | foreach(__line ${__version_lines})
31 | if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);")
32 | set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1})
33 | elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);")
34 | set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1})
35 | endif()
36 | endforeach()
37 |
38 | if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR)
39 | set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}")
40 | endif()
41 |
42 | caffe_clear_vars(__line __version_lines)
43 | endif()
44 | endif()
45 |
--------------------------------------------------------------------------------
/cmake/Modules/FindMatlabMex.cmake:
--------------------------------------------------------------------------------
1 | # This module looks for the MATLAB mex compiler
2 | # Defines variables:
3 | # Matlab_DIR - Matlab root dir
4 | # Matlab_mex - path to mex compiler
5 | # Matlab_mexext - path to mexext
6 |
7 | if(MSVC)
8 | foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7")
9 | get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE)
10 | if(__matlab_root)
11 | break()
12 | endif()
13 | endforeach()
14 | endif()
15 |
16 | if(APPLE)
17 | foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" "R2010a")
18 | if(EXISTS /Applications/MATLAB_${__ver}.app)
19 | set(__matlab_root /Applications/MATLAB_${__ver}.app)
20 | break()
21 | endif()
22 | endforeach()
23 | endif()
24 |
25 | if(UNIX)
26 | execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE
27 | OUTPUT_VARIABLE __out RESULT_VARIABLE __res)
28 |
29 | if(__res MATCHES 0) # Suppress `readlink` warning if `which` returned nothing
30 | execute_process(COMMAND which matlab COMMAND xargs readlink
31 | COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n
32 | OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE)
33 | endif()
34 | endif()
35 |
36 |
37 | find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root}
38 | DOC "Matlab directory" NO_DEFAULT_PATH)
39 |
40 | find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
41 | find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
42 |
43 | include(FindPackageHandleStandardArgs)
44 | find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext)
45 |
46 | if(MATLABMEX_FOUND)
47 | mark_as_advanced(Matlab_mex Matlab_mexext)
48 | endif()
49 |
--------------------------------------------------------------------------------
/cmake/Modules/FindOpenBLAS.cmake:
--------------------------------------------------------------------------------
1 |
2 |
3 | SET(Open_BLAS_INCLUDE_SEARCH_PATHS
4 | /usr/include
5 | /usr/include/openblas-base
6 | /usr/local/include
7 | /usr/local/include/openblas-base
8 | /opt/OpenBLAS/include
9 | $ENV{OpenBLAS_HOME}
10 | $ENV{OpenBLAS_HOME}/include
11 | )
12 |
13 | SET(Open_BLAS_LIB_SEARCH_PATHS
14 | /lib/
15 | /lib/openblas-base
16 | /lib64/
17 | /usr/lib
18 | /usr/lib/openblas-base
19 | /usr/lib64
20 | /usr/local/lib
21 | /usr/local/lib64
22 | /opt/OpenBLAS/lib
23 | $ENV{OpenBLAS}
24 | $ENV{OpenBLAS}/lib
25 | $ENV{OpenBLAS_HOME}
26 | $ENV{OpenBLAS_HOME}/lib
27 | )
28 |
29 | FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS})
30 | FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})
31 |
32 | SET(OpenBLAS_FOUND ON)
33 |
34 | # Check include files
35 | IF(NOT OpenBLAS_INCLUDE_DIR)
36 | SET(OpenBLAS_FOUND OFF)
37 | MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off")
38 | ENDIF()
39 |
40 | # Check libraries
41 | IF(NOT OpenBLAS_LIB)
42 | SET(OpenBLAS_FOUND OFF)
43 | MESSAGE(STATUS "Could not find OpenBLAS lib. Turning OpenBLAS_FOUND off")
44 | ENDIF()
45 |
46 | IF (OpenBLAS_FOUND)
47 | IF (NOT OpenBLAS_FIND_QUIETLY)
48 | MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}")
49 | MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}")
50 | ENDIF (NOT OpenBLAS_FIND_QUIETLY)
51 | ELSE (OpenBLAS_FOUND)
52 | IF (OpenBLAS_FIND_REQUIRED)
53 | MESSAGE(FATAL_ERROR "Could not find OpenBLAS")
54 | ENDIF (OpenBLAS_FIND_REQUIRED)
55 | ENDIF (OpenBLAS_FOUND)
56 |
57 | MARK_AS_ADVANCED(
58 | OpenBLAS_INCLUDE_DIR
59 | OpenBLAS_LIB
60 | OpenBLAS
61 | )
62 |
63 |
--------------------------------------------------------------------------------
/cmake/Modules/FindSnappy.cmake:
--------------------------------------------------------------------------------
1 | # Find the Snappy libraries
2 | #
3 | # The following variables are optionally searched for defaults
4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found
5 | #
6 | # The following are set after configuration is done:
7 | # SNAPPY_FOUND
8 | # Snappy_INCLUDE_DIR
9 | # Snappy_LIBRARIES
10 |
11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h
12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include)
13 |
14 | find_library(Snappy_LIBRARIES NAMES snappy
15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib)
16 |
17 | include(FindPackageHandleStandardArgs)
18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES)
19 |
20 | if(SNAPPY_FOUND)
21 | message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})")
22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES)
23 |
24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h
25 | SNAPPY_VERSION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL)
26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}")
27 | endif()
28 |
29 |
--------------------------------------------------------------------------------
/cmake/Modules/FindvecLib.cmake:
--------------------------------------------------------------------------------
1 | # Find the vecLib libraries as part of Accelerate.framework or as a standalone framework
2 | #
3 | # The following are set after configuration is done:
4 | # VECLIB_FOUND
5 | # vecLib_INCLUDE_DIR
6 | # vecLib_LINKER_LIBS
7 |
8 |
9 | if(NOT APPLE)
10 | return()
11 | endif()
12 |
13 | set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers")
14 |
15 | find_path(vecLib_INCLUDE_DIR vecLib.h
16 | DOC "vecLib include directory"
17 | PATHS /System/Library/${__veclib_include_suffix}
18 | /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix}
19 | /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/)
20 |
21 | include(FindPackageHandleStandardArgs)
22 | find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR)
23 |
24 | if(VECLIB_FOUND)
25 | if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*")
26 | set(vecLib_LINKER_LIBS -lcblas "-framework vecLib")
27 | message(STATUS "Found standalone vecLib.framework")
28 | else()
29 | set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate")
30 | message(STATUS "Found vecLib as part of Accelerate.framework")
31 | endif()
32 |
33 | mark_as_advanced(vecLib_INCLUDE_DIR)
34 | endif()
35 |
--------------------------------------------------------------------------------
/cmake/Templates/CaffeConfig.cmake.in:
--------------------------------------------------------------------------------
1 | # Config file for the Caffe package.
2 | #
3 | # Note:
4 | # Caffe and this config file depend on OpenCV,
5 | # so put `find_package(OpenCV)` before searching for Caffe
6 | # via `find_package(Caffe)`. All other lib/include
7 | # dependencies are hard-coded in this file.
8 | #
9 | # After successful configuration the following variables
10 | # will be defined:
11 | #
12 | # Caffe_INCLUDE_DIRS - Caffe include directories
13 | # Caffe_LIBRARIES - libraries to link against
14 | # Caffe_DEFINITIONS - a list of definitions to pass to compiler
15 | #
16 | # Caffe_HAVE_CUDA - signals about CUDA support
17 | # Caffe_HAVE_CUDNN - signals about cuDNN support
18 |
19 |
20 | # OpenCV dependency
21 |
22 | if(NOT OpenCV_FOUND)
23 | set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@")
24 | if(Caffe_OpenCV_CONFIG_PATH)
25 | get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE)
26 |
27 | if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core)
28 | message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}")
29 | include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake)
30 | endif()
31 |
32 | else()
33 | find_package(OpenCV REQUIRED)
34 | endif()
35 | unset(Caffe_OpenCV_CONFIG_PATH)
36 | endif()
37 |
38 | # Compute paths
39 | get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
40 | set(Caffe_INCLUDE_DIRS "@Caffe_INCLUDE_DIRS@")
41 |
42 | @Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND@
43 |
44 | # Our library dependencies
45 | if(NOT TARGET caffe AND NOT caffe_BINARY_DIR)
46 | include("${Caffe_CMAKE_DIR}/CaffeTargets.cmake")
47 | endif()
48 |
49 | # List of IMPORTED libs created by CaffeTargets.cmake
50 | set(Caffe_LIBRARIES caffe)
51 |
52 | # Definitions
53 | set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@")
54 |
55 | # Cuda support variables
56 | set(Caffe_CPU_ONLY @CPU_ONLY@)
57 | set(Caffe_HAVE_CUDA @HAVE_CUDA@)
58 | set(Caffe_HAVE_CUDNN @HAVE_CUDNN@)
59 |
--------------------------------------------------------------------------------
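
As a consumer-side sketch of how the variables defined by this config are meant to be used (the project name `myapp` and `main.cpp` are placeholders, not part of Caffe):

    cmake_minimum_required(VERSION 2.8)
    project(myapp)

    # find OpenCV first, as the note in CaffeConfig.cmake requires
    find_package(OpenCV REQUIRED)
    find_package(Caffe REQUIRED)

    include_directories(${Caffe_INCLUDE_DIRS})
    add_definitions(${Caffe_DEFINITIONS})

    add_executable(myapp main.cpp)
    target_link_libraries(myapp ${Caffe_LIBRARIES})
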
/cmake/Templates/CaffeConfigVersion.cmake.in:
--------------------------------------------------------------------------------
1 | set(PACKAGE_VERSION "@Caffe_VERSION@")
2 |
3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible
4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
5 | set(PACKAGE_VERSION_COMPATIBLE FALSE)
6 | else()
7 | set(PACKAGE_VERSION_COMPATIBLE TRUE)
8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
9 | set(PACKAGE_VERSION_EXACT TRUE)
10 | endif()
11 | endif()
12 |
--------------------------------------------------------------------------------
/cmake/Templates/caffe_config.h.in:
--------------------------------------------------------------------------------
1 | /* Sources directory */
2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}"
3 |
4 | /* Binaries directory */
5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}"
6 |
7 | /* NVIDIA CUDA */
8 | #cmakedefine HAVE_CUDA
9 |
10 | /* NVIDIA cuDNN */
11 | #cmakedefine HAVE_CUDNN
12 | #cmakedefine USE_CUDNN
13 |
14 | /* CPU-only build */
15 | #cmakedefine CPU_ONLY
16 |
17 | /* Test device */
18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE}
19 |
20 | /* Temporary (TODO: remove) */
21 | #if 1
22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/"
23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/"
24 | #define CMAKE_EXT ".gen.cmake"
25 | #else
26 | #define CMAKE_SOURCE_DIR "src/"
27 | #define EXAMPLES_SOURCE_DIR "examples/"
28 | #define CMAKE_EXT ""
29 | #endif
30 |
31 | /* Matlab */
32 | #cmakedefine HAVE_MATLAB
33 |
--------------------------------------------------------------------------------
/cmake/lint.cmake:
--------------------------------------------------------------------------------
1 |
2 | set(CMAKE_SOURCE_DIR ..)
3 | set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py)
4 | set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc)
5 | set(EXCLUDE_FILE_EXTENSIONS pb.h pb.cc)
6 | set(LINT_DIRS include src/caffe examples tools python matlab)
7 |
8 | cmake_policy(SET CMP0009 NEW) # suppress cmake warning
9 |
10 | # find all files of interest
11 | foreach(ext ${SRC_FILE_EXTENSIONS})
12 | foreach(dir ${LINT_DIRS})
13 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext})
14 | set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES})
15 | endforeach()
16 | endforeach()
17 |
18 | # find all files that should be excluded
19 | foreach(ext ${EXCLUDE_FILE_EXTENSIONS})
20 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext})
21 | set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES})
22 | endforeach()
23 |
24 | # exclude generated pb files
25 | list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES})
26 |
27 | execute_process(
28 | COMMAND ${LINT_COMMAND} ${LINT_SOURCES}
29 | ERROR_VARIABLE LINT_OUTPUT
30 | ERROR_STRIP_TRAILING_WHITESPACE
31 | )
32 |
33 | string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT})
34 |
35 | list(GET LINT_OUTPUT -1 LINT_RESULT)
36 | list(REMOVE_AT LINT_OUTPUT -1)
37 | string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT})
38 | list(GET LINT_RESULT -1 NUM_ERRORS)
39 | if(NUM_ERRORS GREATER 0)
40 | foreach(msg ${LINT_OUTPUT})
41 | string(FIND ${msg} "Done" result)
42 | if(result LESS 0)
43 | message(STATUS ${msg})
44 | endif()
45 | endforeach()
46 | message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!")
47 | else()
48 | message(STATUS "Lint did not find any errors!")
49 | endif()
50 |
51 |
--------------------------------------------------------------------------------
/data/cifar10/get_cifar10.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # This script downloads the CIFAR10 (binary version) data and unzips it.
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd $DIR
6 |
7 | echo "Downloading..."
8 |
9 | wget --no-check-certificate http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz
10 |
11 | echo "Unzipping..."
12 |
13 | tar -xf cifar-10-binary.tar.gz && rm -f cifar-10-binary.tar.gz
14 | mv cifar-10-batches-bin/* . && rm -rf cifar-10-batches-bin
15 |
16 | # Creation is split out because leveldb sometimes causes segfault
17 | # and needs to be re-created.
18 |
19 | echo "Done."
20 |
--------------------------------------------------------------------------------
/data/ilsvrc12/get_ilsvrc_aux.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | #
3 | # N.B. This does not download the ilsvrc12 data set, as it is gargantuan.
4 | # This script downloads the imagenet example auxiliary files including:
5 | # - the ilsvrc12 image mean, binaryproto
6 | # - synset ids and words
7 | # - Python pickle-format data of ImageNet graph structure and relative infogain
8 | # - the training splits with labels
9 |
10 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
11 | cd $DIR
12 |
13 | echo "Downloading..."
14 |
15 | wget http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz
16 |
17 | echo "Unzipping..."
18 |
19 | tar -xf caffe_ilsvrc12.tar.gz && rm -f caffe_ilsvrc12.tar.gz
20 |
21 | echo "Done."
22 |
--------------------------------------------------------------------------------
/data/mnist/get_mnist.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # This script downloads the mnist data and unzips it.
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd $DIR
6 |
7 | echo "Downloading..."
8 |
9 | wget --no-check-certificate http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
10 | wget --no-check-certificate http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
11 | wget --no-check-certificate http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
12 | wget --no-check-certificate http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
13 |
14 | echo "Unzipping..."
15 |
16 | gunzip train-images-idx3-ubyte.gz
17 | gunzip train-labels-idx1-ubyte.gz
18 | gunzip t10k-images-idx3-ubyte.gz
19 | gunzip t10k-labels-idx1-ubyte.gz
20 |
21 | # Creation is split out because leveldb sometimes causes segfault
22 | # and needs to be re-created.
23 |
24 | echo "Done."
25 |
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | caffe.berkeleyvision.org
2 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Caffe Documentation
2 |
3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`.
4 |
5 | To push your changes to the documentation to the gh-pages branch of your repo or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh`.
6 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | defaults:
2 | -
3 | scope:
4 | path: "" # an empty string here means all files in the project
5 | values:
6 | layout: "default"
7 |
8 |
--------------------------------------------------------------------------------
/docs/images/GitHub-Mark-64px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/images/GitHub-Mark-64px.png
--------------------------------------------------------------------------------
/docs/images/caffeine-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/images/caffeine-icon.png
--------------------------------------------------------------------------------
/docs/install_apt.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Installation: Ubuntu"
3 | ---
4 |
5 | # Ubuntu Installation
6 |
7 | **General dependencies**
8 |
9 | sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
10 | sudo apt-get install --no-install-recommends libboost-all-dev
11 |
12 | **CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions.
13 | Install the library and latest driver separately; the driver bundled with the library is usually out-of-date.
14 | This can be skipped for CPU-only installation.
15 |
16 | **BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance.
17 |
18 | **Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface.
19 |
20 | **Remaining dependencies, 14.04**
21 |
22 | Everything is packaged in 14.04.
23 |
24 | sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev
25 |
26 | **Remaining dependencies, 12.04**
27 |
28 | These dependencies need manual installation in 12.04.
29 |
30 | # glog
31 | wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz
32 | tar zxvf glog-0.3.3.tar.gz
33 | cd glog-0.3.3
34 | ./configure
35 | make && make install
36 | # gflags
37 | wget https://github.com/schuhschuh/gflags/archive/master.zip
38 | unzip master.zip
39 | cd gflags-master
40 | mkdir build && cd build
41 | export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
42 | make && make install
43 | # lmdb
44 | git clone https://github.com/LMDB/lmdb
45 | cd lmdb/libraries/liblmdb
46 | make && make install
47 |
48 | Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first.
49 |
50 | Continue with [compilation](installation.html#compilation).
51 |
--------------------------------------------------------------------------------
/docs/install_yum.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Installation: RHEL / Fedora / CentOS"
3 | ---
4 |
5 | # RHEL / Fedora / CentOS Installation
6 |
7 | **General dependencies**
8 |
9 | sudo yum install protobuf-devel leveldb-devel snappy-devel opencv-devel boost-devel hdf5-devel
10 |
11 | **Remaining dependencies, recent OS**
12 |
13 | sudo yum install gflags-devel glog-devel lmdb-devel
14 |
15 | **Remaining dependencies, if not found**
16 |
17 | # glog
18 | wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz
19 | tar zxvf glog-0.3.3.tar.gz
20 | cd glog-0.3.3
21 | ./configure
22 | make && make install
23 | # gflags
24 | wget https://github.com/schuhschuh/gflags/archive/master.zip
25 | unzip master.zip
26 | cd gflags-master
27 | mkdir build && cd build
28 | export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
29 | make && make install
30 | # lmdb
31 | git clone git://gitorious.org/mdb/mdb.git
32 | cd mdb/libraries/liblmdb
33 | make && make install
34 |
35 | Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first.
36 |
37 | **CUDA**: Install via the NVIDIA package instead of `yum` to be certain of the library and driver versions.
38 | Install the library and latest driver separately; the driver bundled with the library is usually out-of-date.
39 |
40 |
41 | **BLAS**: install ATLAS by `sudo yum install atlas-devel` or install OpenBLAS or MKL for better CPU performance. For the Makefile build, uncomment and set `BLAS_LIB` accordingly, as ATLAS is usually installed under `/usr/lib[64]/atlas`.
42 |
43 | **Python** (optional): if you use the default Python you will need to `sudo yum install` the `python-devel` package to have the Python headers for building the pycaffe wrapper.
44 |
45 | Continue with [compilation](installation.html#compilation).
46 |
--------------------------------------------------------------------------------
/docs/stylesheets/reset.css:
--------------------------------------------------------------------------------
1 | /* MeyerWeb Reset */
2 |
3 | html, body, div, span, applet, object, iframe,
4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre,
5 | a, abbr, acronym, address, big, cite, code,
6 | del, dfn, em, img, ins, kbd, q, s, samp,
7 | small, strike, strong, sub, sup, tt, var,
8 | b, u, i, center,
9 | dl, dt, dd, ol, ul, li,
10 | fieldset, form, label, legend,
11 | table, caption, tbody, tfoot, thead, tr, th, td,
12 | article, aside, canvas, details, embed,
13 | figure, figcaption, footer, header, hgroup,
14 | menu, nav, output, ruby, section, summary,
15 | time, mark, audio, video {
16 | margin: 0;
17 | padding: 0;
18 | border: 0;
19 | font: inherit;
20 | vertical-align: baseline;
21 | }
22 |
--------------------------------------------------------------------------------
/docs/tutorial/convolution.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Convolution
3 | ---
4 | # Caffeinated Convolution
5 |
6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication.
7 | This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices.
8 |
9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo).
10 |
11 | As it turns out, this same reduction was independently explored in the context of conv. nets by
12 |
13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006.
14 |
--------------------------------------------------------------------------------
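
To make the reduction concrete, here is a toy numpy sketch of the im2col idea (an illustration only, not Caffe's actual implementation in src/caffe/util/im2col.cpp; single channel, stride 1, no padding):

    import numpy as np

    def im2col(x, k):
        """Unroll each k-by-k patch of the 2-D image x into one column."""
        H, W = x.shape
        cols = [x[i:i + k, j:j + k].ravel()
                for i in range(H - k + 1)
                for j in range(W - k + 1)]
        return np.array(cols).T  # shape: (k*k, number of patches)

    x = np.arange(16, dtype=float).reshape(4, 4)  # toy 4x4 "image"
    w = np.ones((3, 3)) / 9.0                     # 3x3 mean filter
    out = w.ravel() @ im2col(x, 3)                # the convolution is now one matrix product
    print(out.reshape(2, 2))                      # 2x2 output map

With a bank of filters stacked as rows of a matrix, the whole layer becomes a single matrix-matrix multiplication, which is exactly the BLAS-friendly form described above.
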
/docs/tutorial/fig/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/.gitignore
--------------------------------------------------------------------------------
/docs/tutorial/fig/backward.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/backward.jpg
--------------------------------------------------------------------------------
/docs/tutorial/fig/forward.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/forward.jpg
--------------------------------------------------------------------------------
/docs/tutorial/fig/forward_backward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/forward_backward.png
--------------------------------------------------------------------------------
/docs/tutorial/fig/layer.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/layer.jpg
--------------------------------------------------------------------------------
/docs/tutorial/fig/logreg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/docs/tutorial/fig/logreg.jpg
--------------------------------------------------------------------------------
/examples/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | file(GLOB_RECURSE examples_srcs "${PROJECT_SOURCE_DIR}/examples/*.cpp")
2 |
3 | foreach(source_file ${examples_srcs})
4 | # get file name
5 | get_filename_component(name ${source_file} NAME_WE)
6 |
7 | # get folder name
8 | get_filename_component(path ${source_file} PATH)
9 | get_filename_component(folder ${path} NAME_WE)
10 |
11 | add_executable(${name} ${source_file})
12 | target_link_libraries(${name} ${Caffe_LINK})
13 | caffe_default_properties(${name})
14 |
15 | # set back RUNTIME_OUTPUT_DIRECTORY
16 | set_target_properties(${name} PROPERTIES
17 | RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/examples/${folder}")
18 |
19 | caffe_set_solution_folder(${name} examples)
20 |
21 | # install
22 | install(TARGETS ${name} DESTINATION bin)
23 |
24 | if(UNIX OR APPLE)
25 | # Funny command to make tutorials work
26 | # TODO: remove in future as soon as naming is standardized everywhere
27 | set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX})
28 | add_custom_command(TARGET ${name} POST_BUILD
29 | COMMAND ln -sf "${__outname}" "${__outname}.bin")
30 | endif()
31 | endforeach()
32 |
--------------------------------------------------------------------------------
/examples/cifar10/cifar10_full_solver.prototxt:
--------------------------------------------------------------------------------
1 | # reduce learning rate after 120 epochs (60000 iters) by factor of 10
2 | # then another factor of 10 after 10 more epochs (5000 iters)
3 |
4 | # The train/test net protocol buffer definition
5 | net: "examples/cifar10/cifar10_full_train_test.prototxt"
6 | # test_iter specifies how many forward passes the test should carry out.
7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
8 | # covering the full 10,000 testing images.
9 | test_iter: 100
10 | # Carry out testing every 1000 training iterations.
11 | test_interval: 1000
12 | # The base learning rate, momentum and the weight decay of the network.
13 | base_lr: 0.001
14 | momentum: 0.9
15 | weight_decay: 0.004
16 | # The learning rate policy
17 | lr_policy: "fixed"
18 | # Display every 200 iterations
19 | display: 200
20 | # The maximum number of iterations
21 | max_iter: 60000
22 | # snapshot intermediate results
23 | snapshot: 10000
24 | snapshot_prefix: "examples/cifar10/cifar10_full"
25 | # solver mode: CPU or GPU
26 | solver_mode: GPU
27 |
--------------------------------------------------------------------------------
/examples/cifar10/cifar10_full_solver_lr1.prototxt:
--------------------------------------------------------------------------------
1 | # reduce learning rate after 120 epochs (60000 iters) by factor of 10
2 | # then another factor of 10 after 10 more epochs (5000 iters)
3 |
4 | # The train/test net protocol buffer definition
5 | net: "examples/cifar10/cifar10_full_train_test.prototxt"
6 | # test_iter specifies how many forward passes the test should carry out.
7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
8 | # covering the full 10,000 testing images.
9 | test_iter: 100
10 | # Carry out testing every 1000 training iterations.
11 | test_interval: 1000
12 | # The base learning rate, momentum and the weight decay of the network.
13 | base_lr: 0.0001
14 | momentum: 0.9
15 | weight_decay: 0.004
16 | # The learning rate policy
17 | lr_policy: "fixed"
18 | # Display every 200 iterations
19 | display: 200
20 | # The maximum number of iterations
21 | max_iter: 65000
22 | # snapshot intermediate results
23 | snapshot: 5000
24 | snapshot_prefix: "examples/cifar10/cifar10_full"
25 | # solver mode: CPU or GPU
26 | solver_mode: GPU
27 |
--------------------------------------------------------------------------------
/examples/cifar10/cifar10_full_solver_lr2.prototxt:
--------------------------------------------------------------------------------
1 | # reduce learning rate after 120 epochs (60000 iters) by factor of 10
2 | # then another factor of 10 after 10 more epochs (5000 iters)
3 |
4 | # The train/test net protocol buffer definition
5 | net: "examples/cifar10/cifar10_full_train_test.prototxt"
6 | # test_iter specifies how many forward passes the test should carry out.
7 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
8 | # covering the full 10,000 testing images.
9 | test_iter: 100
10 | # Carry out testing every 1000 training iterations.
11 | test_interval: 1000
12 | # The base learning rate, momentum and the weight decay of the network.
13 | base_lr: 0.00001
14 | momentum: 0.9
15 | weight_decay: 0.004
16 | # The learning rate policy
17 | lr_policy: "fixed"
18 | # Display every 200 iterations
19 | display: 200
20 | # The maximum number of iterations
21 | max_iter: 70000
22 | # snapshot intermediate results
23 | snapshot: 5000
24 | snapshot_prefix: "examples/cifar10/cifar10_full"
25 | # solver mode: CPU or GPU
26 | solver_mode: GPU
27 |
--------------------------------------------------------------------------------
/examples/cifar10/cifar10_quick_solver.prototxt:
--------------------------------------------------------------------------------
1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10
2 |
3 | # The train/test net protocol buffer definition
4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt"
5 | # test_iter specifies how many forward passes the test should carry out.
6 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
7 | # covering the full 10,000 testing images.
8 | test_iter: 100
9 | # Carry out testing every 500 training iterations.
10 | test_interval: 500
11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 0.001
13 | momentum: 0.9
14 | weight_decay: 0.004
15 | # The learning rate policy
16 | lr_policy: "fixed"
17 | # Display every 100 iterations
18 | display: 100
19 | # The maximum number of iterations
20 | max_iter: 4000
21 | # snapshot intermediate results
22 | snapshot: 4000
23 | snapshot_prefix: "examples/cifar10/cifar10_quick"
24 | # solver mode: CPU or GPU
25 | solver_mode: GPU
26 |
--------------------------------------------------------------------------------
/examples/cifar10/cifar10_quick_solver_lr1.prototxt:
--------------------------------------------------------------------------------
1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10
2 |
3 | # The train/test net protocol buffer definition
4 | net: "examples/cifar10/cifar10_quick_train_test.prototxt"
5 | # test_iter specifies how many forward passes the test should carry out.
6 | # In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
7 | # covering the full 10,000 testing images.
8 | test_iter: 100
9 | # Carry out testing every 500 training iterations.
10 | test_interval: 500
11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 0.0001
13 | momentum: 0.9
14 | weight_decay: 0.004
15 | # The learning rate policy
16 | lr_policy: "fixed"
17 | # Display every 100 iterations
18 | display: 100
19 | # The maximum number of iterations
20 | max_iter: 5000
21 | # snapshot intermediate results
22 | snapshot: 5000
23 | snapshot_prefix: "examples/cifar10/cifar10_quick"
24 | # solver mode: CPU or GPU
25 | solver_mode: GPU
26 |
--------------------------------------------------------------------------------
/examples/cifar10/create_cifar10.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # This script converts the cifar data into the format given by DBTYPE (lmdb by default).
3 |
4 | EXAMPLE=examples/cifar10
5 | DATA=data/cifar10
6 | DBTYPE=lmdb
7 |
8 | echo "Creating $DBTYPE..."
9 |
10 | rm -rf $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/cifar10_test_$DBTYPE
11 |
12 | ./build/examples/cifar10/convert_cifar_data.bin $DATA $EXAMPLE $DBTYPE
13 |
14 | echo "Computing image mean..."
15 |
16 | ./build/tools/compute_image_mean -backend=$DBTYPE \
17 | $EXAMPLE/cifar10_train_$DBTYPE $EXAMPLE/mean.binaryproto
18 |
19 | echo "Done."
20 |
--------------------------------------------------------------------------------
/examples/cifar10/train_full.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | TOOLS=./build/tools
4 |
5 | $TOOLS/caffe train \
6 | --solver=examples/cifar10/cifar10_full_solver.prototxt
7 |
8 | # reduce learning rate by factor of 10
9 | $TOOLS/caffe train \
10 | --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \
11 | --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate
12 |
13 | # reduce learning rate by factor of 10
14 | $TOOLS/caffe train \
15 | --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \
16 | --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate
17 |
--------------------------------------------------------------------------------
/examples/cifar10/train_quick.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | TOOLS=./build/tools
4 |
5 | $TOOLS/caffe train \
6 | --solver=examples/cifar10/cifar10_quick_solver.prototxt
7 |
8 | # reduce learning rate by factor of 10 after 8 epochs
9 | $TOOLS/caffe train \
10 | --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \
11 | --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate
12 |
--------------------------------------------------------------------------------
/examples/finetune_flickr_style/flickr_style.csv.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/examples/finetune_flickr_style/flickr_style.csv.gz
--------------------------------------------------------------------------------
/examples/finetune_flickr_style/style_names.txt:
--------------------------------------------------------------------------------
1 | Detailed
2 | Pastel
3 | Melancholy
4 | Noir
5 | HDR
6 | Vintage
7 | Long Exposure
8 | Horror
9 | Sunny
10 | Bright
11 | Hazy
12 | Bokeh
13 | Serene
14 | Texture
15 | Ethereal
16 | Macro
17 | Depth of Field
18 | Geometric Composition
19 | Minimal
20 | Romantic
21 |
--------------------------------------------------------------------------------
/examples/finetune_pascal_detection/pascal_finetune_solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt"
2 | test_iter: 100
3 | test_interval: 1000
4 | base_lr: 0.001
5 | lr_policy: "step"
6 | gamma: 0.1
7 | stepsize: 20000
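# ("step" policy: lr = base_lr * gamma^floor(iter / stepsize), i.e. the rate
# drops by a factor of 10 every 20000 iterations here.)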
8 | display: 20
9 | max_iter: 100000
10 | momentum: 0.9
11 | weight_decay: 0.0005
12 | snapshot: 10000
13 | snapshot_prefix: "examples/finetune_pascal_detection/pascal_det_finetune"
14 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/nonlinear_auto_test.prototxt:
--------------------------------------------------------------------------------
1 | layer {
2 | name: "data"
3 | type: "HDF5Data"
4 | top: "data"
5 | top: "label"
6 | hdf5_data_param {
7 | source: "examples/hdf5_classification/data/test.txt"
8 | batch_size: 10
9 | }
10 | }
11 | layer {
12 | name: "ip1"
13 | type: "InnerProduct"
14 | bottom: "data"
15 | top: "ip1"
16 | inner_product_param {
17 | num_output: 40
18 | weight_filler {
19 | type: "xavier"
20 | }
21 | }
22 | }
23 | layer {
24 | name: "relu1"
25 | type: "ReLU"
26 | bottom: "ip1"
27 | top: "ip1"
28 | }
29 | layer {
30 | name: "ip2"
31 | type: "InnerProduct"
32 | bottom: "ip1"
33 | top: "ip2"
34 | inner_product_param {
35 | num_output: 2
36 | weight_filler {
37 | type: "xavier"
38 | }
39 | }
40 | }
41 | layer {
42 | name: "accuracy"
43 | type: "Accuracy"
44 | bottom: "ip2"
45 | bottom: "label"
46 | top: "accuracy"
47 | }
48 | layer {
49 | name: "loss"
50 | type: "SoftmaxWithLoss"
51 | bottom: "ip2"
52 | bottom: "label"
53 | top: "loss"
54 | }
55 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/nonlinear_auto_train.prototxt:
--------------------------------------------------------------------------------
1 | layer {
2 | name: "data"
3 | type: "HDF5Data"
4 | top: "data"
5 | top: "label"
6 | hdf5_data_param {
7 | source: "examples/hdf5_classification/data/train.txt"
8 | batch_size: 10
9 | }
10 | }
11 | layer {
12 | name: "ip1"
13 | type: "InnerProduct"
14 | bottom: "data"
15 | top: "ip1"
16 | inner_product_param {
17 | num_output: 40
18 | weight_filler {
19 | type: "xavier"
20 | }
21 | }
22 | }
23 | layer {
24 | name: "relu1"
25 | type: "ReLU"
26 | bottom: "ip1"
27 | top: "ip1"
28 | }
29 | layer {
30 | name: "ip2"
31 | type: "InnerProduct"
32 | bottom: "ip1"
33 | top: "ip2"
34 | inner_product_param {
35 | num_output: 2
36 | weight_filler {
37 | type: "xavier"
38 | }
39 | }
40 | }
41 | layer {
42 | name: "accuracy"
43 | type: "Accuracy"
44 | bottom: "ip2"
45 | bottom: "label"
46 | top: "accuracy"
47 | }
48 | layer {
49 | name: "loss"
50 | type: "SoftmaxWithLoss"
51 | bottom: "ip2"
52 | bottom: "label"
53 | top: "loss"
54 | }
55 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/nonlinear_solver.prototxt:
--------------------------------------------------------------------------------
1 | train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt"
2 | test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt"
3 | test_iter: 250
4 | test_interval: 1000
5 | base_lr: 0.01
6 | lr_policy: "step"
7 | gamma: 0.1
8 | stepsize: 5000
9 | display: 1000
10 | max_iter: 10000
11 | momentum: 0.9
12 | weight_decay: 0.0005
13 | snapshot: 10000
14 | snapshot_prefix: "examples/hdf5_classification/data/train"
15 | solver_mode: CPU
16 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/nonlinear_train_val.prototxt:
--------------------------------------------------------------------------------
1 | name: "LogisticRegressionNet"
2 | layer {
3 | name: "data"
4 | type: "HDF5Data"
5 | top: "data"
6 | top: "label"
7 | include {
8 | phase: TRAIN
9 | }
10 | hdf5_data_param {
11 | source: "examples/hdf5_classification/data/train.txt"
12 | batch_size: 10
13 | }
14 | }
15 | layer {
16 | name: "data"
17 | type: "HDF5Data"
18 | top: "data"
19 | top: "label"
20 | include {
21 | phase: TEST
22 | }
23 | hdf5_data_param {
24 | source: "examples/hdf5_classification/data/test.txt"
25 | batch_size: 10
26 | }
27 | }
28 | layer {
29 | name: "fc1"
30 | type: "InnerProduct"
31 | bottom: "data"
32 | top: "fc1"
33 | param {
34 | lr_mult: 1
35 | decay_mult: 1
36 | }
37 | param {
38 | lr_mult: 2
39 | decay_mult: 0
40 | }
41 | inner_product_param {
42 | num_output: 40
43 | weight_filler {
44 | type: "xavier"
45 | }
46 | bias_filler {
47 | type: "constant"
48 | value: 0
49 | }
50 | }
51 | }
52 | layer {
53 | name: "relu1"
54 | type: "ReLU"
55 | bottom: "fc1"
56 | top: "fc1"
57 | }
58 | layer {
59 | name: "fc2"
60 | type: "InnerProduct"
61 | bottom: "fc1"
62 | top: "fc2"
63 | param {
64 | lr_mult: 1
65 | decay_mult: 1
66 | }
67 | param {
68 | lr_mult: 2
69 | decay_mult: 0
70 | }
71 | inner_product_param {
72 | num_output: 2
73 | weight_filler {
74 | type: "xavier"
75 | }
76 | bias_filler {
77 | type: "constant"
78 | value: 0
79 | }
80 | }
81 | }
82 | layer {
83 | name: "loss"
84 | type: "SoftmaxWithLoss"
85 | bottom: "fc2"
86 | bottom: "label"
87 | top: "loss"
88 | }
89 | layer {
90 | name: "accuracy"
91 | type: "Accuracy"
92 | bottom: "fc2"
93 | bottom: "label"
94 | top: "accuracy"
95 | include {
96 | phase: TEST
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/solver.prototxt:
--------------------------------------------------------------------------------
1 | train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
2 | test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
3 | test_iter: 250
4 | test_interval: 1000
5 | base_lr: 0.01
6 | lr_policy: "step"
7 | gamma: 0.1
8 | stepsize: 5000
9 | display: 1000
10 | max_iter: 10000
11 | momentum: 0.9
12 | weight_decay: 0.0005
13 | snapshot: 10000
14 | snapshot_prefix: "examples/hdf5_classification/data/train"
15 | solver_mode: CPU
16 |
--------------------------------------------------------------------------------
/examples/hdf5_classification/train_val.prototxt:
--------------------------------------------------------------------------------
1 | name: "LogisticRegressionNet"
2 | layer {
3 | name: "data"
4 | type: "HDF5Data"
5 | top: "data"
6 | top: "label"
7 | include {
8 | phase: TRAIN
9 | }
10 | hdf5_data_param {
11 | source: "examples/hdf5_classification/data/train.txt"
12 | batch_size: 10
13 | }
14 | }
15 | layer {
16 | name: "data"
17 | type: "HDF5Data"
18 | top: "data"
19 | top: "label"
20 | include {
21 | phase: TEST
22 | }
23 | hdf5_data_param {
24 | source: "examples/hdf5_classification/data/test.txt"
25 | batch_size: 10
26 | }
27 | }
28 | layer {
29 | name: "fc1"
30 | type: "InnerProduct"
31 | bottom: "data"
32 | top: "fc1"
33 | param {
34 | lr_mult: 1
35 | decay_mult: 1
36 | }
37 | param {
38 | lr_mult: 2
39 | decay_mult: 0
40 | }
41 | inner_product_param {
42 | num_output: 2
43 | weight_filler {
44 | type: "xavier"
45 | }
46 | bias_filler {
47 | type: "constant"
48 | value: 0
49 | }
50 | }
51 | }
52 | layer {
53 | name: "loss"
54 | type: "SoftmaxWithLoss"
55 | bottom: "fc1"
56 | bottom: "label"
57 | top: "loss"
58 | }
59 | layer {
60 | name: "accuracy"
61 | type: "Accuracy"
62 | bottom: "fc1"
63 | bottom: "label"
64 | top: "accuracy"
65 | include {
66 | phase: TEST
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/examples/imagenet/create_imagenet.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Create the imagenet lmdb inputs
3 | # N.B. set the path to the imagenet train + val data dirs
4 |
5 | EXAMPLE=examples/imagenet
6 | DATA=data/ilsvrc12
7 | TOOLS=build/tools
8 |
9 | TRAIN_DATA_ROOT=/path/to/imagenet/train/
10 | VAL_DATA_ROOT=/path/to/imagenet/val/
11 |
12 | # Set RESIZE=true to resize the images to 256x256. Leave as false if images have
13 | # already been resized using another tool.
14 | RESIZE=false
15 | if $RESIZE; then
16 | RESIZE_HEIGHT=256
17 | RESIZE_WIDTH=256
18 | else
19 | RESIZE_HEIGHT=0
20 | RESIZE_WIDTH=0
21 | fi
22 |
23 | if [ ! -d "$TRAIN_DATA_ROOT" ]; then
24 | echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT"
25 | echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \
26 | "where the ImageNet training data is stored."
27 | exit 1
28 | fi
29 |
30 | if [ ! -d "$VAL_DATA_ROOT" ]; then
31 | echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT"
32 | echo "Set the VAL_DATA_ROOT variable in create_imagenet.sh to the path" \
33 | "where the ImageNet validation data is stored."
34 | exit 1
35 | fi
36 |
37 | echo "Creating train lmdb..."
38 |
39 | GLOG_logtostderr=1 $TOOLS/convert_imageset \
40 | --resize_height=$RESIZE_HEIGHT \
41 | --resize_width=$RESIZE_WIDTH \
42 | --shuffle \
43 | $TRAIN_DATA_ROOT \
44 | $DATA/train.txt \
45 | $EXAMPLE/ilsvrc12_train_lmdb
46 |
47 | echo "Creating val lmdb..."
48 |
49 | GLOG_logtostderr=1 $TOOLS/convert_imageset \
50 | --resize_height=$RESIZE_HEIGHT \
51 | --resize_width=$RESIZE_WIDTH \
52 | --shuffle \
53 | $VAL_DATA_ROOT \
54 | $DATA/val.txt \
55 | $EXAMPLE/ilsvrc12_val_lmdb
56 |
57 | echo "Done."
58 |
--------------------------------------------------------------------------------
/examples/imagenet/make_imagenet_mean.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Compute the mean image from the imagenet training lmdb
3 | # N.B. this is available in data/ilsvrc12
4 |
5 | EXAMPLE=examples/imagenet
6 | DATA=data/ilsvrc12
7 | TOOLS=build/tools
8 |
9 | $TOOLS/compute_image_mean $EXAMPLE/ilsvrc12_train_lmdb \
10 | $DATA/imagenet_mean.binaryproto
11 |
12 | echo "Done."
13 |
--------------------------------------------------------------------------------
/examples/imagenet/resume_training.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build/tools/caffe train \
4 | --solver=models/bvlc_reference_caffenet/solver.prototxt \
5 | --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate
6 |
--------------------------------------------------------------------------------
/examples/imagenet/train_caffenet.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build/tools/caffe train \
4 | --solver=models/bvlc_reference_caffenet/solver.prototxt
5 |
--------------------------------------------------------------------------------
/examples/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/examples/images/cat.jpg
--------------------------------------------------------------------------------
/examples/images/cat_gray.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/examples/images/cat_gray.jpg
--------------------------------------------------------------------------------
/examples/images/fish-bike.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/examples/images/fish-bike.jpg
--------------------------------------------------------------------------------
/examples/mnist/create_mnist.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # This script converts the mnist data into lmdb/leveldb format,
3 | # depending on the value assigned to $BACKEND.
4 |
5 | EXAMPLE=examples/mnist
6 | DATA=data/mnist
7 | BUILD=build/examples/mnist
8 |
9 | BACKEND="lmdb"
10 |
11 | echo "Creating ${BACKEND}..."
12 |
13 | rm -rf $EXAMPLE/mnist_train_${BACKEND}
14 | rm -rf $EXAMPLE/mnist_test_${BACKEND}
15 |
16 | $BUILD/convert_mnist_data.bin $DATA/train-images-idx3-ubyte \
17 | $DATA/train-labels-idx1-ubyte $EXAMPLE/mnist_train_${BACKEND} --backend=${BACKEND}
18 | $BUILD/convert_mnist_data.bin $DATA/t10k-images-idx3-ubyte \
19 | $DATA/t10k-labels-idx1-ubyte $EXAMPLE/mnist_test_${BACKEND} --backend=${BACKEND}
20 |
21 | echo "Done."
22 |
--------------------------------------------------------------------------------
/examples/mnist/lenet_auto_solver.prototxt:
--------------------------------------------------------------------------------
1 | # The train/test net protocol buffer definition
2 | train_net: "examples/mnist/lenet_auto_train.prototxt"
3 | test_net: "examples/mnist/lenet_auto_test.prototxt"
4 | # test_iter specifies how many forward passes the test should carry out.
5 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
6 | # covering the full 10,000 testing images.
7 | test_iter: 100
8 | # Carry out testing every 500 training iterations.
9 | test_interval: 500
10 | # The base learning rate, momentum and the weight decay of the network.
11 | base_lr: 0.01
12 | momentum: 0.9
13 | weight_decay: 0.0005
14 | # The learning rate policy
15 | lr_policy: "inv"
16 | gamma: 0.0001
17 | power: 0.75
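# ("inv" policy: lr = base_lr * (1 + gamma * iter)^(-power), a smooth decay.)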
18 | # Display every 100 iterations
19 | display: 100
20 | # The maximum number of iterations
21 | max_iter: 10000
22 | # snapshot intermediate results
23 | snapshot: 5000
24 | snapshot_prefix: "examples/mnist/lenet"
25 |
--------------------------------------------------------------------------------
/examples/mnist/lenet_multistep_solver.prototxt:
--------------------------------------------------------------------------------
1 | # The train/test net protocol buffer definition
2 | net: "examples/mnist/lenet_train_test.prototxt"
3 | # test_iter specifies how many forward passes the test should carry out.
4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
5 | # covering the full 10,000 testing images.
6 | test_iter: 100
7 | # Carry out testing every 500 training iterations.
8 | test_interval: 500
9 | # The base learning rate, momentum and the weight decay of the network.
10 | base_lr: 0.01
11 | momentum: 0.9
12 | weight_decay: 0.0005
13 | # The learning rate policy
14 | lr_policy: "multistep"
15 | gamma: 0.9
16 | stepvalue: 5000
17 | stepvalue: 7000
18 | stepvalue: 8000
19 | stepvalue: 9000
20 | stepvalue: 9500
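# ("multistep" policy: lr = base_lr * gamma^n once n of the stepvalue
# milestones have been passed, e.g. 0.01 * 0.9^3 after iteration 8000.)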
21 | # Display every 100 iterations
22 | display: 100
23 | # The maximum number of iterations
24 | max_iter: 10000
25 | # snapshot intermediate results
26 | snapshot: 5000
27 | snapshot_prefix: "examples/mnist/lenet_multistep"
28 | # solver mode: CPU or GPU
29 | solver_mode: GPU
30 |
--------------------------------------------------------------------------------
/examples/mnist/lenet_solver.prototxt:
--------------------------------------------------------------------------------
1 | # The train/test net protocol buffer definition
2 | net: "examples/mnist/lenet_train_test.prototxt"
3 | # test_iter specifies how many forward passes the test should carry out.
4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
5 | # covering the full 10,000 testing images.
6 | test_iter: 100
7 | # Carry out testing every 500 training iterations.
8 | test_interval: 500
9 | # The base learning rate, momentum and the weight decay of the network.
10 | base_lr: 0.01
11 | momentum: 0.9
12 | weight_decay: 0.0005
13 | # The learning rate policy
14 | lr_policy: "inv"
15 | gamma: 0.0001
16 | power: 0.75
17 | # Display every 100 iterations
18 | display: 100
19 | # The maximum number of iterations
20 | max_iter: 10000
21 | # snapshot intermediate results
22 | snapshot: 5000
23 | snapshot_prefix: "examples/mnist/lenet"
24 | # solver mode: CPU or GPU
25 | solver_mode: GPU
26 |
--------------------------------------------------------------------------------
/examples/mnist/lenet_stepearly_solver.prototxt:
--------------------------------------------------------------------------------
1 | # The training protocol buffer definition
2 | train_net: "lenet_train.prototxt"
3 | # The testing protocol buffer definition
4 | test_net: "lenet_test.prototxt"
5 | # test_iter specifies how many forward passes the test should carry out.
6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
7 | # covering the full 10,000 testing images.
8 | test_iter: 100
9 | # Carry out testing every 500 training iterations.
10 | test_interval: 500
11 | # The base learning rate, momentum and the weight decay of the network.
12 | base_lr: 0.01
13 | momentum: 0.9
14 | weight_decay: 0.0005
15 | # The learning rate policy
16 | lr_policy: "stepearly"
17 | gamma: 0.9
18 | stepearly: 1
19 | # Display every 100 iterations
20 | display: 100
21 | # The maximum number of iterations
22 | max_iter: 10000
23 | # snapshot intermediate results
24 | snapshot: 5000
25 | snapshot_prefix: "lenet"
26 | # solver mode: 0 for CPU and 1 for GPU
27 | solver_mode: 1
28 | device_id: 1
29 |
--------------------------------------------------------------------------------
/examples/mnist/mnist_autoencoder_solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "examples/mnist/mnist_autoencoder.prototxt"
2 | test_state: { stage: 'test-on-train' }
3 | test_iter: 500
4 | test_state: { stage: 'test-on-test' }
5 | test_iter: 100
6 | test_interval: 500
7 | test_compute_loss: true
8 | base_lr: 0.01
9 | lr_policy: "step"
10 | gamma: 0.1
11 | stepsize: 10000
12 | display: 100
13 | max_iter: 65000
14 | weight_decay: 0.0005
15 | snapshot: 10000
16 | snapshot_prefix: "examples/mnist/mnist_autoencoder"
17 | momentum: 0.9
18 | # solver mode: CPU or GPU
19 | solver_mode: GPU
20 |
--------------------------------------------------------------------------------
/examples/mnist/mnist_autoencoder_solver_adagrad.prototxt:
--------------------------------------------------------------------------------
1 | net: "examples/mnist/mnist_autoencoder.prototxt"
2 | test_state: { stage: 'test-on-train' }
3 | test_iter: 500
4 | test_state: { stage: 'test-on-test' }
5 | test_iter: 100
6 | test_interval: 500
7 | test_compute_loss: true
8 | base_lr: 0.01
9 | lr_policy: "fixed"
10 | display: 100
11 | max_iter: 65000
12 | weight_decay: 0.0005
13 | snapshot: 10000
14 | snapshot_prefix: "examples/mnist/mnist_autoencoder_adagrad_train"
15 | # solver mode: CPU or GPU
16 | solver_mode: GPU
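# AdaGrad adapts per-parameter learning rates from the accumulated gradient
# history; note that no momentum is set, since AdaGrad does not use it.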
17 | solver_type: ADAGRAD
18 |
--------------------------------------------------------------------------------
/examples/mnist/mnist_autoencoder_solver_nesterov.prototxt:
--------------------------------------------------------------------------------
1 | net: "examples/mnist/mnist_autoencoder.prototxt"
2 | test_state: { stage: 'test-on-train' }
3 | test_iter: 500
4 | test_state: { stage: 'test-on-test' }
5 | test_iter: 100
6 | test_interval: 500
7 | test_compute_loss: true
8 | base_lr: 0.01
9 | lr_policy: "step"
10 | gamma: 0.1
11 | stepsize: 10000
12 | display: 100
13 | max_iter: 65000
14 | weight_decay: 0.0005
15 | snapshot: 10000
16 | snapshot_prefix: "examples/mnist/mnist_autoencoder_nesterov_train"
17 | momentum: 0.95
18 | # solver mode: CPU or GPU
19 | solver_mode: GPU
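# Nesterov's accelerated gradient: the momentum update is evaluated at the
# look-ahead point rather than at the current weights.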
20 | solver_type: NESTEROV
21 |
--------------------------------------------------------------------------------
/examples/mnist/train_lenet.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt
4 |
--------------------------------------------------------------------------------
/examples/mnist/train_lenet_consolidated.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build/tools/caffe train \
4 | --solver=examples/mnist/lenet_consolidated_solver.prototxt
5 |
--------------------------------------------------------------------------------
/examples/mnist/train_mnist_autoencoder.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ./build/tools/caffe train \
4 | --solver=examples/mnist/mnist_autoencoder_solver.prototxt
5 |
--------------------------------------------------------------------------------
/examples/mnist/train_mnist_autoencoder_adagrad.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ./build/tools/caffe train \
4 | --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt
5 |
--------------------------------------------------------------------------------
/examples/mnist/train_mnist_autoencoder_nesterov.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ./build/tools/caffe train \
4 | --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt
5 |
--------------------------------------------------------------------------------
/examples/net_surgery/conv.prototxt:
--------------------------------------------------------------------------------
1 | # Simple single-layer network to showcase editing model parameters.
2 | name: "convolution"
3 | input: "data"
4 | input_dim: 1
5 | input_dim: 1
6 | input_dim: 100
7 | input_dim: 100
8 | layer {
9 | name: "conv"
10 | type: "Convolution"
11 | bottom: "data"
12 | top: "conv"
13 | convolution_param {
14 | num_output: 3
15 | kernel_size: 5
16 | stride: 1
17 | weight_filler {
18 | type: "gaussian"
19 | std: 0.01
20 | }
21 | bias_filler {
22 | type: "constant"
23 | value: 0
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/examples/pycaffe/layers/pyloss.py:
--------------------------------------------------------------------------------
1 | import caffe
2 | import numpy as np
3 |
4 |
5 | class EuclideanLossLayer(caffe.Layer):
6 | """
7 | Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
8 | to demonstrate the class interface for developing layers in Python.
9 | """
10 |
11 | def setup(self, bottom, top):
12 | # check input pair
13 | if len(bottom) != 2:
14 | raise Exception("Need two inputs to compute distance.")
15 |
16 | def reshape(self, bottom, top):
17 | # check input dimensions match
18 | if bottom[0].count != bottom[1].count:
19 | raise Exception("Inputs must have the same dimension.")
20 | # difference is shape of inputs
21 | self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
22 | # loss output is scalar
23 | top[0].reshape(1)
24 |
25 | def forward(self, bottom, top):
26 | self.diff[...] = bottom[0].data - bottom[1].data
27 | top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
28 |
29 | def backward(self, top, propagate_down, bottom):
30 | for i in range(2):
31 | if not propagate_down[i]:
32 | continue
33 | if i == 0:
34 | sign = 1
35 | else:
36 | sign = -1
37 | bottom[i].diff[...] = sign * self.diff / bottom[i].num
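# The loss is E = sum(diff**2) / (2 N), so dE/d(bottom[0]) = diff / N
# and dE/d(bottom[1]) = -diff / N -- hence the sign flip above.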
38 |
--------------------------------------------------------------------------------
/examples/pycaffe/linreg.prototxt:
--------------------------------------------------------------------------------
1 | name: 'LinearRegressionExample'
2 | # define a simple network for linear regression on dummy data
3 | # that computes the loss by a PythonLayer.
4 | layer {
5 | type: 'DummyData'
6 | name: 'x'
7 | top: 'x'
8 | dummy_data_param {
9 | shape: { dim: 10 dim: 3 dim: 2 }
10 | data_filler: { type: 'gaussian' }
11 | }
12 | }
13 | layer {
14 | type: 'DummyData'
15 | name: 'y'
16 | top: 'y'
17 | dummy_data_param {
18 | shape: { dim: 10 dim: 3 dim: 2 }
19 | data_filler: { type: 'gaussian' }
20 | }
21 | }
22 | # include InnerProduct layers for parameters
23 | # so the net will need backward
24 | layer {
25 | type: 'InnerProduct'
26 | name: 'ipx'
27 | top: 'ipx'
28 | bottom: 'x'
29 | inner_product_param {
30 | num_output: 10
31 | weight_filler { type: 'xavier' }
32 | }
33 | }
34 | layer {
35 | type: 'InnerProduct'
36 | name: 'ipy'
37 | top: 'ipy'
38 | bottom: 'y'
39 | inner_product_param {
40 | num_output: 10
41 | weight_filler { type: 'xavier' }
42 | }
43 | }
44 | layer {
45 | type: 'Python'
46 | name: 'loss'
47 | top: 'loss'
48 | bottom: 'ipx'
49 | bottom: 'ipy'
50 | python_param {
51 | # the module name -- usually the filename -- that needs to be in $PYTHONPATH
52 | module: 'pyloss'
53 | # the layer name -- the class name in the module
54 | layer: 'EuclideanLossLayer'
55 | }
56 | # set loss weight so Caffe knows this is a loss layer.
57 | # since PythonLayer inherits directly from Layer, this isn't automatically
58 | # known to Caffe
59 | loss_weight: 1
60 | }
61 |
--------------------------------------------------------------------------------
/examples/siamese/create_mnist_siamese.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # This script converts the mnist data into leveldb format.
3 |
4 | EXAMPLES=./build/examples/siamese
5 | DATA=./data/mnist
6 |
7 | echo "Creating leveldb..."
8 |
9 | rm -rf ./examples/siamese/mnist_siamese_train_leveldb
10 | rm -rf ./examples/siamese/mnist_siamese_test_leveldb
11 |
12 | $EXAMPLES/convert_mnist_siamese_data.bin \
13 | $DATA/train-images-idx3-ubyte \
14 | $DATA/train-labels-idx1-ubyte \
15 | ./examples/siamese/mnist_siamese_train_leveldb
16 | $EXAMPLES/convert_mnist_siamese_data.bin \
17 | $DATA/t10k-images-idx3-ubyte \
18 | $DATA/t10k-labels-idx1-ubyte \
19 | ./examples/siamese/mnist_siamese_test_leveldb
20 |
21 | echo "Done."
22 |
--------------------------------------------------------------------------------
/examples/siamese/mnist_siamese.prototxt:
--------------------------------------------------------------------------------
1 | name: "mnist_siamese"
2 | input: "data"
3 | input_dim: 10000
4 | input_dim: 1
5 | input_dim: 28
6 | input_dim: 28
7 | layer {
8 | name: "conv1"
9 | type: "Convolution"
10 | bottom: "data"
11 | top: "conv1"
12 | param {
13 | lr_mult: 1
14 | }
15 | param {
16 | lr_mult: 2
17 | }
18 | convolution_param {
19 | num_output: 20
20 | kernel_size: 5
21 | stride: 1
22 | }
23 | }
24 | layer {
25 | name: "pool1"
26 | type: "Pooling"
27 | bottom: "conv1"
28 | top: "pool1"
29 | pooling_param {
30 | pool: MAX
31 | kernel_size: 2
32 | stride: 2
33 | }
34 | }
35 | layer {
36 | name: "conv2"
37 | type: "Convolution"
38 | bottom: "pool1"
39 | top: "conv2"
40 | param {
41 | lr_mult: 1
42 | }
43 | param {
44 | lr_mult: 2
45 | }
46 | convolution_param {
47 | num_output: 50
48 | kernel_size: 5
49 | stride: 1
50 | }
51 | }
52 | layer {
53 | name: "pool2"
54 | type: "Pooling"
55 | bottom: "conv2"
56 | top: "pool2"
57 | pooling_param {
58 | pool: MAX
59 | kernel_size: 2
60 | stride: 2
61 | }
62 | }
63 | layer {
64 | name: "ip1"
65 | type: "InnerProduct"
66 | bottom: "pool2"
67 | top: "ip1"
68 | param {
69 | lr_mult: 1
70 | }
71 | param {
72 | lr_mult: 2
73 | }
74 | inner_product_param {
75 | num_output: 500
76 | }
77 | }
78 | layer {
79 | name: "relu1"
80 | type: "ReLU"
81 | bottom: "ip1"
82 | top: "ip1"
83 | }
84 | layer {
85 | name: "ip2"
86 | type: "InnerProduct"
87 | bottom: "ip1"
88 | top: "ip2"
89 | param {
90 | lr_mult: 1
91 | }
92 | param {
93 | lr_mult: 2
94 | }
95 | inner_product_param {
96 | num_output: 10
97 | }
98 | }
99 | layer {
100 | name: "feat"
101 | type: "InnerProduct"
102 | bottom: "ip2"
103 | top: "feat"
104 | param {
105 | lr_mult: 1
106 | }
107 | param {
108 | lr_mult: 2
109 | }
110 | inner_product_param {
111 | num_output: 2
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/examples/siamese/mnist_siamese_solver.prototxt:
--------------------------------------------------------------------------------
1 | # The train/test net protocol buffer definition
2 | net: "examples/siamese/mnist_siamese_train_test.prototxt"
3 | # test_iter specifies how many forward passes the test should carry out.
4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations,
5 | # covering the full 10,000 testing images.
6 | test_iter: 100
7 | # Carry out testing every 500 training iterations.
8 | test_interval: 500
9 | # The base learning rate, momentum and the weight decay of the network.
10 | base_lr: 0.01
11 | momentum: 0.9
12 | weight_decay: 0.0000
13 | # The learning rate policy
14 | lr_policy: "inv"
15 | gamma: 0.0001
16 | power: 0.75
17 | # Display every 100 iterations
18 | display: 100
19 | # The maximum number of iterations
20 | max_iter: 50000
21 | # snapshot intermediate results
22 | snapshot: 5000
23 | snapshot_prefix: "examples/siamese/mnist_siamese"
24 | # solver mode: CPU or GPU
25 | solver_mode: GPU
26 |
--------------------------------------------------------------------------------
/examples/siamese/train_mnist_siamese.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | TOOLS=./build/tools
4 |
5 | $TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt
6 |
--------------------------------------------------------------------------------
/examples/web_demo/exifutil.py:
--------------------------------------------------------------------------------
1 | """
2 | This script works around skimage's lack of EXIF handling: it loads an image with PIL and applies the EXIF orientation tag before converting to an array.
3 | """
4 |
5 | from PIL import Image
6 | import numpy as np
7 |
8 | ORIENTATIONS = { # used in apply_orientation
9 | 2: (Image.FLIP_LEFT_RIGHT,),
10 | 3: (Image.ROTATE_180,),
11 | 4: (Image.FLIP_TOP_BOTTOM,),
12 | 5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
13 | 6: (Image.ROTATE_270,),
14 | 7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
15 | 8: (Image.ROTATE_90,)
16 | }
17 |
18 |
19 | def open_oriented_im(im_path):
20 | im = Image.open(im_path)
21 | if hasattr(im, '_getexif'):
22 | exif = im._getexif()
23 | if exif is not None and 274 in exif:
24 | orientation = exif[274]
25 | im = apply_orientation(im, orientation)
26 | img = np.asarray(im).astype(np.float32) / 255.
27 | if img.ndim == 2:
28 | img = img[:, :, np.newaxis]
29 | img = np.tile(img, (1, 1, 3))
30 | elif img.shape[2] == 4:
31 | img = img[:, :, :3]
32 | return img
33 |
34 |
35 | def apply_orientation(im, orientation):
36 | if orientation in ORIENTATIONS:
37 | for method in ORIENTATIONS[orientation]:
38 | im = im.transpose(method)
39 | return im
40 |
--------------------------------------------------------------------------------
/examples/web_demo/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Web demo
3 | description: Image classification demo running as a Flask web server.
4 | category: example
5 | include_in_docs: true
6 | priority: 10
7 | ---
8 |
9 | # Web Demo
10 |
11 | ## Requirements
12 |
13 | The demo server requires Python with some dependencies.
14 | To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](/installation.html)).
15 |
16 | Make sure that you have obtained the Reference CaffeNet Model and the ImageNet Auxiliary Data:
17 |
18 | ./scripts/download_model_binary.py models/bvlc_reference_caffenet
19 | ./data/ilsvrc12/get_ilsvrc_aux.sh
20 |
21 | NOTE: if you run into trouble, try re-downloading the auxiliary files.
22 |
23 | ## Run
24 |
25 | Running `python examples/web_demo/app.py` will bring up the demo server, accessible at `http://0.0.0.0:5000`.
26 | You can enable debug mode of the web server, or switch to a different port:
27 |
28 | % python examples/web_demo/app.py -h
29 | Usage: app.py [options]
30 |
31 | Options:
32 | -h, --help show this help message and exit
33 | -d, --debug enable debug mode
34 | -p PORT, --port=PORT which port to serve content on
35 |
36 | ## How are the "maximally accurate" results generated?
37 |
38 | In a nutshell: ImageNet predictions are made at the leaf nodes, but the WordNet hierarchy behind ImageNet allows leaf nodes to be united under more general parent nodes, with 'entity' at the very top.
39 | To give "maximally accurate" results, we "back off" from maximally specific predictions to maintain high accuracy.
40 | The `bet_file` that is loaded in the demo provides the graph structure and names of all relevant ImageNet nodes as well as measures of information gain between them.
41 | Please see the "Hedging your bets" paper from [CVPR 2012](http://www.image-net.org/projects/hedging/) for further information.
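
As an illustrative sketch of the back-off (not the demo's actual code; the helper names and the threshold here are hypothetical), accumulate leaf probabilities up the hierarchy and report the most specific node that clears a confidence bar:

    def back_off(leaf_probs, parent, threshold=0.9):
        # leaf_probs: {leaf: probability}; parent: {node: parent}, root maps to None.
        scores = dict(leaf_probs)
        for leaf, p in leaf_probs.items():
            node = parent[leaf]
            while node is not None:
                scores[node] = scores.get(node, 0.0) + p
                node = parent[node]

        def depth(n):
            d = 0
            while parent[n] is not None:
                n, d = parent[n], d + 1
            return d

        winners = [n for n, s in scores.items() if s >= threshold]
        return max(winners, key=depth) if winners else None

The actual demo additionally weights this choice by the information-gain measures supplied in the `bet_file`, as described in the paper.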
42 |
--------------------------------------------------------------------------------
/examples/web_demo/requirements.txt:
--------------------------------------------------------------------------------
1 | werkzeug
2 | flask
3 | tornado
4 | numpy
5 | pandas
6 | pillow
7 |
--------------------------------------------------------------------------------
/include/caffe/caffe.hpp:
--------------------------------------------------------------------------------
1 | // caffe.hpp is the header file that you need to include in your code. It wraps
2 | // all the internal caffe header files into one for simpler inclusion.
3 |
4 | #ifndef CAFFE_CAFFE_HPP_
5 | #define CAFFE_CAFFE_HPP_
6 |
7 | #include "caffe/blob.hpp"
8 | #include "caffe/common.hpp"
9 | #include "caffe/filler.hpp"
10 | #include "caffe/layer.hpp"
11 | #include "caffe/layer_factory.hpp"
12 | #include "caffe/net.hpp"
13 | #include "caffe/proto/caffe.pb.h"
14 | #include "caffe/solver.hpp"
15 | #include "caffe/util/benchmark.hpp"
16 | #include "caffe/util/io.hpp"
17 | #include "caffe/vision_layers.hpp"
18 |
19 | #endif // CAFFE_CAFFE_HPP_
20 |
--------------------------------------------------------------------------------
/include/caffe/internal_thread.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_INTERNAL_THREAD_HPP_
2 | #define CAFFE_INTERNAL_THREAD_HPP_
3 |
4 | #include "caffe/common.hpp"
5 |
6 | /**
7 | Forward declare boost::thread instead of including boost/thread.hpp
8 | to avoid a boost/NVCC issues (#1009, #1010) on OSX.
9 | */
10 | namespace boost { class thread; }
11 |
12 | namespace caffe {
13 |
14 | /**
15 | * Virtual class encapsulating boost::thread for use in a base class.
16 | * The child class will acquire the ability to run a single thread
17 | * by reimplementing the virtual function InternalThreadEntry.
18 | */
19 | class InternalThread {
20 | public:
21 | InternalThread() : thread_() {}
22 | virtual ~InternalThread();
23 |
24 | /** Returns true if the thread was successfully started. **/
25 | bool StartInternalThread();
26 |
27 | /** Will not return until the internal thread has exited. */
28 | bool WaitForInternalThreadToExit();
29 |
30 | bool is_started() const;
31 |
32 | protected:
33 | /* Implement this method in your subclass
34 | with the code you want your thread to run. */
35 | virtual void InternalThreadEntry() {}
36 |
37 | shared_ptr<boost::thread> thread_;
38 | };
39 |
40 | } // namespace caffe
41 |
42 | #endif // CAFFE_INTERNAL_THREAD_HPP_
43 |
--------------------------------------------------------------------------------
/include/caffe/python_layer.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_PYTHON_LAYER_HPP_
2 | #define CAFFE_PYTHON_LAYER_HPP_
3 |
4 | #include <boost/python.hpp>
5 | #include <vector>
6 |
7 | #include "caffe/layer.hpp"
8 |
9 | namespace bp = boost::python;
10 |
11 | namespace caffe {
12 |
13 | template <typename Dtype>
14 | class PythonLayer : public Layer<Dtype> {
15 | public:
16 | PythonLayer(PyObject* self, const LayerParameter& param)
17 | : Layer<Dtype>(param), self_(bp::handle<>(bp::borrowed(self))) { }
18 |
19 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
20 | const vector<Blob<Dtype>*>& top) {
21 | try {
22 | self_.attr("setup")(bottom, top);
23 | } catch (bp::error_already_set) {
24 | PyErr_Print();
25 | throw;
26 | }
27 | }
28 |
29 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
30 | const vector<Blob<Dtype>*>& top) {
31 | try {
32 | self_.attr("reshape")(bottom, top);
33 | } catch (bp::error_already_set) {
34 | PyErr_Print();
35 | throw;
36 | }
37 | }
38 |
39 | virtual inline const char* type() const { return "Python"; }
40 |
41 | protected:
42 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
43 | const vector<Blob<Dtype>*>& top) {
44 | try {
45 | self_.attr("forward")(bottom, top);
46 | } catch (bp::error_already_set) {
47 | PyErr_Print();
48 | throw;
49 | }
50 | }
51 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
52 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
53 | try {
54 | self_.attr("backward")(top, propagate_down, bottom);
55 | } catch (bp::error_already_set) {
56 | PyErr_Print();
57 | throw;
58 | }
59 | }
60 |
61 | private:
62 | bp::object self_;
63 | };
64 |
65 | } // namespace caffe
66 |
67 | #endif
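
For reference, the Python-side counterpart this header dispatches to is a `caffe.Layer` subclass exposing `setup`, `reshape`, `forward`, and `backward` (see `examples/pycaffe/layers/pyloss.py` for a real example). A minimal no-op sketch, with a hypothetical class name:

    import caffe

    class IdentityLayer(caffe.Layer):
        """Minimal sketch of the interface PythonLayer expects."""
        def setup(self, bottom, top):
            pass  # parse self.param_str, validate len(bottom), etc.
        def reshape(self, bottom, top):
            top[0].reshape(*bottom[0].data.shape)
        def forward(self, bottom, top):
            top[0].data[...] = bottom[0].data
        def backward(self, top, propagate_down, bottom):
            if propagate_down[0]:
                bottom[0].diff[...] = top[0].diff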
68 |
--------------------------------------------------------------------------------
/include/caffe/test/test_caffe_main.hpp:
--------------------------------------------------------------------------------
1 | // The main caffe test code. Your test cpp code should include this hpp
2 | // to allow a main function to be compiled into the binary.
3 | #ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
4 | #define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
5 |
6 | #include <glog/logging.h>
7 | #include <gtest/gtest.h>
8 |
9 | #include <cstdio>
10 | #include <cstdlib>
11 |
12 | #include "caffe/common.hpp"
13 |
14 | using std::cout;
15 | using std::endl;
16 |
17 | #ifdef CMAKE_BUILD
18 | #include "caffe_config.h"
19 | #else
20 | #define CUDA_TEST_DEVICE -1
21 | #define CMAKE_SOURCE_DIR "src/"
22 | #define EXAMPLES_SOURCE_DIR "examples/"
23 | #define CMAKE_EXT ""
24 | #endif
25 |
26 | int main(int argc, char** argv);
27 |
28 | namespace caffe {
29 |
30 | template <typename TypeParam>
31 | class MultiDeviceTest : public ::testing::Test {
32 | public:
33 | typedef typename TypeParam::Dtype Dtype;
34 | protected:
35 | MultiDeviceTest() {
36 | Caffe::set_mode(TypeParam::device);
37 | }
38 | virtual ~MultiDeviceTest() {}
39 | };
40 |
41 | typedef ::testing::Types<float, double> TestDtypes;
42 |
43 | template <typename TypeParam>
44 | struct CPUDevice {
45 | typedef TypeParam Dtype;
46 | static const Caffe::Brew device = Caffe::CPU;
47 | };
48 |
49 | template <typename Dtype>
50 | class CPUDeviceTest : public MultiDeviceTest<CPUDevice<Dtype> > {
51 | };
52 |
53 | #ifdef CPU_ONLY
54 |
55 | typedef ::testing::Types<CPUDevice<float>,
56 | CPUDevice<double> > TestDtypesAndDevices;
57 |
58 | #else
59 |
60 | template <typename TypeParam>
61 | struct GPUDevice {
62 | typedef TypeParam Dtype;
63 | static const Caffe::Brew device = Caffe::GPU;
64 | };
65 |
66 | template <typename Dtype>
67 | class GPUDeviceTest : public MultiDeviceTest<GPUDevice<Dtype> > {
68 | };
69 |
70 | typedef ::testing::Types<CPUDevice<float>, CPUDevice<double>,
71 | GPUDevice<float>, GPUDevice<double> >
72 | TestDtypesAndDevices;
73 |
74 | #endif
75 |
76 | } // namespace caffe
77 |
78 | #endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
79 |
--------------------------------------------------------------------------------
/include/caffe/util/benchmark.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_UTIL_BENCHMARK_H_
2 | #define CAFFE_UTIL_BENCHMARK_H_
3 |
4 | #include <boost/date_time/posix_time/posix_time.hpp>
5 |
6 | #include "caffe/util/device_alternate.hpp"
7 |
8 | namespace caffe {
9 |
10 | class Timer {
11 | public:
12 | Timer();
13 | virtual ~Timer();
14 | virtual void Start();
15 | virtual void Stop();
16 | virtual float MilliSeconds();
17 | virtual float MicroSeconds();
18 | virtual float Seconds();
19 |
20 | inline bool initted() { return initted_; }
21 | inline bool running() { return running_; }
22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; }
23 |
24 | protected:
25 | void Init();
26 |
27 | bool initted_;
28 | bool running_;
29 | bool has_run_at_least_once_;
30 | #ifndef CPU_ONLY
31 | cudaEvent_t start_gpu_;
32 | cudaEvent_t stop_gpu_;
33 | #endif
34 | boost::posix_time::ptime start_cpu_;
35 | boost::posix_time::ptime stop_cpu_;
36 | float elapsed_milliseconds_;
37 | float elapsed_microseconds_;
38 | };
39 |
40 | class CPUTimer : public Timer {
41 | public:
42 | explicit CPUTimer();
43 | virtual ~CPUTimer() {}
44 | virtual void Start();
45 | virtual void Stop();
46 | virtual float MilliSeconds();
47 | virtual float MicroSeconds();
48 | };
49 |
50 | } // namespace caffe
51 |
52 | #endif // CAFFE_UTIL_BENCHMARK_H_
53 |
--------------------------------------------------------------------------------
/include/caffe/util/db.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_UTIL_DB_HPP
2 | #define CAFFE_UTIL_DB_HPP
3 |
4 | #include <string>
5 |
6 | #include "caffe/common.hpp"
7 | #include "caffe/proto/caffe.pb.h"
8 |
9 | namespace caffe { namespace db {
10 |
11 | enum Mode { READ, WRITE, NEW };
12 |
13 | class Cursor {
14 | public:
15 | Cursor() { }
16 | virtual ~Cursor() { }
17 | virtual void SeekToFirst() = 0;
18 | virtual void Next() = 0;
19 | virtual string key() = 0;
20 | virtual string value() = 0;
21 | virtual bool valid() = 0;
22 |
23 | DISABLE_COPY_AND_ASSIGN(Cursor);
24 | };
25 |
26 | class Transaction {
27 | public:
28 | Transaction() { }
29 | virtual ~Transaction() { }
30 | virtual void Put(const string& key, const string& value) = 0;
31 | virtual void Commit() = 0;
32 |
33 | DISABLE_COPY_AND_ASSIGN(Transaction);
34 | };
35 |
36 | class DB {
37 | public:
38 | DB() { }
39 | virtual ~DB() { }
40 | virtual void Open(const string& source, Mode mode) = 0;
41 | virtual void Close() = 0;
42 | virtual Cursor* NewCursor() = 0;
43 | virtual Transaction* NewTransaction() = 0;
44 |
45 | DISABLE_COPY_AND_ASSIGN(DB);
46 | };
47 |
48 | DB* GetDB(DataParameter::DB backend);
49 | DB* GetDB(const string& backend);
50 |
51 | } // namespace db
52 | } // namespace caffe
53 |
54 | #endif // CAFFE_UTIL_DB_HPP
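
As a usage sketch from the Python side (not part of this header's API; it assumes the `lmdb` package and a database written by the conversion tools, e.g. `examples/mnist/mnist_train_lmdb` from create_mnist.sh), iterating a Caffe LMDB mirrors the Cursor interface above:

    import lmdb
    from caffe.proto import caffe_pb2

    env = lmdb.open('examples/mnist/mnist_train_lmdb', readonly=True)
    with env.begin() as txn:
        for key, value in txn.cursor():      # mirrors Cursor::key()/value()
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)     # each value is a serialized Datum
            print(key, datum.channels, datum.height, datum.width, datum.label)
            break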
55 |
--------------------------------------------------------------------------------
/include/caffe/util/db_leveldb.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_UTIL_DB_LEVELDB_HPP
2 | #define CAFFE_UTIL_DB_LEVELDB_HPP
3 |
4 | #include <string>
5 |
6 | #include "leveldb/db.h"
7 | #include "leveldb/write_batch.h"
8 |
9 | #include "caffe/util/db.hpp"
10 |
11 | namespace caffe { namespace db {
12 |
13 | class LevelDBCursor : public Cursor {
14 | public:
15 | explicit LevelDBCursor(leveldb::Iterator* iter)
16 | : iter_(iter) { SeekToFirst(); }
17 | ~LevelDBCursor() { delete iter_; }
18 | virtual void SeekToFirst() { iter_->SeekToFirst(); }
19 | virtual void Next() { iter_->Next(); }
20 | virtual string key() { return iter_->key().ToString(); }
21 | virtual string value() { return iter_->value().ToString(); }
22 | virtual bool valid() { return iter_->Valid(); }
23 |
24 | private:
25 | leveldb::Iterator* iter_;
26 | };
27 |
28 | class LevelDBTransaction : public Transaction {
29 | public:
30 | explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOTNULL(db_); }
31 | virtual void Put(const string& key, const string& value) {
32 | batch_.Put(key, value);
33 | }
34 | virtual void Commit() {
35 | leveldb::Status status = db_->Write(leveldb::WriteOptions(), &batch_);
36 | CHECK(status.ok()) << "Failed to write batch to leveldb "
37 | << std::endl << status.ToString();
38 | }
39 |
40 | private:
41 | leveldb::DB* db_;
42 | leveldb::WriteBatch batch_;
43 |
44 | DISABLE_COPY_AND_ASSIGN(LevelDBTransaction);
45 | };
46 |
47 | class LevelDB : public DB {
48 | public:
49 | LevelDB() : db_(NULL) { }
50 | virtual ~LevelDB() { Close(); }
51 | virtual void Open(const string& source, Mode mode);
52 | virtual void Close() {
53 | if (db_ != NULL) {
54 | delete db_;
55 | db_ = NULL;
56 | }
57 | }
58 | virtual LevelDBCursor* NewCursor() {
59 | return new LevelDBCursor(db_->NewIterator(leveldb::ReadOptions()));
60 | }
61 | virtual LevelDBTransaction* NewTransaction() {
62 | return new LevelDBTransaction(db_);
63 | }
64 |
65 | private:
66 | leveldb::DB* db_;
67 | };
68 |
69 |
70 | } // namespace db
71 | } // namespace caffe
72 |
73 | #endif // CAFFE_UTIL_DB_LEVELDB_HPP
74 |
--------------------------------------------------------------------------------
/include/caffe/util/im2col.hpp:
--------------------------------------------------------------------------------
1 | #ifndef _CAFFE_UTIL_IM2COL_HPP_
2 | #define _CAFFE_UTIL_IM2COL_HPP_
3 |
4 | namespace caffe {
5 |
6 | template <typename Dtype>
7 | void im2col_cpu(const Dtype* data_im, const int channels,
8 | const int height, const int width, const int kernel_h, const int kernel_w,
9 | const int pad_h, const int pad_w, const int stride_h,
10 | const int stride_w, Dtype* data_col);
11 |
12 | template <typename Dtype>
13 | void col2im_cpu(const Dtype* data_col, const int channels,
14 | const int height, const int width, const int patch_h, const int patch_w,
15 | const int pad_h, const int pad_w, const int stride_h,
16 | const int stride_w, Dtype* data_im);
17 |
18 | template <typename Dtype>
19 | void im2col_gpu(const Dtype* data_im, const int channels,
20 | const int height, const int width, const int kernel_h, const int kernel_w,
21 | const int pad_h, const int pad_w, const int stride_h,
22 | const int stride_w, Dtype* data_col);
23 |
24 | template <typename Dtype>
25 | void col2im_gpu(const Dtype* data_col, const int channels,
26 | const int height, const int width, const int patch_h, const int patch_w,
27 | const int pad_h, const int pad_w, const int stride_h,
28 | const int stride_w, Dtype* data_im);
29 |
30 | } // namespace caffe
31 |
32 | #endif // CAFFE_UTIL_IM2COL_HPP_
33 |
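im2col_cpu unrolls every kernel-sized patch of the image into one column so that convolution reduces to a single matrix multiply; col2im_* scatters the columns back, summing where patches overlap. A single-channel reference sketch of the float case (zero padding implied; a sketch, not the repo's implementation):

    void im2col_ref(const float* im, int height, int width,
                    int kernel_h, int kernel_w, int pad_h, int pad_w,
                    int stride_h, int stride_w, float* col) {
      const int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
      const int width_col  = (width  + 2 * pad_w - kernel_w) / stride_w + 1;
      for (int kh = 0; kh < kernel_h; ++kh)
        for (int kw = 0; kw < kernel_w; ++kw)
          for (int oh = 0; oh < height_col; ++oh)
            for (int ow = 0; ow < width_col; ++ow) {
              const int ih = oh * stride_h - pad_h + kh;
              const int iw = ow * stride_w - pad_w + kw;
              const float v =
                  (ih >= 0 && ih < height && iw >= 0 && iw < width)
                      ? im[ih * width + iw] : 0.f;
              // row = kernel tap, column = output position
              col[((kh * kernel_w + kw) * height_col + oh) * width_col + ow] = v;
            }
    }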
--------------------------------------------------------------------------------
/include/caffe/util/insert_splits.hpp:
--------------------------------------------------------------------------------
1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_
2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_
3 |
4 | #include <string>
5 |
6 | #include "caffe/proto/caffe.pb.h"
7 |
8 | namespace caffe {
9 |
10 | // Copy NetParameters with SplitLayers added to replace any shared bottom
11 | // blobs with unique bottom blobs provided by the SplitLayer.
12 | void InsertSplits(const NetParameter& param, NetParameter* param_split);
13 |
14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name,
15 | const int blob_idx, const int split_count, const float loss_weight,
16 | LayerParameter* split_layer_param);
17 |
18 | string SplitLayerName(const string& layer_name, const string& blob_name,
19 | const int blob_idx);
20 |
21 | string SplitBlobName(const string& layer_name, const string& blob_name,
22 | const int blob_idx, const int split_idx);
23 |
24 | } // namespace caffe
25 |
26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_
27 |
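A hedged usage sketch: a top blob consumed by two layers is exactly the case InsertSplits rewrites (the prototxt string below is illustrative only):

    #include <string>
    #include "google/protobuf/text_format.h"
    #include "caffe/proto/caffe.pb.h"
    #include "caffe/util/insert_splits.hpp"

    void insert_splits_sketch() {
      const std::string proto =
          "layer { name: 'data' type: 'Input' top: 'x' } "
          "layer { name: 'a' type: 'ReLU' bottom: 'x' top: 'a' } "
          "layer { name: 'b' type: 'Sigmoid' bottom: 'x' top: 'b' }";
      caffe::NetParameter in, out;
      google::protobuf::TextFormat::ParseFromString(proto, &in);
      caffe::InsertSplits(in, &out);
      // 'out' now feeds 'a' and 'b' from two unique split outputs of 'x'.
    }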
--------------------------------------------------------------------------------
/include/caffe/util/rng.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CAFFE_RNG_CPP_HPP_
2 | #define CAFFE_RNG_CPP_HPP_
3 |
4 | #include <algorithm>
5 | #include <iterator>
6 |
7 | #include "boost/random/mersenne_twister.hpp"
8 | #include "boost/random/uniform_int.hpp"
9 |
10 | #include "caffe/common.hpp"
11 |
12 | namespace caffe {
13 |
14 | typedef boost::mt19937 rng_t;
15 |
16 | inline rng_t* caffe_rng() {
17 | return static_cast<caffe::rng_t*>(Caffe::rng_stream().generator());
18 | }
19 |
20 | // Fisher–Yates algorithm
21 | template <class RandomAccessIterator, class RandomGenerator>
22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end,
23 | RandomGenerator* gen) {
24 | typedef typename std::iterator_traits<RandomAccessIterator>::difference_type
25 | difference_type;
26 | typedef typename boost::uniform_int<difference_type> dist_type;
27 |
28 | difference_type length = std::distance(begin, end);
29 | if (length <= 0) return;
30 |
31 | for (difference_type i = length - 1; i > 0; --i) {
32 | dist_type dist(0, i);
33 | std::iter_swap(begin + i, begin + dist(*gen));
34 | }
35 | }
36 |
37 | template <class RandomAccessIterator>
38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) {
39 | shuffle(begin, end, caffe_rng());
40 | }
41 | } // namespace caffe
42 |
43 | #endif // CAFFE_RNG_CPP_HPP_
44 |
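The loop above is the classic Fisher–Yates shuffle: each position i swaps with a uniform index in [0, i], so every permutation is equally likely. A standalone sketch of the same loop using the C++ standard library instead of boost and Caffe's RNG stream:

    #include <algorithm>
    #include <random>
    #include <vector>

    void fisher_yates_demo() {
      std::vector<int> v = {1, 2, 3, 4, 5};
      std::mt19937 gen(1701);  // fixed seed for reproducibility
      for (int i = static_cast<int>(v.size()) - 1; i > 0; --i) {
        std::uniform_int_distribution<int> dist(0, i);  // inclusive bounds
        std::iter_swap(v.begin() + i, v.begin() + dist(gen));
      }
    }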
--------------------------------------------------------------------------------
/matlab/+caffe/+test/test_solver.m:
--------------------------------------------------------------------------------
1 | classdef test_solver < matlab.unittest.TestCase
2 |
3 | properties
4 | num_output
5 | solver
6 | end
7 |
8 | methods
9 | function self = test_solver()
10 | self.num_output = 13;
11 | model_file = caffe.test.test_net.simple_net_file(self.num_output);
12 | solver_file = tempname();
13 |
14 | fid = fopen(solver_file, 'w');
15 | fprintf(fid, [ ...
16 | 'net: "' model_file '"\n' ...
17 | 'test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n' ...
18 | 'weight_decay: 0.0005 lr_policy: "inv" gamma: 0.0001 power: 0.75\n' ...
19 | 'display: 100 max_iter: 100 snapshot_after_train: false\n' ]);
20 | fclose(fid);
21 |
22 | self.solver = caffe.Solver(solver_file);
23 | % also make sure get_solver runs
24 | caffe.get_solver(solver_file);
25 | caffe.set_mode_cpu();
26 | % fill in valid labels
27 | self.solver.net.blobs('label').set_data(randi( ...
28 | self.num_output - 1, self.solver.net.blobs('label').shape));
29 | self.solver.test_nets(1).blobs('label').set_data(randi( ...
30 | self.num_output - 1, self.solver.test_nets(1).blobs('label').shape));
31 |
32 | delete(solver_file);
33 | delete(model_file);
34 | end
35 | end
36 | methods (Test)
37 | function test_solve(self)
38 | self.verifyEqual(self.solver.iter(), 0)
39 | self.solver.step(30);
40 | self.verifyEqual(self.solver.iter(), 30)
41 | self.solver.solve()
42 | self.verifyEqual(self.solver.iter(), 100)
43 | end
44 | end
45 | end
46 |
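The solver spec written above uses lr_policy "inv", whose effective rate per Caffe's SGDSolver policies is base_lr * (1 + gamma * iter) ^ (-power); a small sketch:

    #include <cmath>

    double inv_policy_lr(double base_lr, double gamma, double power, int iter) {
      return base_lr * std::pow(1.0 + gamma * iter, -power);
    }
    // With base_lr=0.01, gamma=0.0001, power=0.75 as above, iteration 100
    // trains at 0.01 * 1.01^-0.75, roughly 0.00993.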
--------------------------------------------------------------------------------
/matlab/+caffe/Layer.m:
--------------------------------------------------------------------------------
1 | classdef Layer < handle
2 | % Wrapper class of caffe::Layer in matlab
3 |
4 | properties (Access = private)
5 | hLayer_self
6 | attributes
7 | % attributes fields:
8 | % hBlob_blobs
9 | end
10 | properties (SetAccess = private)
11 | params
12 | end
13 |
14 | methods
15 | function self = Layer(hLayer_layer)
16 | CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle');
17 |
18 | % setup self handle and attributes
19 | self.hLayer_self = hLayer_layer;
20 | self.attributes = caffe_('layer_get_attr', self.hLayer_self);
21 |
22 | % setup weights
23 | self.params = caffe.Blob.empty();
24 | for n = 1:length(self.attributes.hBlob_blobs)
25 | self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n));
26 | end
27 | end
28 | function layer_type = type(self)
29 | layer_type = caffe_('layer_get_type', self.hLayer_self);
30 | end
31 | end
32 | end
33 |
--------------------------------------------------------------------------------
/matlab/+caffe/Solver.m:
--------------------------------------------------------------------------------
1 | classdef Solver < handle
2 | % Wrapper class of caffe::SGDSolver in matlab
3 |
4 | properties (Access = private)
5 | hSolver_self
6 | attributes
7 | % attribute fields
8 | % hNet_net
9 | % hNet_test_nets
10 | end
11 | properties (SetAccess = private)
12 | net
13 | test_nets
14 | end
15 |
16 | methods
17 | function self = Solver(varargin)
18 | % decide whether to construct a solver from solver_file or handle
19 | if ~(nargin == 1 && isstruct(varargin{1}))
20 | % construct a solver from solver_file
21 | self = caffe.get_solver(varargin{:});
22 | return
23 | end
24 | % construct a solver from handle
25 | hSolver_solver = varargin{1};
26 | CHECK(is_valid_handle(hSolver_solver), 'invalid Solver handle');
27 |
28 | % setup self handle and attributes
29 | self.hSolver_self = hSolver_solver;
30 | self.attributes = caffe_('solver_get_attr', self.hSolver_self);
31 |
32 | % setup net and test_nets
33 | self.net = caffe.Net(self.attributes.hNet_net);
34 | self.test_nets = caffe.Net.empty();
35 | for n = 1:length(self.attributes.hNet_test_nets)
36 | self.test_nets(n) = caffe.Net(self.attributes.hNet_test_nets(n));
37 | end
38 | end
39 | function iter = iter(self)
40 | iter = caffe_('solver_get_iter', self.hSolver_self);
41 | end
42 | function restore(self, snapshot_filename)
43 | CHECK(ischar(snapshot_filename), 'snapshot_filename must be a string');
44 | CHECK_FILE_EXIST(snapshot_filename);
45 | caffe_('solver_restore', self.hSolver_self, snapshot_filename);
46 | end
47 | function solve(self)
48 | caffe_('solver_solve', self.hSolver_self);
49 | end
50 | function step(self, iters)
51 | CHECK(isscalar(iters) && iters > 0, 'iters must be positive integer');
52 | iters = double(iters);
53 | caffe_('solver_step', self.hSolver_self, iters);
54 | end
55 | end
56 | end
57 |
--------------------------------------------------------------------------------
/matlab/+caffe/get_net.m:
--------------------------------------------------------------------------------
1 | function net = get_net(varargin)
2 | % net = get_net(model_file, phase_name) or
3 | % net = get_net(model_file, weights_file, phase_name)
4 | % Construct a net from model_file, and load weights from weights_file
5 | % phase_name can only be 'train' or 'test'
6 |
7 | CHECK(nargin == 2 || nargin == 3, ['usage: ' ...
8 | 'net = get_net(model_file, phase_name) or ' ...
9 | 'net = get_net(model_file, weights_file, phase_name)']);
10 | if nargin == 3
11 | model_file = varargin{1};
12 | weights_file = varargin{2};
13 | phase_name = varargin{3};
14 | elseif nargin == 2
15 | model_file = varargin{1};
16 | phase_name = varargin{2};
17 | end
18 |
19 | CHECK(ischar(model_file), 'model_file must be a string');
20 | CHECK(ischar(phase_name), 'phase_name must be a string');
21 | CHECK_FILE_EXIST(model_file);
22 | CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ...
23 | sprintf('phase_name can only be %strain%s or %stest%s', ...
24 | char(39), char(39), char(39), char(39)));
25 |
26 | % construct caffe net from model_file
27 | hNet = caffe_('get_net', model_file, phase_name);
28 | net = caffe.Net(hNet);
29 |
30 | % load weights from weights_file
31 | if nargin == 3
32 | CHECK(ischar(weights_file), 'weights_file must be a string');
33 | CHECK_FILE_EXIST(weights_file);
34 | net.copy_from(weights_file);
35 | end
36 |
37 | end
38 |
--------------------------------------------------------------------------------
/matlab/+caffe/get_solver.m:
--------------------------------------------------------------------------------
1 | function solver = get_solver(solver_file)
2 | % solver = get_solver(solver_file)
3 | % Construct a Solver object from solver_file
4 |
5 | CHECK(ischar(solver_file), 'solver_file must be a string');
6 | CHECK_FILE_EXIST(solver_file);
7 | pSolver = caffe_('get_solver', solver_file);
8 | solver = caffe.Solver(pSolver);
9 |
10 | end
11 |
--------------------------------------------------------------------------------
/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat
--------------------------------------------------------------------------------
/matlab/+caffe/io.m:
--------------------------------------------------------------------------------
1 | classdef io
2 | % a class for input and output functions
3 |
4 | methods (Static)
5 | function im_data = load_image(im_file)
6 | % im_data = load_image(im_file)
7 | % load an image from disk into Caffe-supported data format
8 | % switch channels from RGB to BGR, make width the fastest dimension
9 | % and convert to single
10 | % returns im_data in W x H x C. For colored images, C = 3 in BGR
11 | % channels, and for grayscale images, C = 1
12 | CHECK(ischar(im_file), 'im_file must be a string');
13 | CHECK_FILE_EXIST(im_file);
14 | im_data = imread(im_file);
15 | % permute channels from RGB to BGR for colored images
16 | if size(im_data, 3) == 3
17 | im_data = im_data(:, :, [3, 2, 1]);
18 | end
19 | % flip width and height to make width the fastest dimension
20 | im_data = permute(im_data, [2, 1, 3]);
21 | % convert from uint8 to single
22 | im_data = single(im_data);
23 | end
24 | function mean_data = read_mean(mean_proto_file)
25 | % mean_data = read_mean(mean_proto_file)
26 | % read image mean data from binaryproto file
27 | % returns mean_data in W x H x C with BGR channels
28 | CHECK(ischar(mean_proto_file), 'mean_proto_file must be a string');
29 | CHECK_FILE_EXIST(mean_proto_file);
30 | mean_data = caffe_('read_mean', mean_proto_file);
31 | end
32 | end
33 | end
34 |
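The same preprocessing sketched in C++ for a hypothetical interleaved H x W x 3 RGB uint8 buffer: swap channels to BGR, make width the fastest dimension, and widen to float, which is the W x H x C layout load_image returns:

    #include <cstdint>

    void rgb_hwc_to_caffe_whc(const std::uint8_t* rgb, int height, int width,
                              float* out) {
      for (int c = 0; c < 3; ++c) {        // output channel: 0=B, 1=G, 2=R
        for (int h = 0; h < height; ++h) {
          for (int w = 0; w < width; ++w) {
            const int src_c = 2 - c;       // RGB -> BGR
            out[(c * height + h) * width + w] =
                static_cast<float>(rgb[(h * width + w) * 3 + src_c]);
          }
        }
      }
    }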
--------------------------------------------------------------------------------
/matlab/+caffe/private/CHECK.m:
--------------------------------------------------------------------------------
1 | function CHECK(expr, error_msg)
2 |
3 | if ~expr
4 | error(error_msg);
5 | end
6 |
7 | end
8 |
--------------------------------------------------------------------------------
/matlab/+caffe/private/CHECK_FILE_EXIST.m:
--------------------------------------------------------------------------------
1 | function CHECK_FILE_EXIST(filename)
2 |
3 | if exist(filename, 'file') == 0
4 | error('%s does not exist', filename);
5 | end
6 |
7 | end
8 |
--------------------------------------------------------------------------------
/matlab/+caffe/private/is_valid_handle.m:
--------------------------------------------------------------------------------
1 | function valid = is_valid_handle(hObj)
2 | % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key')
3 | % Check if a handle is valid (has the right data type and init_key matches)
4 | % Use is_valid_handle('get_new_init_key') to get new init_key from C++;
5 |
6 | % a handle is a struct array with the following fields
7 | % (uint64) ptr : the pointer to the C++ object
8 | % (double) init_key : caffe initialization key
9 |
10 | persistent init_key;
11 | if isempty(init_key)
12 | init_key = caffe_('get_init_key');
13 | end
14 |
15 | % is_valid_handle('get_new_init_key') to get new init_key from C++;
16 | if ischar(hObj) && strcmp(hObj, 'get_new_init_key')
17 | init_key = caffe_('get_init_key');
18 | return
19 | else
20 | % check whether data types are correct and init_key matches
21 | valid = isstruct(hObj) ...
22 | && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ...
23 | && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ...
24 | && hObj.init_key == init_key;
25 | end
26 |
27 | end
28 |
--------------------------------------------------------------------------------
/matlab/+caffe/reset_all.m:
--------------------------------------------------------------------------------
1 | function reset_all()
2 | % reset_all()
3 | % clear all solvers and stand-alone nets and reset Caffe to initial status
4 |
5 | caffe_('reset');
6 | is_valid_handle('get_new_init_key');
7 |
8 | end
9 |
--------------------------------------------------------------------------------
/matlab/+caffe/run_tests.m:
--------------------------------------------------------------------------------
1 | function results = run_tests()
2 | % results = run_tests()
3 | % run all tests in this caffe matlab wrapper package
4 |
5 | % use CPU for testing
6 | caffe.set_mode_cpu();
7 |
8 | % reset caffe before testing
9 | caffe.reset_all();
10 |
11 | % put all test cases here
12 | results = [...
13 | run(caffe.test.test_net) ...
14 | run(caffe.test.test_solver) ];
15 |
16 | % reset caffe after testing
17 | caffe.reset_all();
18 |
19 | end
20 |
--------------------------------------------------------------------------------
/matlab/+caffe/set_device.m:
--------------------------------------------------------------------------------
1 | function set_device(device_id)
2 | % set_device(device_id)
3 | % set Caffe's GPU device ID
4 |
5 | CHECK(isscalar(device_id) && device_id >= 0, ...
6 | 'device_id must be non-negative integer');
7 | device_id = double(device_id);
8 |
9 | caffe_('set_device', device_id);
10 |
11 | end
12 |
--------------------------------------------------------------------------------
/matlab/+caffe/set_mode_cpu.m:
--------------------------------------------------------------------------------
1 | function set_mode_cpu()
2 | % set_mode_cpu()
3 | % set Caffe to CPU mode
4 |
5 | caffe_('set_mode_cpu');
6 |
7 | end
8 |
--------------------------------------------------------------------------------
/matlab/+caffe/set_mode_gpu.m:
--------------------------------------------------------------------------------
1 | function set_mode_gpu()
2 | % set_mode_gpu()
3 | % set Caffe to GPU mode
4 |
5 | caffe_('set_mode_gpu');
6 |
7 | end
8 |
--------------------------------------------------------------------------------
/matlab/hdf5creation/.gitignore:
--------------------------------------------------------------------------------
1 | *.h5
2 | list.txt
3 |
--------------------------------------------------------------------------------
/models/bvlc_alexnet/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: BVLC AlexNet Model
3 | caffemodel: bvlc_alexnet.caffemodel
4 | caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel
5 | license: unrestricted
6 | sha1: 9116a64c0fbe4459d18f4bb6b56d647b63920377
7 | caffe_commit: 709dc15af4a06bebda027c1eb2b3f3e3375d5077
8 | ---
9 |
10 | This model is a replication of the model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication.
11 |
12 | Differences:
13 | - not training with the relighting data-augmentation;
14 | - initializing non-zero biases to 0.1 instead of 1 (found necessary for training, as initialization to 1 gave flat loss).
15 |
16 | The bundled model is the iteration 360,000 snapshot.
17 | The best validation performance during training was iteration 358,000 with validation accuracy 57.258% and loss 1.83948.
18 | This model obtains a top-1 accuracy of 57.1% and a top-5 accuracy of 80.2% on the validation set, using just the center crop.
19 | (Using the average of 10 crops, (4 + 1 center) * 2 mirror, should obtain a bit higher accuracy.)
20 |
21 | This model was trained by Evan Shelhamer @shelhamer
22 |
23 | ## License
24 |
25 | This model is released for unrestricted use.
26 |
--------------------------------------------------------------------------------
/models/bvlc_alexnet/solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "models/bvlc_alexnet/train_val.prototxt"
2 | test_iter: 1000
3 | test_interval: 1000
4 | base_lr: 0.01
5 | lr_policy: "step"
6 | gamma: 0.1
7 | stepsize: 100000
8 | display: 20
9 | max_iter: 450000
10 | momentum: 0.9
11 | weight_decay: 0.0005
12 | snapshot: 10000
13 | snapshot_prefix: "models/bvlc_alexnet/caffe_alexnet_train"
14 | solver_mode: GPU
15 |
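With lr_policy "step", Caffe's SGDSolver decays the rate as lr = base_lr * gamma ^ floor(iter / stepsize); a sketch of this schedule:

    #include <cmath>

    double step_policy_lr(double base_lr, double gamma, int stepsize, int iter) {
      return base_lr * std::pow(gamma, iter / stepsize);  // int division = floor
    }
    // For the settings above: iterations 0-99999 train at 0.01,
    // 100000-199999 at 0.001, and so on, reaching 1e-6 before max_iter=450000.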
--------------------------------------------------------------------------------
/models/bvlc_googlenet/quick_solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "models/bvlc_googlenet/train_val.prototxt"
2 | test_iter: 1000
3 | test_interval: 4000
4 | test_initialization: false
5 | display: 40
6 | average_loss: 40
7 | base_lr: 0.01
8 | lr_policy: "poly"
9 | power: 0.5
10 | max_iter: 2400000
11 | momentum: 0.9
12 | weight_decay: 0.0002
13 | snapshot: 40000
14 | snapshot_prefix: "models/bvlc_googlenet/bvlc_googlenet_quick"
15 | solver_mode: GPU
16 |
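The "poly" policy anneals the rate to zero over training, lr = base_lr * (1 - iter / max_iter) ^ power per Caffe's SGDSolver; a sketch:

    #include <cmath>

    double poly_policy_lr(double base_lr, double power, int iter, int max_iter) {
      return base_lr *
             std::pow(1.0 - static_cast<double>(iter) / max_iter, power);
    }
    // With base_lr=0.01 and power=0.5 as above, the rate falls smoothly from
    // 0.01 to 0; at the halfway point it is 0.01 * sqrt(0.5), about 0.00707.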
--------------------------------------------------------------------------------
/models/bvlc_googlenet/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: BVLC GoogleNet Model
3 | caffemodel: bvlc_googlenet.caffemodel
4 | caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel
5 | license: unrestricted
6 | sha1: 405fc5acd08a3bb12de8ee5e23a96bec22f08204
7 | caffe_commit: bc614d1bd91896e3faceaf40b23b72dab47d44f5
8 | ---
9 |
10 | This model is a replication of the model described in the [GoogleNet](http://arxiv.org/abs/1409.4842) publication. We would like to thank Christian Szegedy for all his help in the replication of GoogleNet model.
11 |
12 | Differences:
13 | - not training with the relighting data-augmentation;
14 | - not training with the scale or aspect-ratio data-augmentation;
15 | - uses "xavier" to initialize the weights instead of "gaussian";
16 | - quick_solver.prototxt uses a different learning rate decay policy than the original solver.prototxt, which allows much faster training (60 epochs vs. 250 epochs);
17 |
18 | The bundled model is the iteration 2,400,000 snapshot (60 epochs) using quick_solver.prototxt.
19 |
20 | This bundled model obtains a top-1 accuracy of 68.7% (31.3% error) and a top-5 accuracy of 88.9% (11.1% error) on the validation set, using just the center crop.
21 | (Using the average of 10 crops, (4 + 1 center) * 2 mirror, should obtain a bit higher accuracy.)
22 |
23 | Timings for bvlc_googlenet with cuDNN using batch_size:128 on a K40c:
24 | - Average Forward pass: 562.841 ms.
25 | - Average Backward pass: 1123.84 ms.
26 | - Average Forward-Backward: 1688.8 ms.
27 |
28 | This model was trained by Sergio Guadarrama @sguada
29 |
30 | ## License
31 |
32 | This model is released for unrestricted use.
33 |
--------------------------------------------------------------------------------
/models/bvlc_googlenet/solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "models/bvlc_googlenet/train_val.prototxt"
2 | test_iter: 1000
3 | test_interval: 4000
4 | test_initialization: false
5 | display: 40
6 | average_loss: 40
7 | base_lr: 0.01
8 | lr_policy: "step"
9 | stepsize: 320000
10 | gamma: 0.96
11 | max_iter: 10000000
12 | momentum: 0.9
13 | weight_decay: 0.0002
14 | snapshot: 40000
15 | snapshot_prefix: "models/bvlc_googlenet/bvlc_googlenet"
16 | solver_mode: GPU
17 |
--------------------------------------------------------------------------------
/models/bvlc_reference_caffenet/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: BVLC CaffeNet Model
3 | caffemodel: bvlc_reference_caffenet.caffemodel
4 | caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel
5 | license: unrestricted
6 | sha1: 4c8d77deb20ea792f84eb5e6d0a11ca0a8660a46
7 | caffe_commit: 709dc15af4a06bebda027c1eb2b3f3e3375d5077
8 | ---
9 |
10 | This model is the result of following the Caffe [ImageNet model training instructions](http://caffe.berkeleyvision.org/gathered/examples/imagenet.html).
11 | It is a replication of the model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication with some differences:
12 |
13 | - not training with the relighting data-augmentation;
14 | - the order of pooling and normalization layers is switched (in CaffeNet, pooling is done before normalization).
15 |
16 | This model is a snapshot of iteration 310,000.
17 | The best validation performance during training was iteration 313,000 with validation accuracy 57.412% and loss 1.82328.
18 | This model obtains a top-1 accuracy of 57.4% and a top-5 accuracy of 80.4% on the validation set, using just the center crop.
19 | (Using the average of 10 crops, (4 + 1 center) * 2 mirror, should obtain a bit higher accuracy still.)
20 |
21 | This model was trained by Jeff Donahue @jeffdonahue
22 |
23 | ## License
24 |
25 | This model is released for unrestricted use.
26 |
--------------------------------------------------------------------------------
/models/bvlc_reference_caffenet/solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "models/bvlc_reference_caffenet/train_val.prototxt"
2 | test_iter: 1000
3 | test_interval: 1000
4 | base_lr: 0.01
5 | lr_policy: "step"
6 | gamma: 0.1
7 | stepsize: 100000
8 | display: 20
9 | max_iter: 450000
10 | momentum: 0.9
11 | weight_decay: 0.0005
12 | snapshot: 10000
13 | snapshot_prefix: "models/bvlc_reference_caffenet/caffenet_train"
14 | solver_mode: GPU
15 |
--------------------------------------------------------------------------------
/models/bvlc_reference_rcnn_ilsvrc13/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: BVLC Reference RCNN ILSVRC13 Model
3 | caffemodel: bvlc_reference_rcnn_ilsvrc13.caffemodel
4 | caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_reference_rcnn_ilsvrc13.caffemodel
5 | license: unrestricted
6 | sha1: bdd8abb885819cba5e2fe1eb36235f2319477e64
7 | caffe_commit: a7e397abbda52c0b90323c23ab95bdeabee90a98
8 | ---
9 |
10 | The pure Caffe instantiation of the [R-CNN](https://github.com/rbgirshick/rcnn) model for ILSVRC13 detection.
11 | This model was made by transplanting the R-CNN SVM classifiers into a `fc-rcnn` classification layer, provided here as an off-the-shelf Caffe detector.
12 | Try the [detection example](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/detection.ipynb) to see it in action.
13 |
14 | *N.B. For research purposes, make use of the official R-CNN package and not this example.*
15 |
16 | This model was trained by Ross Girshick @rbgirshick
17 |
18 | ## License
19 |
20 | This model is released for unrestricted use.
21 |
--------------------------------------------------------------------------------
/models/finetune_flickr_style/readme.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Finetuning CaffeNet on Flickr Style
3 | caffemodel: finetune_flickr_style.caffemodel
4 | caffemodel_url: http://dl.caffe.berkeleyvision.org/finetune_flickr_style.caffemodel
5 | license: non-commercial
6 | sha1: b61b5cef7d771b53b0c488e78d35ccadc073e9cf
7 | caffe_commit: 737ea5e936821b5c69f9c3952d72693ae5843370
8 | gist_id: 034c6ac3865563b69e60
9 | ---
10 |
11 | This model is trained exactly as described in `docs/finetune_flickr_style/readme.md`, using all 80000 images.
12 | The final performance:
13 |
14 | I1017 07:36:17.370688 31333 solver.cpp:228] Iteration 100000, loss = 0.757952
15 | I1017 07:36:17.370730 31333 solver.cpp:247] Iteration 100000, Testing net (#0)
16 | I1017 07:36:34.248730 31333 solver.cpp:298] Test net output #0: accuracy = 0.3916
17 |
18 | This model was trained by Sergey Karayev @sergeyk
19 |
20 | ## License
21 |
22 | The Flickr Style dataset contains only URLs to images.
23 | Some of the images may have copyright.
24 | Training a category-recognition model for research/non-commercial use may constitute fair use of this data, but the result should not be used for commercial purposes.
25 |
--------------------------------------------------------------------------------
/models/finetune_flickr_style/solver.prototxt:
--------------------------------------------------------------------------------
1 | net: "models/finetune_flickr_style/train_val.prototxt"
2 | test_iter: 100
3 | test_interval: 1000
4 | # lr for fine-tuning should be lower than when starting from scratch
5 | base_lr: 0.001
6 | lr_policy: "step"
7 | gamma: 0.1
8 | # stepsize should also be lower, as we're closer to being done
9 | stepsize: 20000
10 | display: 20
11 | max_iter: 100000
12 | momentum: 0.9
13 | weight_decay: 0.0005
14 | snapshot: 10000
15 | snapshot_prefix: "models/finetune_flickr_style/finetune_flickr_style"
16 | # uncomment the following to default to CPU mode solving
17 | # solver_mode: CPU
18 |
--------------------------------------------------------------------------------
/python/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | if(NOT HAVE_PYTHON)
2 | message(STATUS "Python interface is disabled or not all required dependencies were found. Building without it...")
3 | return()
4 | endif()
5 |
6 | include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
7 | file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp)
8 |
9 | add_library(pycaffe SHARED ${python_srcs})
10 | target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES})
11 | set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe")
12 | caffe_default_properties(pycaffe)
13 |
14 | if(UNIX OR APPLE)
15 | set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so")
16 | add_custom_command(TARGET pycaffe POST_BUILD
17 | COMMAND ln -sf $<TARGET_LINKER_FILE:pycaffe> "${__linkname}"
18 | COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/python/caffe/proto
19 | COMMAND touch ${PROJECT_SOURCE_DIR}/python/caffe/proto/__init__.py
20 | COMMAND cp ${proto_gen_folder}/*.py ${PROJECT_SOURCE_DIR}/python/caffe/proto/
21 | COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/_caffe${Caffe_POSTFIX}.so")
22 | endif()
23 |
24 | # ---[ Install
25 | file(GLOB files1 *.py requirements.txt)
26 | install(FILES ${files1} DESTINATION python)
27 |
28 | file(GLOB files2 caffe/*.py)
29 | install(FILES ${files2} DESTINATION python/caffe)
30 | install(TARGETS pycaffe DESTINATION python/caffe)
31 | install(DIRECTORY caffe/imagenet caffe/proto caffe/test DESTINATION python/caffe)
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/python/caffe/__init__.py:
--------------------------------------------------------------------------------
1 | from .pycaffe import Net, SGDSolver
2 | from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver
3 | from .proto.caffe_pb2 import TRAIN, TEST
4 | from .classifier import Classifier
5 | from .detector import Detector
6 | from . import io
7 | from .net_spec import layers, params, NetSpec, to_proto
8 |
--------------------------------------------------------------------------------
/python/caffe/imagenet/ilsvrc_2012_mean.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShaharKatz/Caffe-Data-Augmentation/1c9776bebbbcda541707e4ee30fe36b5aada6ff8/python/caffe/imagenet/ilsvrc_2012_mean.npy
--------------------------------------------------------------------------------
/python/caffe/test/test_solver.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import tempfile
3 | import os
4 | import numpy as np
5 | import six
6 |
7 | import caffe
8 | from test_net import simple_net_file
9 |
10 |
11 | class TestSolver(unittest.TestCase):
12 | def setUp(self):
13 | self.num_output = 13
14 | net_f = simple_net_file(self.num_output)
15 | f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
16 | f.write("""net: '""" + net_f + """'
17 | test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
18 | weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
19 | display: 100 max_iter: 100 snapshot_after_train: false""")
20 | f.close()
21 | self.solver = caffe.SGDSolver(f.name)
22 | # also make sure get_solver runs
23 | caffe.get_solver(f.name)
24 | caffe.set_mode_cpu()
25 | # fill in valid labels
26 | self.solver.net.blobs['label'].data[...] = \
27 | np.random.randint(self.num_output,
28 | size=self.solver.net.blobs['label'].data.shape)
29 | self.solver.test_nets[0].blobs['label'].data[...] = \
30 | np.random.randint(self.num_output,
31 | size=self.solver.test_nets[0].blobs['label'].data.shape)
32 | os.remove(f.name)
33 | os.remove(net_f)
34 |
35 | def test_solve(self):
36 | self.assertEqual(self.solver.iter, 0)
37 | self.solver.solve()
38 | self.assertEqual(self.solver.iter, 100)
39 |
40 | def test_net_memory(self):
41 | """Check that nets survive after the solver is destroyed."""
42 |
43 | nets = [self.solver.net] + list(self.solver.test_nets)
44 | self.assertEqual(len(nets), 2)
45 | del self.solver
46 |
47 | total = 0
48 | for net in nets:
49 | for ps in six.itervalues(net.params):
50 | for p in ps:
51 | total += p.data.sum() + p.diff.sum()
52 | for bl in six.itervalues(net.blobs):
53 | total += bl.data.sum() + bl.diff.sum()
54 |
--------------------------------------------------------------------------------
/python/draw_net.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Draw a graph of the net architecture.
4 | """
5 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
6 | from google.protobuf import text_format
7 |
8 | import caffe
9 | import caffe.draw
10 | from caffe.proto import caffe_pb2
11 |
12 |
13 | def parse_args():
14 | """Parse input arguments
15 | """
16 |
17 | parser = ArgumentParser(description=__doc__,
18 | formatter_class=ArgumentDefaultsHelpFormatter)
19 |
20 | parser.add_argument('input_net_proto_file',
21 | help='Input network prototxt file')
22 | parser.add_argument('output_image_file',
23 | help='Output image file')
24 | parser.add_argument('--rankdir',
25 | help=('One of TB (top-bottom, i.e., vertical), '
26 | 'RL (right-left, i.e., horizontal), or another '
27 | 'valid dot option; see '
28 | 'http://www.graphviz.org/doc/info/'
29 | 'attrs.html#k:rankdir'),
30 | default='LR')
31 |
32 | args = parser.parse_args()
33 | return args
34 |
35 |
36 | def main():
37 | args = parse_args()
38 | net = caffe_pb2.NetParameter()
39 | text_format.Merge(open(args.input_net_proto_file).read(), net)
40 | print('Drawing net to %s' % args.output_image_file)
41 | caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
42 |
43 |
44 | if __name__ == '__main__':
45 | main()
46 |
--------------------------------------------------------------------------------
/python/requirements.txt:
--------------------------------------------------------------------------------
1 | Cython>=0.19.2
2 | numpy>=1.7.1
3 | scipy>=0.13.2
4 | scikit-image>=0.9.3
5 | matplotlib>=1.3.1
6 | ipython>=3.0.0
7 | h5py>=2.2.0
8 | leveldb>=0.191
9 | networkx>=1.8.1
10 | nose>=1.3.0
11 | pandas>=0.12.0
12 | python-dateutil>=1.4,<2
13 | protobuf>=2.5.0
14 | python-gflags>=2.0
15 | pyyaml>=3.10
16 | Pillow>=2.3.0
17 | six>=1.1.0
--------------------------------------------------------------------------------
/scripts/build_docs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Build documentation for display in web browser.
3 |
4 | PORT=${1:-4000}
5 |
6 | echo "usage: build_docs.sh [port]"
7 |
8 | # Find the docs dir, no matter where the script is called
9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )"
10 | cd $ROOT_DIR
11 |
12 | # Gather docs.
13 | scripts/gather_examples.sh
14 |
15 | # Generate developer docs.
16 | make docs
17 |
18 | # Display docs using web server.
19 | cd docs
20 | jekyll serve -w -s . -d _site --port=$PORT
21 |
--------------------------------------------------------------------------------
/scripts/copy_notebook.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Takes as arguments:
4 | 1. the path to a JSON file (such as an IPython notebook).
5 | 2. the path to output file
6 |
7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true,
8 | then copies the file to output file, appending the 'metadata' property
9 | as YAML front-matter, adding the field 'category' with value 'notebook'.
10 | """
11 | import os
12 | import sys
13 | import json
14 |
15 | filename = sys.argv[1]
16 | output_filename = sys.argv[2]
17 | content = json.load(open(filename))
18 |
19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
20 | yaml_frontmatter = ['---']
21 | for key, val in content['metadata'].iteritems():
22 | if key == 'example_name':
23 | key = 'title'
24 | if val == '':
25 | val = os.path.basename(filename)
26 | yaml_frontmatter.append('{}: {}'.format(key, val))
27 | yaml_frontmatter += ['category: notebook']
28 | yaml_frontmatter += ['original_path: ' + filename]
29 |
30 | with open(output_filename, 'w') as fo:
31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
32 | fo.write(open(filename).read())
33 |
--------------------------------------------------------------------------------
/scripts/deploy_docs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Publish documentation to the gh-pages site.
3 |
4 | # The remote for pushing the docs (defaults to origin).
5 | # This is where you will submit the PR to BVLC:gh-pages from.
6 | REMOTE=${1:-origin}
7 |
8 | echo "Generating docs and pushing to $REMOTE:gh-pages..."
9 | echo "To build and view docs when not on master, simply do 'jekyll serve -s docs'."
10 | echo
11 |
12 | REMOTE_URL=`git config --get remote.${REMOTE}.url`
13 | BRANCH=`git rev-parse --abbrev-ref HEAD`
14 | MSG=`git log --oneline -1`
15 |
16 | if [[ $BRANCH = 'master' ]]; then
17 | # Find the docs dir, no matter where the script is called
18 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
19 | DOCS_SITE_DIR=$DIR/../docs/_site
20 |
21 | # Make sure that docs/_site tracks remote:gh-pages.
22 | # If not, then we make a new repo and check out just that branch.
23 | mkdir -p $DOCS_SITE_DIR
24 | cd $DOCS_SITE_DIR
25 | SITE_REMOTE_URL=`git config --get remote.${REMOTE}.url`
26 | SITE_BRANCH=`git rev-parse --abbrev-ref HEAD`
27 |
28 | echo $SITE_REMOTE_URL
29 | echo $SITE_BRANCH
30 | echo `pwd`
31 |
32 | if [[ ( $SITE_REMOTE_URL = $REMOTE_URL ) && ( $SITE_BRANCH = 'gh-pages' ) ]]; then
33 | echo "Confirmed that docs/_site has same remote as main repo, and is on gh-pages."
34 | else
35 | echo "Checking out $REMOTE:gh-pages into docs/_site (will take a little time)."
36 | git init .
37 | git remote add -t gh-pages -f $REMOTE $REMOTE_URL
38 | git checkout gh-pages
39 | fi
40 |
41 | echo "Building the site into docs/_site, and committing the changes."
42 | jekyll build -s .. -d .
43 | git add --all .
44 | git commit -m "$MSG"
45 | git push $REMOTE gh-pages
46 |
47 | echo "All done!"
48 | cd ../..
49 | else echo "You must run this deployment script from the 'master' branch."
50 | fi
51 |
--------------------------------------------------------------------------------
/scripts/download_model_from_gist.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | GIST=$1
4 | DIRNAME=${2:-./models}
5 |
6 | if [ -z $GIST ]; then
7 | echo "usage: download_model_from_gist.sh "
8 | exit
9 | fi
10 |
11 | GIST_DIR=$(echo $GIST | tr '/' '-')
12 | MODEL_DIR="$DIRNAME/$GIST_DIR"
13 |
14 | if [ -d $MODEL_DIR ]; then
15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!"
16 | exit
17 | fi
18 |
19 | echo "Downloading Caffe model info to $MODEL_DIR ..."
20 | mkdir -p $MODEL_DIR
21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.tar.gz
22 | tar xzf $MODEL_DIR/gist.tar.gz --directory=$MODEL_DIR --strip-components=1
23 | rm $MODEL_DIR/gist.tar.gz
24 | echo "Done"
25 |
--------------------------------------------------------------------------------
/scripts/gather_examples.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Assemble documentation for the project into one directory via symbolic links.
3 |
4 | # Find the docs dir, no matter where the script is called
5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )"
6 | cd $ROOT_DIR
7 |
8 | # Gather docs from examples/**/readme.md
9 | GATHERED_DIR=docs/gathered
10 | rm -r $GATHERED_DIR
11 | mkdir $GATHERED_DIR
12 | for README_FILENAME in $(find examples -iname "readme.md"); do
13 | # Only use file if it is to be included in docs.
14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then
15 | # Make link to readme.md in docs/gathered/.
16 | # Since everything is called readme.md, rename it by its dirname.
17 | README_DIRNAME=`dirname $README_FILENAME`
18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md
19 | mkdir -p `dirname $DOCS_FILENAME`
20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME
21 | fi
22 | done
23 |
24 | # Gather docs from examples/*.ipynb and add YAML front-matter.
25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do
26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME
27 | mkdir -p `dirname $DOCS_FILENAME`
28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME
29 | done
30 |
--------------------------------------------------------------------------------
/scripts/travis/travis_build_and_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script called by Travis to do a CPU-only build of and test Caffe.
3 |
4 | set -e
5 | MAKE="make --jobs=$NUM_THREADS --keep-going"
6 |
7 | if $WITH_CMAKE; then
8 | mkdir build
9 | cd build
10 | CPU_ONLY=" -DCPU_ONLY=ON"
11 | if ! $WITH_CUDA; then
12 | CPU_ONLY=" -DCPU_ONLY=OFF"
13 | fi
14 | PYTHON_ARGS=""
15 | if [ "$PYTHON_VERSION" = "3" ]; then
16 | PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/"
17 | fi
18 | cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" ..
19 | $MAKE
20 | $MAKE pytest
21 | if ! $WITH_CUDA; then
22 | $MAKE runtest
23 | $MAKE lint
24 | fi
25 | $MAKE clean
26 | cd -
27 | else
28 | if ! $WITH_CUDA; then
29 | export CPU_ONLY=1
30 | fi
31 | $MAKE all test pycaffe warn lint || true
32 | if ! $WITH_CUDA; then
33 | $MAKE runtest
34 | fi
35 | $MAKE all
36 | $MAKE test
37 | $MAKE pycaffe
38 | $MAKE pytest
39 | $MAKE warn
40 | if ! $WITH_CUDA; then
41 | $MAKE lint
42 | fi
43 | fi
44 |
--------------------------------------------------------------------------------
/scripts/travis/travis_setup_makefile_config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | mv Makefile.config.example Makefile.config
6 |
7 | if $WITH_CUDA; then
8 | # Only generate compute_50.
9 | GENCODE="-gencode arch=compute_50,code=sm_50"
10 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50"
11 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config
12 | fi
13 |
14 | cat << 'EOF' >> Makefile.config
15 | # Travis' nvcc doesn't like newer boost versions
16 | NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used
17 | ANACONDA_HOME := $(CONDA_DIR)
18 | PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
19 | $(ANACONDA_HOME)/include/python2.7 \
20 | $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include
21 | PYTHON_LIB := $(ANACONDA_HOME)/lib
22 | INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
23 | LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
24 | WITH_PYTHON_LAYER := 1
25 | EOF
26 |
--------------------------------------------------------------------------------
/scripts/upload_model_to_gist.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check for valid directory
4 | DIRNAME=$1
5 | if [ ! -f $DIRNAME/readme.md ]; then
6 | echo "usage: upload_model_to_gist.sh "
7 | echo " /readme.md must exist"
8 | fi
9 | cd $DIRNAME
10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo`
11 |
12 | # Check for gist tool.
13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; }
14 |
15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md`
16 | if [ -z "$NAME" ]; then
17 | echo " /readme.md must contain name field in the front-matter."
18 | fi
19 |
20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md`
21 | if [ -z "$GIST" ]; then
22 | echo "Uploading new Gist"
23 | gist -p -d "$NAME" $FILES
24 | else
25 | echo "Updating existing Gist, id $GIST"
26 | gist -u $GIST -d "$NAME" $FILES
27 | fi
28 |
29 | RESULT=$?
30 | if [ $RESULT -eq 0 ]; then
31 | echo "You've uploaded your model!"
32 | echo "Don't forget to add the gist_id field to your /readme.md now!"
33 | echo "Run the command again after you do that, to make sure the Gist id propagates."
34 | echo ""
35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo"
36 | else
37 | echo "Something went wrong!"
38 | fi
39 |
--------------------------------------------------------------------------------
/src/caffe/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # generate protobuf sources
2 | file(GLOB proto_files proto/*.proto)
3 | caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files})
4 |
5 | # include python files to force their generation
6 | add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python})
7 | set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend!
8 | caffe_default_properties(proto)
9 |
10 | # --[ Caffe library
11 |
12 | # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists
13 | caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR})
14 |
15 | if(HAVE_CUDA)
16 | caffe_cuda_compile(cuda_objs ${cuda})
17 | list(APPEND srcs ${cuda_objs} ${cuda})
18 | endif()
19 |
20 | add_library(caffe ${srcs})
21 | target_link_libraries(caffe proto ${Caffe_LINKER_LIBS})
22 | caffe_default_properties(caffe)
23 |
24 | # ---[ Tests
25 | add_subdirectory(test)
26 |
27 | # ---[ Install
28 | install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include)
29 | install(FILES ${proto_hdrs} DESTINATION include/caffe/proto)
30 | install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib)
31 |
32 | file(WRITE ${PROJECT_BINARY_DIR}/__init__.py)
33 | list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py)
34 | install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto)
35 |
36 |
37 |
--------------------------------------------------------------------------------
/src/caffe/internal_thread.cpp:
--------------------------------------------------------------------------------
1 | #include <boost/thread.hpp>
2 | #include "caffe/internal_thread.hpp"
3 |
4 | namespace caffe {
5 |
6 | InternalThread::~InternalThread() {
7 | WaitForInternalThreadToExit();
8 | }
9 |
10 | bool InternalThread::is_started() const {
11 | return thread_.get() != NULL && thread_->joinable();
12 | }
13 |
14 |
15 | bool InternalThread::StartInternalThread() {
16 | if (!WaitForInternalThreadToExit()) {
17 | return false;
18 | }
19 | try {
20 | thread_.reset(
21 | new boost::thread(&InternalThread::InternalThreadEntry, this));
22 | } catch (...) {
23 | return false;
24 | }
25 | return true;
26 | }
27 |
28 | /** Will not return until the internal thread has exited. */
29 | bool InternalThread::WaitForInternalThreadToExit() {
30 | if (is_started()) {
31 | try {
32 | thread_->join();
33 | } catch (...) {
34 | return false;
35 | }
36 | }
37 | return true;
38 | }
39 |
40 | } // namespace caffe
41 |
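A hedged sketch of how a worker builds on this class: caffe/internal_thread.hpp declares a virtual InternalThreadEntry() that the boost::thread runs (PrefetchWorker below is hypothetical):

    class PrefetchWorker : public caffe::InternalThread {
     protected:
      virtual void InternalThreadEntry() {
        // body runs on the internal thread after StartInternalThread(),
        // which returns false if a previous thread could not be joined
        // or a new one could not be spawned
      }
    };
    // WaitForInternalThreadToExit() joins the thread, as the destructor
    // above does.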
--------------------------------------------------------------------------------
/src/caffe/layers/absval_layer.cpp:
--------------------------------------------------------------------------------
1 | #include <vector>
2 |
3 | #include "caffe/layer.hpp"
4 | #include "caffe/neuron_layers.hpp"
5 | #include "caffe/util/math_functions.hpp"
6 |
7 | namespace caffe {
8 |
9 | template <typename Dtype>
10 | void AbsValLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
11 | const vector<Blob<Dtype>*>& top) {
12 | NeuronLayer<Dtype>::LayerSetUp(bottom, top);
13 | CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
14 | "allow in-place computation.";
15 | }
16 |
17 | template <typename Dtype>
18 | void AbsValLayer<Dtype>::Forward_cpu(
19 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
20 | const int count = top[0]->count();
21 | Dtype* top_data = top[0]->mutable_cpu_data();
22 | caffe_abs(count, bottom[0]->cpu_data(), top_data);
23 | }
24 |
25 | template <typename Dtype>
26 | void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
27 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
28 | const int count = top[0]->count();
29 | const Dtype* top_diff = top[0]->cpu_diff();
30 | if (propagate_down[0]) {
31 | const Dtype* bottom_data = bottom[0]->cpu_data();
32 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
33 | caffe_cpu_sign(count, bottom_data, bottom_diff);
34 | caffe_mul(count, bottom_diff, top_diff, bottom_diff);
35 | }
36 | }
37 |
38 | #ifdef CPU_ONLY
39 | STUB_GPU(AbsValLayer);
40 | #endif
41 |
42 | INSTANTIATE_CLASS(AbsValLayer);
43 | REGISTER_LAYER_CLASS(AbsVal);
44 |
45 | } // namespace caffe
46 |
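The backward pass uses d|x|/dx = sign(x), so bottom_diff is the elementwise product sign(bottom_data) * top_diff, which is what caffe_cpu_sign followed by caffe_mul compute above; a tiny numeric sketch, not repo code:

    #include <cassert>

    void absval_backward_sketch() {
      const float bottom[3] = {-2.f, 0.5f, 3.f};
      const float top_diff[3] = {0.1f, 0.2f, 0.3f};
      float bottom_diff[3];
      for (int i = 0; i < 3; ++i) {
        const int sign = (bottom[i] > 0.f) - (bottom[i] < 0.f);
        bottom_diff[i] = sign * top_diff[i];
      }
      assert(bottom_diff[0] == -0.1f && bottom_diff[1] == 0.2f);
    }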
--------------------------------------------------------------------------------
/src/caffe/layers/absval_layer.cu:
--------------------------------------------------------------------------------
1 | #include <vector>
2 |
3 | #include "caffe/layer.hpp"
4 | #include "caffe/util/math_functions.hpp"
5 | #include "caffe/vision_layers.hpp"
6 |
7 | namespace caffe {
8 |
9 | template <typename Dtype>
10 | void AbsValLayer<Dtype>::Forward_gpu(
11 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
12 | const int count = top[0]->count();
13 | Dtype* top_data = top[0]->mutable_gpu_data();
14 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data);
15 | }
16 |
17 | template <typename Dtype>
18 | void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
19 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
20 | const int count = top[0]->count();
21 | const Dtype* top_diff = top[0]->gpu_diff();
22 | if (propagate_down[0]) {
23 | const Dtype* bottom_data = bottom[0]->gpu_data();
24 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
25 | caffe_gpu_sign(count, bottom_data, bottom_diff);
26 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
27 | }
28 | }
29 |
30 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer);
31 |
32 |
33 | } // namespace caffe
34 |
--------------------------------------------------------------------------------
/src/caffe/layers/base_data_layer.cu:
--------------------------------------------------------------------------------
1 | #include <vector>
2 |
3 | #include "caffe/data_layers.hpp"
4 |
5 | namespace caffe {
6 |
7 | template <typename Dtype>
8 | void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
9 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
10 | // First, join the thread
11 | JoinPrefetchThread();
12 | // Reshape to loaded data.
13 | top[0]->ReshapeLike(this->prefetch_data_);
14 | // Copy the data
15 | caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(),
16 | top[0]->mutable_gpu_data());
17 | if (this->output_labels_) {
18 | // Reshape to loaded labels.
19 | top[1]->ReshapeLike(prefetch_label_);
20 | // Copy the labels.
21 | caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(),
22 | top[1]->mutable_gpu_data());
23 | }
24 | // Start a new prefetch thread
25 | CreatePrefetchThread();
26 | }
27 |
28 | INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
29 |
30 | } // namespace caffe
31 |
--------------------------------------------------------------------------------
/src/caffe/layers/bnll_layer.cpp:
--------------------------------------------------------------------------------
1 | #include <algorithm>
2 | #include <vector>
3 |
4 | #include "caffe/layer.hpp"
5 | #include "caffe/vision_layers.hpp"
6 |
7 | namespace caffe {
8 |
9 | const float kBNLL_THRESHOLD = 50.;
10 |
11 | template <typename Dtype>
12 | void BNLLLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
13 | const vector<Blob<Dtype>*>& top) {
14 | const Dtype* bottom_data = bottom[0]->cpu_data();
15 | Dtype* top_data = top[0]->mutable_cpu_data();
16 | const int count = bottom[0]->count();
17 | for (int i = 0; i < count; ++i) {
18 | top_data[i] = bottom_data[i] > 0 ?
19 | bottom_data[i] + log(1. + exp(-bottom_data[i])) :
20 | log(1. + exp(bottom_data[i]));
21 | }
22 | }
23 |
24 | template <typename Dtype>
25 | void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
26 | const vector<bool>& propagate_down,
27 | const vector<Blob<Dtype>*>& bottom) {
28 | if (propagate_down[0]) {
29 | const Dtype* bottom_data = bottom[0]->cpu_data();
30 | const Dtype* top_diff = top[0]->cpu_diff();
31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
32 | const int count = bottom[0]->count();
33 | Dtype expval;
34 | for (int i = 0; i < count; ++i) {
35 | expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
36 | bottom_diff[i] = top_diff[i] * expval / (expval + 1.);
37 | }
38 | }
39 | }
40 |
41 | #ifdef CPU_ONLY
42 | STUB_GPU(BNLLLayer);
43 | #endif
44 |
45 | INSTANTIATE_CLASS(BNLLLayer);
46 | REGISTER_LAYER_CLASS(BNLL);
47 |
48 | } // namespace caffe
49 |
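The branch in Forward_cpu is for numerical stability: BNLL is f(x) = log(1 + exp(x)), and evaluating that directly overflows for large positive x, while the algebraically equal form x + log(1 + exp(-x)) only ever exponentiates a non-positive number. A sketch:

    #include <cmath>

    double bnll_stable(double x) {
      return x > 0 ? x + std::log(1.0 + std::exp(-x))  // exp(-x) <= 1
                   : std::log(1.0 + std::exp(x));      // exp(x) <= 1
    }
    // bnll_stable(1000.0) returns 1000.0, whereas exp(1000.0) overflows to
    // inf and log(1 + exp(1000.0)) would return inf.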
--------------------------------------------------------------------------------
/src/caffe/layers/bnll_layer.cu:
--------------------------------------------------------------------------------
1 | #include <algorithm>
2 | #include <vector>
3 |
4 | #include "caffe/layer.hpp"
5 | #include "caffe/vision_layers.hpp"
6 |
7 | namespace caffe {
8 |
9 | const float kBNLL_THRESHOLD = 50.;
10 |
11 | template <typename Dtype>
12 | __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
13 | CUDA_KERNEL_LOOP(index, n) {
14 | out[index] = in[index] > 0 ?
15 | in[index] + log(1. + exp(-in[index])) :
16 | log(1. + exp(in[index]));
17 | }
18 | }
19 |
20 | template <typename Dtype>
21 | void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
22 | const vector<Blob<Dtype>*>& top) {
23 | const Dtype* bottom_data = bottom[0]->gpu_data();
24 | Dtype* top_data = top[0]->mutable_gpu_data();
25 | const int count = bottom[0]->count();
26 | // NOLINT_NEXT_LINE(whitespace/operators)
27 | BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
28 | count, bottom_data, top_data);
29 | CUDA_POST_KERNEL_CHECK;
30 | }
31 |
32 | template <typename Dtype>
33 | __global__ void BNLLBackward(const int n, const Dtype* in_diff,
34 | const Dtype* in_data, Dtype* out_diff) {
35 | CUDA_KERNEL_LOOP(index, n) {
36 | Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
37 | out_diff[index] = in_diff[index] * expval / (expval + 1.);
38 | }
39 | }
40 |
41 | template <typename Dtype>
42 | void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
43 | const vector<bool>& propagate_down,
44 | const vector<Blob<Dtype>*>& bottom) {
45 | if (propagate_down[0]) {
46 | const Dtype* bottom_data = bottom[0]->gpu_data();
47 | const Dtype* top_diff = top[0]->gpu_diff();
48 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
49 | const int count = bottom[0]->count();
50 | // NOLINT_NEXT_LINE(whitespace/operators)
51 | BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
52 | count, top_diff, bottom_data, bottom_diff);
53 | CUDA_POST_KERNEL_CHECK;
54 | }
55 | }
56 |
57 | INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
58 |
59 |
60 | } // namespace caffe
61 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_pooling_layer.cpp:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <vector>
3 |
4 | #include "caffe/filler.hpp"
5 | #include "caffe/layer.hpp"
6 | #include "caffe/util/im2col.hpp"
7 | #include "caffe/util/math_functions.hpp"
8 | #include "caffe/vision_layers.hpp"
9 |
10 | namespace caffe {
11 |
12 | template <typename Dtype>
13 | void CuDNNPoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
14 | const vector<Blob<Dtype>*>& top) {
15 | PoolingLayer<Dtype>::LayerSetUp(bottom, top);
16 | CUDNN_CHECK(cudnnCreate(&handle_));
17 | cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
18 | cudnn::createTensor4dDesc<Dtype>(&top_desc_);
19 | cudnn::createPoolingDesc<Dtype>(&pooling_desc_,
20 | this->layer_param_.pooling_param().pool(), &mode_,
21 | this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_,
22 | this->stride_h_, this->stride_w_);
23 | handles_setup_ = true;
24 | }
25 |
26 | template <typename Dtype>
27 | void CuDNNPoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
28 | const vector<Blob<Dtype>*>& top) {
29 | PoolingLayer<Dtype>::Reshape(bottom, top);
30 | cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, bottom[0]->num(),
31 | this->channels_, this->height_, this->width_);
32 | cudnn::setTensor4dDesc<Dtype>(&top_desc_, bottom[0]->num(),
33 | this->channels_, this->pooled_height_, this->pooled_width_);
34 | }
35 |
36 | template <typename Dtype>
37 | CuDNNPoolingLayer<Dtype>::~CuDNNPoolingLayer() {
38 | // Check that handles have been setup before destroying.
39 | if (!handles_setup_) { return; }
40 |
41 | cudnnDestroyTensorDescriptor(bottom_desc_);
42 | cudnnDestroyTensorDescriptor(top_desc_);
43 | cudnnDestroyPoolingDescriptor(pooling_desc_);
44 | cudnnDestroy(handle_);
45 | }
46 |
47 | INSTANTIATE_CLASS(CuDNNPoolingLayer);
48 |
49 | } // namespace caffe
50 | #endif
51 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_pooling_layer.cu:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <vector>
3 |
4 | #include "caffe/filler.hpp"
5 | #include "caffe/layer.hpp"
6 | #include "caffe/util/im2col.hpp"
7 | #include "caffe/util/math_functions.hpp"
8 | #include "caffe/vision_layers.hpp"
9 |
10 | namespace caffe {
11 |
12 | template <typename Dtype>
13 | void CuDNNPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
14 | const vector<Blob<Dtype>*>& top) {
15 | const Dtype* bottom_data = bottom[0]->gpu_data();
16 | Dtype* top_data = top[0]->mutable_gpu_data();
17 | CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_,
18 | cudnn::dataType<Dtype>::one,
19 | bottom_desc_, bottom_data,
20 | cudnn::dataType<Dtype>::zero,
21 | top_desc_, top_data));
22 | }
23 |
24 | template <typename Dtype>
25 | void CuDNNPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
26 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
27 | if (!propagate_down[0]) {
28 | return;
29 | }
30 | const Dtype* top_diff = top[0]->gpu_diff();
31 | const Dtype* top_data = top[0]->gpu_data();
32 | const Dtype* bottom_data = bottom[0]->gpu_data();
33 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
34 | CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_,
35 | cudnn::dataType<Dtype>::one,
36 | top_desc_, top_data, top_desc_, top_diff,
37 | bottom_desc_, bottom_data,
38 | cudnn::dataType<Dtype>::zero,
39 | bottom_desc_, bottom_diff));
40 | }
41 |
42 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer);
43 |
44 | } // namespace caffe
45 | #endif
46 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_relu_layer.cpp:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <vector>
4 |
5 | #include "caffe/layer.hpp"
6 | #include "caffe/vision_layers.hpp"
7 |
8 | namespace caffe {
9 |
10 | template <typename Dtype>
11 | void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
12 | const vector<Blob<Dtype>*>& top) {
13 | ReLULayer<Dtype>::LayerSetUp(bottom, top);
14 | // initialize cuDNN
15 | CUDNN_CHECK(cudnnCreate(&handle_));
16 | cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
17 | cudnn::createTensor4dDesc<Dtype>(&top_desc_);
18 | handles_setup_ = true;
19 | }
20 |
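   | // cuDNN describes activations as 4-D NCHW tensors, so the blob's
   | // (num, channels, height, width) dimensions map directly onto the
   | // tensor descriptors.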
21 | template <typename Dtype>
22 | void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
23 |     const vector<Blob<Dtype>*>& top) {
24 |   ReLULayer<Dtype>::Reshape(bottom, top);
25 |   const int N = bottom[0]->num();
26 |   const int K = bottom[0]->channels();
27 |   const int H = bottom[0]->height();
28 |   const int W = bottom[0]->width();
29 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
30 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
31 | }
32 |
33 | template <typename Dtype>
34 | CuDNNReLULayer<Dtype>::~CuDNNReLULayer() {
35 |   // Check that handles have been setup before destroying.
36 |   if (!handles_setup_) { return; }
37 | 
38 |   cudnnDestroyTensorDescriptor(this->bottom_desc_);
39 |   cudnnDestroyTensorDescriptor(this->top_desc_);
40 |   cudnnDestroy(this->handle_);
41 | }
42 |
43 | INSTANTIATE_CLASS(CuDNNReLULayer);
44 |
45 | } // namespace caffe
46 | #endif
47 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_relu_layer.cu:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <vector>
4 |
5 | #include "caffe/layer.hpp"
6 | #include "caffe/vision_layers.hpp"
7 |
8 | namespace caffe {
9 |
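   | // Both passes below fall back to the plain Caffe GPU implementation
   | // when negative_slope != 0: CUDNN_ACTIVATION_RELU computes max(x, 0)
   | // only, so a leaky ReLU cannot be expressed through it.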
10 | template <typename Dtype>
11 | void CuDNNReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   // Fallback to standard Caffe for leaky ReLU.
14 |   if (ReLULayer<Dtype>::layer_param_.relu_param().negative_slope() != 0) {
15 |     return ReLULayer<Dtype>::Forward_gpu(bottom, top);
16 |   }
17 | 
18 |   const Dtype* bottom_data = bottom[0]->gpu_data();
19 |   Dtype* top_data = top[0]->mutable_gpu_data();
20 |   CUDNN_CHECK(cudnnActivationForward(this->handle_,
21 |         CUDNN_ACTIVATION_RELU,
22 |         cudnn::dataType<Dtype>::one,
23 |         this->bottom_desc_, bottom_data,
24 |         cudnn::dataType<Dtype>::zero,
25 |         this->top_desc_, top_data));
26 | }
27 |
28 | template <typename Dtype>
29 | void CuDNNReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
30 |     const vector<bool>& propagate_down,
31 |     const vector<Blob<Dtype>*>& bottom) {
32 |   if (!propagate_down[0]) {
33 |     return;
34 |   }
35 | 
36 |   // Fallback to standard Caffe for leaky ReLU.
37 |   if (ReLULayer<Dtype>::layer_param_.relu_param().negative_slope() != 0) {
38 |     return ReLULayer<Dtype>::Backward_gpu(top, propagate_down, bottom);
39 |   }
40 | 
41 |   const Dtype* top_data = top[0]->gpu_data();
42 |   const Dtype* top_diff = top[0]->gpu_diff();
43 |   const Dtype* bottom_data = bottom[0]->gpu_data();
44 |   Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
45 |   CUDNN_CHECK(cudnnActivationBackward(this->handle_,
46 |         CUDNN_ACTIVATION_RELU,
47 |         cudnn::dataType<Dtype>::one,
48 |         this->top_desc_, top_data, this->top_desc_, top_diff,
49 |         this->bottom_desc_, bottom_data,
50 |         cudnn::dataType<Dtype>::zero,
51 |         this->bottom_desc_, bottom_diff));
52 | }
53 |
54 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer);
55 |
56 | } // namespace caffe
57 | #endif
58 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_sigmoid_layer.cpp:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <vector>
4 |
5 | #include "caffe/layer.hpp"
6 | #include "caffe/vision_layers.hpp"
7 |
8 | namespace caffe {
9 |
10 | template <typename Dtype>
11 | void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
14 |   // initialize cuDNN
15 |   CUDNN_CHECK(cudnnCreate(&handle_));
16 |   cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
17 |   cudnn::createTensor4dDesc<Dtype>(&top_desc_);
18 |   handles_setup_ = true;
19 | }
20 |
21 | template <typename Dtype>
22 | void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
23 |     const vector<Blob<Dtype>*>& top) {
24 |   SigmoidLayer<Dtype>::Reshape(bottom, top);
25 |   const int N = bottom[0]->num();
26 |   const int K = bottom[0]->channels();
27 |   const int H = bottom[0]->height();
28 |   const int W = bottom[0]->width();
29 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
30 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
31 | }
32 |
33 | template <typename Dtype>
34 | CuDNNSigmoidLayer<Dtype>::~CuDNNSigmoidLayer() {
35 |   // Check that handles have been setup before destroying.
36 |   if (!handles_setup_) { return; }
37 | 
38 |   cudnnDestroyTensorDescriptor(this->bottom_desc_);
39 |   cudnnDestroyTensorDescriptor(this->top_desc_);
40 |   cudnnDestroy(this->handle_);
41 | }
42 |
43 | INSTANTIATE_CLASS(CuDNNSigmoidLayer);
44 |
45 | } // namespace caffe
46 | #endif
47 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_sigmoid_layer.cu:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <vector>
4 |
5 | #include "caffe/layer.hpp"
6 | #include "caffe/vision_layers.hpp"
7 |
8 | namespace caffe {
9 |
10 | template <typename Dtype>
11 | void CuDNNSigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   const Dtype* bottom_data = bottom[0]->gpu_data();
14 |   Dtype* top_data = top[0]->mutable_gpu_data();
15 |   CUDNN_CHECK(cudnnActivationForward(this->handle_,
16 |         CUDNN_ACTIVATION_SIGMOID,
17 |         cudnn::dataType<Dtype>::one,
18 |         this->bottom_desc_, bottom_data,
19 |         cudnn::dataType<Dtype>::zero,
20 |         this->top_desc_, top_data));
21 | }
22 |
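   | // For sigmoid, cudnnActivationBackward forms the gradient from the
   | // stored outputs (dy * y * (1 - y)), which is why the top data is
   | // passed in alongside the diffs.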
23 | template <typename Dtype>
24 | void CuDNNSigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
25 |     const vector<bool>& propagate_down,
26 |     const vector<Blob<Dtype>*>& bottom) {
27 |   if (!propagate_down[0]) {
28 |     return;
29 |   }
30 | 
31 |   const Dtype* top_data = top[0]->gpu_data();
32 |   const Dtype* top_diff = top[0]->gpu_diff();
33 |   const Dtype* bottom_data = bottom[0]->gpu_data();
34 |   Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
35 |   CUDNN_CHECK(cudnnActivationBackward(this->handle_,
36 |         CUDNN_ACTIVATION_SIGMOID,
37 |         cudnn::dataType<Dtype>::one,
38 |         this->top_desc_, top_data, this->top_desc_, top_diff,
39 |         this->bottom_desc_, bottom_data,
40 |         cudnn::dataType<Dtype>::zero,
41 |         this->bottom_desc_, bottom_diff));
42 | }
43 |
44 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer);
45 |
46 | } // namespace caffe
47 | #endif
48 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_softmax_layer.cpp:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <cfloat>
4 | #include <vector>
5 |
6 | #include "thrust/device_vector.h"
7 |
8 | #include "caffe/layer.hpp"
9 | #include "caffe/util/math_functions.hpp"
10 | #include "caffe/vision_layers.hpp"
11 |
12 | namespace caffe {
13 |
14 | template <typename Dtype>
15 | void CuDNNSoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
16 |     const vector<Blob<Dtype>*>& top) {
17 |   SoftmaxLayer<Dtype>::LayerSetUp(bottom, top);
18 |   // Initialize CUDNN.
19 |   CUDNN_CHECK(cudnnCreate(&handle_));
20 |   cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
21 |   cudnn::createTensor4dDesc<Dtype>(&top_desc_);
22 |   handles_setup_ = true;
23 | }
24 |
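   | // The blob is viewed as a 4-D tensor whose channel dimension is the
   | // softmax axis, so CUDNN_SOFTMAX_MODE_CHANNEL normalizes over it:
   | // outer_num_ maps to N, the softmax axis to K, and inner_num_ to H.
   | // E.g. a (64, 10) fully-connected output with softmax_axis_ == 1
   | // becomes N=64, K=10, H=1, W=1.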
25 | template <typename Dtype>
26 | void CuDNNSoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
27 |     const vector<Blob<Dtype>*>& top) {
28 |   SoftmaxLayer<Dtype>::Reshape(bottom, top);
29 |   int N = this->outer_num_;
30 |   int K = bottom[0]->shape(this->softmax_axis_);
31 |   int H = this->inner_num_;
32 |   int W = 1;
33 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
34 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
35 | }
36 |
37 | template <typename Dtype>
38 | CuDNNSoftmaxLayer<Dtype>::~CuDNNSoftmaxLayer() {
39 |   // Check that handles have been setup before destroying.
40 |   if (!handles_setup_) { return; }
41 | 
42 |   cudnnDestroyTensorDescriptor(bottom_desc_);
43 |   cudnnDestroyTensorDescriptor(top_desc_);
44 |   cudnnDestroy(handle_);
45 | }
46 |
47 | INSTANTIATE_CLASS(CuDNNSoftmaxLayer);
48 |
49 | } // namespace caffe
50 | #endif
51 |
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_softmax_layer.cu:
--------------------------------------------------------------------------------
1 | #ifdef USE_CUDNN
2 | #include <algorithm>
3 | #include <cfloat>
4 | #include <vector>
5 |
6 | #include "thrust/device_vector.h"
7 |
8 | #include "caffe/layer.hpp"
9 | #include "caffe/util/math_functions.hpp"
10 | #include "caffe/vision_layers.hpp"
11 |
12 | namespace caffe {
13 |
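   | // CUDNN_SOFTMAX_ACCURATE subtracts the per-slice maximum before
   | // exponentiating, avoiding overflow in exp() for large inputs.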
14 | template <typename Dtype>
15 | void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
16 |     const vector<Blob<Dtype>*>& top) {
17 |   const Dtype* bottom_data = bottom[0]->gpu_data();
18 |   Dtype* top_data = top[0]->mutable_gpu_data();
19 |   CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
20 |         CUDNN_SOFTMAX_MODE_CHANNEL,
21 |         cudnn::dataType<Dtype>::one,
22 |         bottom_desc_, bottom_data,
23 |         cudnn::dataType<Dtype>::zero,
24 |         top_desc_, top_data));
25 | }
26 |
27 | template <typename Dtype>
28 | void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
29 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
30 |   if (propagate_down[0]) {
31 |     const Dtype* top_data = top[0]->gpu_data();
32 |     const Dtype* top_diff = top[0]->gpu_diff();
33 |     const Dtype* bottom_data = bottom[0]->gpu_data();
34 |     Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
35 | 
36 |     CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
37 |         CUDNN_SOFTMAX_MODE_CHANNEL,
38 |         cudnn::dataType<Dtype>