├── docs ├── tutorial │ ├── fig │ │ ├── .gitignore │ │ ├── layer.jpg │ │ ├── logreg.jpg │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ └── forward_backward.png │ └── convolution.md ├── CNAME ├── images │ ├── caffeine-icon.png │ └── GitHub-Mark-64px.png ├── _config.yml ├── README.md ├── stylesheets │ └── reset.css ├── install_yum.md └── install_apt.md ├── matlab ├── hdf5creation │ └── .gitignore └── +caffe │ ├── private │ ├── CHECK.m │ ├── CHECK_FILE_EXIST.m │ └── is_valid_handle.m │ ├── imagenet │ └── ilsvrc_2012_mean.mat │ ├── set_mode_cpu.m │ ├── set_mode_gpu.m │ ├── version.m │ ├── reset_all.m │ ├── set_device.m │ ├── get_solver.m │ ├── run_tests.m │ ├── +test │ ├── test_io.m │ └── test_solver.m │ ├── Layer.m │ ├── get_net.m │ └── io.m ├── src ├── caffe │ ├── test │ │ ├── test_data │ │ │ ├── solver_data_list.txt │ │ │ ├── sample_data.h5 │ │ │ ├── sample_data_list.txt │ │ │ ├── solver_data.h5 │ │ │ └── sample_data_2_gzip.h5 │ │ ├── test_protobuf.cpp │ │ ├── test_internal_thread.cpp │ │ ├── test_caffe_main.cpp │ │ ├── CMakeLists.txt │ │ ├── test_solver_factory.cpp │ │ └── test_layer_factory.cpp │ ├── layers │ │ ├── neuron_layer.cpp │ │ ├── silence_layer.cpp │ │ ├── silence_layer.cu │ │ ├── input_layer.cpp │ │ ├── threshold_layer.cu │ │ ├── threshold_layer.cpp │ │ ├── absval_layer.cu │ │ ├── base_data_layer.cu │ │ ├── split_layer.cu │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ ├── tanh_layer.cpp │ │ ├── sigmoid_layer.cpp │ │ ├── exp_layer.cu │ │ ├── cudnn_pooling_layer.cu │ │ ├── cudnn_softmax_layer.cpp │ │ ├── hdf5_output_layer.cu │ │ ├── euclidean_loss_layer.cu │ │ ├── bnll_layer.cpp │ │ ├── absval_layer.cpp │ │ ├── cudnn_lrn_layer.cu │ │ ├── relu_layer.cpp │ │ ├── cudnn_relu_layer.cpp │ │ ├── cudnn_tanh_layer.cpp │ │ ├── cudnn_softmax_layer.cu │ │ ├── elu_layer.cpp │ │ ├── cudnn_sigmoid_layer.cpp │ │ ├── flatten_layer.cpp │ │ ├── cudnn_lcn_layer.cu │ │ ├── cudnn_pooling_layer.cpp │ │ ├── hdf5_data_layer.cu │ │ ├── cudnn_lrn_layer.cpp │ │ └── euclidean_loss_layer.cpp │ ├── layer.cpp │ ├── util │ │ ├── cudnn.cpp │ │ ├── db_leveldb.cpp │ │ └── db.cpp │ ├── solvers │ │ ├── sgd_solver.cu │ │ ├── adagrad_solver.cu │ │ ├── nesterov_solver.cu │ │ ├── rmsprop_solver.cu │ │ ├── adadelta_solver.cu │ │ └── adam_solver.cu │ ├── CMakeLists.txt │ └── internal_thread.cpp └── gtest │ ├── CMakeLists.txt │ └── gtest_main.cc ├── python ├── caffe │ ├── imagenet │ │ └── ilsvrc_2012_mean.npy │ ├── test │ │ ├── test_layer_type_list.py │ │ └── test_io.py │ └── __init__.py ├── requirements.txt ├── draw_net.py └── CMakeLists.txt ├── data ├── convert_labels │ ├── VOCcode │ │ ├── PASemptyrecord.m │ │ ├── VOCreadxml.m │ │ ├── PASreadrecord.m │ │ ├── PASemptyobject.m │ │ ├── PASerrmsg.m │ │ ├── VOClabelcolormap.m │ │ ├── VOCwritexml.m │ │ └── VOCevalcls.m │ └── readLabels.m ├── KITTI-val │ ├── labelmap_voc.prototxt │ ├── extract_car_label.sh │ ├── create_data.sh │ └── create_list.sh └── KITTI-car │ ├── labelmap_voc.prototxt │ ├── extract_car_label.sh │ ├── create_data.sh │ └── create_list.sh ├── tools ├── test_net.cpp ├── device_query.cpp ├── train_net.cpp ├── finetune_net.cpp ├── net_speed_benchmark.cpp ├── CMakeLists.txt ├── extra │ └── launch_resize_and_crop_images.sh ├── upgrade_net_proto_binary.cpp ├── upgrade_net_proto_text.cpp └── upgrade_solver_proto_text.cpp ├── INSTALL.md ├── cmake ├── Templates │ ├── CaffeConfigVersion.cmake.in │ └── caffe_config.h.in ├── Modules │ ├── FindSnappy.cmake │ ├── FindLMDB.cmake │ ├── FindvecLib.cmake │ ├── FindGlog.cmake │ ├── FindGFlags.cmake │ ├── FindOpenBLAS.cmake │ ├── 
FindAtlas.cmake │ ├── FindLevelDB.cmake │ └── FindMatlabMex.cmake ├── lint.cmake ├── External │ └── glog.cmake └── Misc.cmake ├── scripts ├── build_docs.sh ├── download_model_from_gist.sh ├── copy_notebook.py ├── travis │ ├── travis_setup_makefile_config.sh │ └── travis_build_and_test.sh ├── gather_examples.sh ├── upload_model_to_gist.sh └── deploy_docs.sh ├── include └── caffe │ ├── util │ ├── format.hpp │ ├── signal_handler.h │ ├── insert_splits.hpp │ ├── blocking_queue.hpp │ ├── gpu_util.cuh │ ├── hdf5.hpp │ ├── db.hpp │ ├── rng.hpp │ ├── benchmark.hpp │ └── sampler.hpp │ ├── caffe.hpp │ ├── layers │ ├── neuron_layer.hpp │ ├── data_layer.hpp │ ├── cudnn_lrn_layer.hpp │ ├── cudnn_softmax_layer.hpp │ ├── tile_layer.hpp │ ├── cudnn_relu_layer.hpp │ ├── cudnn_tanh_layer.hpp │ ├── image_data_layer.hpp │ ├── annotated_data_layer.hpp │ ├── cudnn_sigmoid_layer.hpp │ ├── cudnn_lcn_layer.hpp │ ├── input_layer.hpp │ ├── silence_layer.hpp │ ├── split_layer.hpp │ ├── video_data_layer.hpp │ ├── mvn_layer.hpp │ ├── cudnn_pooling_layer.hpp │ ├── softmax_layer.hpp │ ├── eltwise_layer.hpp │ ├── slice_layer.hpp │ ├── dummy_data_layer.hpp │ ├── normalize_layer.hpp │ ├── embed_layer.hpp │ ├── window_data_layer.hpp │ ├── inner_product_layer.hpp │ ├── python_layer.hpp │ └── bias_layer.hpp │ └── internal_thread.hpp ├── CONTRIBUTORS.md ├── caffe.cloc └── .gitignore /docs/tutorial/fig/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /matlab/hdf5creation/.gitignore: -------------------------------------------------------------------------------- 1 | *.h5 2 | list.txt 3 | -------------------------------------------------------------------------------- /src/caffe/test/test_data/solver_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/solver_data.h5 2 | -------------------------------------------------------------------------------- /docs/tutorial/fig/layer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/tutorial/fig/layer.jpg -------------------------------------------------------------------------------- /docs/tutorial/fig/logreg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/tutorial/fig/logreg.jpg -------------------------------------------------------------------------------- /docs/images/caffeine-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/images/caffeine-icon.png -------------------------------------------------------------------------------- /docs/tutorial/fig/backward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/tutorial/fig/backward.jpg -------------------------------------------------------------------------------- /docs/tutorial/fig/forward.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/tutorial/fig/forward.jpg -------------------------------------------------------------------------------- /docs/images/GitHub-Mark-64px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/images/GitHub-Mark-64px.png -------------------------------------------------------------------------------- /matlab/+caffe/private/CHECK.m: -------------------------------------------------------------------------------- 1 | function CHECK(expr, error_msg) 2 | 3 | if ~expr 4 | error(error_msg); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /docs/tutorial/fig/forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/docs/tutorial/fig/forward_backward.png -------------------------------------------------------------------------------- /src/caffe/test/test_data/sample_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/src/caffe/test/test_data/sample_data.h5 -------------------------------------------------------------------------------- /src/caffe/test/test_data/sample_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/sample_data.h5 2 | src/caffe/test/test_data/sample_data_2_gzip.h5 3 | -------------------------------------------------------------------------------- /src/caffe/test/test_data/solver_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/src/caffe/test/test_data/solver_data.h5 -------------------------------------------------------------------------------- /matlab/+caffe/imagenet/ilsvrc_2012_mean.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat -------------------------------------------------------------------------------- /python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /matlab/+caffe/set_mode_cpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_cpu() 2 | % set_mode_cpu() 3 | % set Caffe to CPU mode 4 | 5 | caffe_('set_mode_cpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /matlab/+caffe/set_mode_gpu.m: -------------------------------------------------------------------------------- 1 | function set_mode_gpu() 2 | % set_mode_gpu() 3 | % set Caffe to GPU mode 4 | 5 | caffe_('set_mode_gpu'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /src/caffe/test/test_data/sample_data_2_gzip.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xiaohaoChen/rrc_detection/HEAD/src/caffe/test/test_data/sample_data_2_gzip.h5 
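The MATLAB wrappers above (set_mode_cpu.m, set_mode_gpu.m) forward to the same backend switches that the repo's Python interface exposes. A minimal pycaffe sketch of the same mode selection, assuming a CUDA-capable machine (device id 0 is only an example):

```python
import caffe

# GPU execution: pick a device before constructing any nets or solvers.
caffe.set_mode_gpu()
caffe.set_device(0)  # hypothetical device id; choose whichever GPU you intend to use

# CPU-only execution, the equivalent of set_mode_cpu.m:
caffe.set_mode_cpu()
```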
-------------------------------------------------------------------------------- /matlab/+caffe/version.m: -------------------------------------------------------------------------------- 1 | function version_str = version() 2 | % version() 3 | % show Caffe's version. 4 | 5 | version_str = caffe_('version'); 6 | 7 | end 8 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - 3 | scope: 4 | path: "" # an empty string here means all files in the project 5 | values: 6 | layout: "default" 7 | 8 | -------------------------------------------------------------------------------- /matlab/+caffe/private/CHECK_FILE_EXIST.m: -------------------------------------------------------------------------------- 1 | function CHECK_FILE_EXIST(filename) 2 | 3 | if exist(filename, 'file') == 0 4 | error('%s does not exist', filename); 5 | end 6 | 7 | end 8 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/PASemptyrecord.m: -------------------------------------------------------------------------------- 1 | function record=PASemptyrecord 2 | record.imgname=''; 3 | record.imgsize=[]; 4 | record.database=''; 5 | record.objects=PASemptyobject; 6 | return -------------------------------------------------------------------------------- /tools/test_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /data/KITTI-val/labelmap_voc.prototxt: -------------------------------------------------------------------------------- 1 | item { 2 | name: "none_of_the_above" 3 | label: 0 4 | display_name: "background" 5 | } 6 | item { 7 | name: "Car" 8 | label: 1 9 | display_name: "Car" 10 | } 11 | -------------------------------------------------------------------------------- /src/gtest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) 2 | caffe_default_properties(gtest) 3 | 4 | #add_library(gtest_main gtest_main.cc) 5 | #target_link_libraries(gtest_main gtest) 6 | -------------------------------------------------------------------------------- /tools/device_query.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/common.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe device_query " 5 | "[--device_id=0] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /tools/train_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--snapshot=...] 
instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /matlab/+caffe/reset_all.m: -------------------------------------------------------------------------------- 1 | function reset_all() 2 | % reset_all() 3 | % clear all solvers and stand-alone nets and reset Caffe to initial status 4 | 5 | caffe_('reset'); 6 | is_valid_handle('get_new_init_key'); 7 | 8 | end 9 | -------------------------------------------------------------------------------- /tools/finetune_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--weights=...] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 5 | 6 | Check the users group in case you need help: 7 | https://groups.google.com/forum/#!forum/caffe-users 8 | -------------------------------------------------------------------------------- /tools/net_speed_benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 
6 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/VOCreadxml.m: -------------------------------------------------------------------------------- 1 | function rec = VOCreadxml(path) 2 | 3 | if length(path)>5&&strcmp(path(1:5),'http:') 4 | xml=urlread(path)'; 5 | else 6 | f=fopen(path,'r'); 7 | xml=fread(f,'*char')'; 8 | fclose(f); 9 | end 10 | rec=VOCxml2struct(xml); 11 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/PASreadrecord.m: -------------------------------------------------------------------------------- 1 | function rec = PASreadrecord(path) 2 | 3 | if length(path)<4 4 | error('unable to determine format: %s',path); 5 | end 6 | 7 | if strcmp(path(end-3:end),'.txt') 8 | rec=PASreadrectxt(path); 9 | else 10 | rec=VOCreadrecxml(path); 11 | end 12 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/PASemptyobject.m: -------------------------------------------------------------------------------- 1 | function object=PASemptyobject 2 | object.label=''; 3 | object.orglabel=''; 4 | object.bbox=[]; 5 | object.polygon=[]; 6 | object.mask=''; 7 | object.class=''; 8 | object.view=''; 9 | object.truncated=false; 10 | object.difficult=false; 11 | return -------------------------------------------------------------------------------- /matlab/+caffe/set_device.m: -------------------------------------------------------------------------------- 1 | function set_device(device_id) 2 | % set_device(device_id) 3 | % set Caffe's GPU device ID 4 | 5 | CHECK(isscalar(device_id) && device_id >= 0, ... 6 | 'device_id must be non-negative integer'); 7 | device_id = double(device_id); 8 | 9 | caffe_('set_device', device_id); 10 | 11 | end 12 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/PASerrmsg.m: -------------------------------------------------------------------------------- 1 | function PASerrmsg(PASerr,SYSerr) 2 | fprintf('Pascal Error Message: %s\n',PASerr); 3 | fprintf('System Error Message: %s\n',SYSerr); 4 | k=input('Enter K for keyboard, any other key to continue or ^C to quit ...','s'); 5 | if (~isempty(k)), if (lower(k)=='k'), keyboard; end; end; 6 | fprintf('\n'); 7 | return -------------------------------------------------------------------------------- /python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | matplotlib>=1.3.1 6 | ipython>=3.0.0 7 | h5py>=2.2.0 8 | leveldb>=0.191 9 | networkx>=1.8.1 10 | nose>=1.3.0 11 | pandas>=0.12.0 12 | python-dateutil>=1.4,<2 13 | protobuf>=2.5.0 14 | python-gflags>=2.0 15 | pyyaml>=3.10 16 | Pillow>=2.3.0 17 | six>=1.1.0 -------------------------------------------------------------------------------- /matlab/+caffe/get_solver.m: -------------------------------------------------------------------------------- 1 | function solver = get_solver(solver_file) 2 | % solver = get_solver(solver_file) 3 | % Construct a Solver object from solver_file 4 | 5 | CHECK(ischar(solver_file), 'solver_file must be a string'); 6 | CHECK_FILE_EXIST(solver_file); 7 | pSolver = caffe_('get_solver', solver_file); 8 | solver = caffe.Solver(pSolver); 9 | 10 | end 11 | -------------------------------------------------------------------------------- /data/KITTI-car/labelmap_voc.prototxt: 
-------------------------------------------------------------------------------- 1 | item { 2 | name: "none_of_the_above" 3 | label: 0 4 | display_name: "background" 5 | } 6 | item { 7 | name: "Car" 8 | label: 1 9 | display_name: "Car" 10 | } 11 | item { 12 | name: "Pedestrian" 13 | label: 2 14 | display_name: "Pedestrian" 15 | } 16 | item { 17 | name: "Cyclist" 18 | label: 3 19 | display_name: "Cyclist" 20 | } 21 | -------------------------------------------------------------------------------- /src/caffe/layers/neuron_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/neuron_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template <typename Dtype> 8 | void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom, 9 | const vector<Blob<Dtype>*>& top) { 10 | top[0]->ReshapeLike(*bottom[0]); 11 | } 12 | 13 | INSTANTIATE_CLASS(NeuronLayer); 14 | 15 | } // namespace caffe 16 | -------------------------------------------------------------------------------- /python/caffe/test/test_layer_type_list.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import caffe 4 | 5 | class TestLayerTypeList(unittest.TestCase): 6 | 7 | def test_standard_types(self): 8 | #removing 'Data' from list 9 | for type_name in ['Data', 'Convolution', 'InnerProduct']: 10 | self.assertIn(type_name, caffe.layer_type_list(), 11 | '%s not in layer_type_list()' % type_name) 12 | -------------------------------------------------------------------------------- /python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver 2 | from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list 3 | from ._caffe import __version__ 4 | from .proto.caffe_pb2 import TRAIN, TEST 5 | from .classifier import Classifier 6 | from .detector import Detector 7 | from . import io 8 | from .net_spec import layers, params, NetSpec, to_proto 9 | -------------------------------------------------------------------------------- /cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /matlab/+caffe/run_tests.m: -------------------------------------------------------------------------------- 1 | function results = run_tests() 2 | % results = run_tests() 3 | % run all tests in this caffe matlab wrapper package 4 | 5 | % use CPU for testing 6 | caffe.set_mode_cpu(); 7 | 8 | % reset caffe before testing 9 | caffe.reset_all(); 10 | 11 | % put all test cases here 12 | results = [... 13 | run(caffe.test.test_net) ... 14 | run(caffe.test.test_solver) ... 
15 | run(caffe.test.test_io) ]; 16 | 17 | % reset caffe after testing 18 | caffe.reset_all(); 19 | 20 | end 21 | -------------------------------------------------------------------------------- /scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build documentation for display in web browser. 3 | 4 | PORT=${1:-4000} 5 | 6 | echo "usage: build_docs.sh [port]" 7 | 8 | # Find the docs dir, no matter where the script is called 9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 10 | cd $ROOT_DIR 11 | 12 | # Gather docs. 13 | scripts/gather_examples.sh 14 | 15 | # Generate developer docs. 16 | make docs 17 | 18 | # Display docs using web server. 19 | cd docs 20 | jekyll serve -w -s . -d _site --port=$PORT 21 | -------------------------------------------------------------------------------- /include/caffe/util/format.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_FORMAT_H_ 2 | #define CAFFE_UTIL_FORMAT_H_ 3 | 4 | #include <iomanip> // NOLINT(readability/streams) 5 | #include <sstream> // NOLINT(readability/streams) 6 | #include <string> 7 | 8 | namespace caffe { 9 | 10 | inline std::string format_int(int n, int numberOfLeadingZeros = 0 ) { 11 | std::ostringstream s; 12 | s << std::setw(numberOfLeadingZeros) << std::setfill('0') << n; 13 | return s.str(); 14 | } 15 | 16 | } 17 | 18 | #endif // CAFFE_UTIL_FORMAT_H_ 19 | -------------------------------------------------------------------------------- /src/caffe/layer.cpp: -------------------------------------------------------------------------------- 1 | #include <boost/thread.hpp> 2 | #include "caffe/layer.hpp" 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | void Layer<Dtype>::InitMutex() { 8 | forward_mutex_.reset(new boost::mutex()); 9 | } 10 | 11 | template <typename Dtype> 12 | void Layer<Dtype>::Lock() { 13 | if (IsShared()) { 14 | forward_mutex_->lock(); 15 | } 16 | } 17 | 18 | template <typename Dtype> 19 | void Layer<Dtype>::Unlock() { 20 | if (IsShared()) { 21 | forward_mutex_->unlock(); 22 | } 23 | } 24 | 25 | INSTANTIATE_CLASS(Layer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites. 16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z "$GIST" ]; then 7 | echo "usage: download_model_from_gist.sh <gist_id> [<dirname>]" 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d "$MODEL_DIR" ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 
16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip 22 | unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR 23 | rm $MODEL_DIR/gist.zip 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /docs/stylesheets/reset.css: -------------------------------------------------------------------------------- 1 | /* MeyerWeb Reset */ 2 | 3 | html, body, div, span, applet, object, iframe, 4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 5 | a, abbr, acronym, address, big, cite, code, 6 | del, dfn, em, img, ins, kbd, q, s, samp, 7 | small, strike, strong, sub, sup, tt, var, 8 | b, u, i, center, 9 | dl, dt, dd, ol, ul, li, 10 | fieldset, form, label, legend, 11 | table, caption, tbody, tfoot, thead, tr, th, td, 12 | article, aside, canvas, details, embed, 13 | figure, figcaption, footer, header, hgroup, 14 | menu, nav, output, ruby, section, summary, 15 | time, mark, audio, video { 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | -------------------------------------------------------------------------------- /include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/parallel.hpp" 14 | #include "caffe/proto/caffe.pb.h" 15 | #include "caffe/solver.hpp" 16 | #include "caffe/solver_factory.hpp" 17 | #include "caffe/util/benchmark.hpp" 18 | #include "caffe/util/io.hpp" 19 | #include "caffe/util/upgrade_proto.hpp" 20 | 21 | #endif // CAFFE_CAFFE_HPP_ 22 | -------------------------------------------------------------------------------- /matlab/+caffe/+test/test_io.m: -------------------------------------------------------------------------------- 1 | classdef test_io < matlab.unittest.TestCase 2 | methods (Test) 3 | function test_read_write_mean(self) 4 | % randomly generate mean data 5 | width = 200; 6 | height = 300; 7 | channels = 3; 8 | mean_data_write = 255 * rand(width, height, channels, 'single'); 9 | % write mean data to binary proto 10 | mean_proto_file = tempname(); 11 | caffe.io.write_mean(mean_data_write, mean_proto_file); 12 | % read mean data from saved binary proto and test whether they are equal 13 | mean_data_read = caffe.io.read_mean(mean_proto_file); 14 | self.verifyEqual(mean_data_write, mean_data_read) 15 | delete(mean_proto_file); 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /data/KITTI-car/extract_car_label.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | kitti_path="$HOME/data/KITTI/" #path to kitti dataset 4 | kitti_label_path="$kitti_path/training/label_2" #path to the labels of kitti 5 | kitti_car_label_path="$kitti_path/training/label_2car" #path to the labels of cars in kitti 6 | 7 | cd $kitti_label_path 8 | if [ ! 
-d $kitti_car_label_path ];then 9 | mkdir $kitti_car_label_path 10 | for file_a in ${kitti_label_path}/*.txt; do 11 | temp_file=`basename $file_a` 12 | cat $temp_file | while read line 13 | do 14 | type_name=`basename ${line%% *}` 15 | if [ "$type_name" = "Car" ] 16 | then 17 | echo $line >> "$kitti_car_label_path/$temp_file" 18 | fi 19 | done 20 | done 21 | fi -------------------------------------------------------------------------------- /data/KITTI-val/extract_car_label.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | kitti_path="$HOME/data/KITTI/" #path to kitti dataset 4 | kitti_label_path="$kitti_path/training/label_2" #path to the labels of kitti 5 | kitti_car_label_path="$kitti_path/training/label_2car" #path to the labels of cars in kitti 6 | 7 | cd $kitti_label_path 8 | if [ ! -d $kitti_car_label_path ];then 9 | mkdir $kitti_car_label_path 10 | for file_a in ${kitti_label_path}/*.txt; do 11 | temp_file=`basename $file_a` 12 | cat $temp_file | while read line 13 | do 14 | type_name=`basename ${line%% *}` 15 | if [ "$type_name" = "Car" ] 16 | then 17 | echo $line >> "$kitti_car_label_path/$temp_file" 18 | fi 19 | done 20 | done 21 | fi -------------------------------------------------------------------------------- /docs/tutorial/convolution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convolution 3 | --- 4 | # Caffeinated Convolution 5 | 6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication. 7 | This linear algebra computation is highly tuned in BLAS libraries and efficiently computed on GPU devices. 8 | 9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo). 10 | 11 | As it turns out, this same reduction was independently explored in the context of conv. nets by 12 | 13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006. 
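To make the reduction concrete, here is a NumPy sketch of the im2col-plus-GEMM idea the memo describes (illustrative only: single image, unit stride, no padding; `im2col` here is a toy re-implementation, not Caffe's):

```python
import numpy as np

def im2col(x, k):
    """Unroll every k x k patch of x (C x H x W) into one column."""
    C, H, W = x.shape
    out_h, out_w = H - k + 1, W - k + 1
    cols = np.empty((C * k * k, out_h * out_w))
    for i in range(out_h):
        for j in range(out_w):
            cols[:, i * out_w + j] = x[:, i:i + k, j:j + k].ravel()
    return cols

x = np.random.randn(3, 8, 8)      # C x H x W input
w = np.random.randn(16, 3, 5, 5)  # M filters, each C x k x k
cols = im2col(x, 5)               # (C*k*k) x (out_h*out_w)
y = w.reshape(16, -1) @ cols      # one matrix-matrix product (GEMM)
y = y.reshape(16, 4, 4)           # M x out_h x out_w feature maps
```

Laying every spatial position out as a column is what lets a single highly tuned GEMM call do all the work.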
14 | -------------------------------------------------------------------------------- /src/caffe/layers/silence_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 10 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 11 | for (int i = 0; i < bottom.size(); ++i) { 12 | if (propagate_down[i]) { 13 | caffe_set(bottom[i]->count(), Dtype(0), 14 | bottom[i]->mutable_cpu_diff()); 15 | } 16 | } 17 | } 18 | 19 | #ifdef CPU_ONLY 20 | STUB_GPU(SilenceLayer); 21 | #endif 22 | 23 | INSTANTIATE_CLASS(SilenceLayer); 24 | REGISTER_LAYER_CLASS(Silence); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /src/caffe/util/cudnn.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include "caffe/util/cudnn.hpp" 3 | 4 | namespace caffe { 5 | namespace cudnn { 6 | 7 | float dataType<float>::oneval = 1.0; 8 | float dataType<float>::zeroval = 0.0; 9 | const void* dataType<float>::one = 10 | static_cast<void *>(&dataType<float>::oneval); 11 | const void* dataType<float>::zero = 12 | static_cast<void *>(&dataType<float>::zeroval); 13 | 14 | double dataType<double>::oneval = 1.0; 15 | double dataType<double>::zeroval = 0.0; 16 | const void* dataType<double>::one = 17 | static_cast<void *>(&dataType<double>::oneval); 18 | const void* dataType<double>::zero = 19 | static_cast<void *>(&dataType<double>::zeroval); 20 | 21 | } // namespace cudnn 22 | } // namespace caffe 23 | #endif 24 | -------------------------------------------------------------------------------- /include/caffe/util/signal_handler.h: -------------------------------------------------------------------------------- 1 | #ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 2 | #define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 3 | 4 | #include "caffe/proto/caffe.pb.h" 5 | #include "caffe/solver.hpp" 6 | 7 | namespace caffe { 8 | 9 | class SignalHandler { 10 | public: 11 | // Constructor. Specify what action to take when a signal is received. 
12 | SignalHandler(SolverAction::Enum SIGINT_action, 13 | SolverAction::Enum SIGHUP_action); 14 | ~SignalHandler(); 15 | ActionCallback GetActionFunction(); 16 | private: 17 | SolverAction::Enum CheckForSignals() const; 18 | SolverAction::Enum SIGINT_action_; 19 | SolverAction::Enum SIGHUP_action_; 20 | }; 21 | 22 | } // namespace caffe 23 | 24 | #endif // INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ 25 | -------------------------------------------------------------------------------- /src/caffe/util/db_leveldb.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_LEVELDB 2 | #include "caffe/util/db_leveldb.hpp" 3 | 4 | #include <string> 5 | 6 | namespace caffe { namespace db { 7 | 8 | void LevelDB::Open(const string& source, Mode mode) { 9 | leveldb::Options options; 10 | options.block_size = 65536; 11 | options.write_buffer_size = 268435456; 12 | options.max_open_files = 100; 13 | options.error_if_exists = mode == NEW; 14 | options.create_if_missing = mode != READ; 15 | leveldb::Status status = leveldb::DB::Open(options, source, &db_); 16 | CHECK(status.ok()) << "Failed to open leveldb " << source 17 | << std::endl << status.ToString(); 18 | LOG(INFO) << "Opened leveldb " << source; 19 | } 20 | 21 | } // namespace db 22 | } // namespace caffe 23 | #endif // USE_LEVELDB 24 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/VOClabelcolormap.m: -------------------------------------------------------------------------------- 1 | % VOCLABELCOLORMAP Creates a label color map such that adjacent indices have different 2 | % colors. Useful for reading and writing index images which contain large indices, 3 | % by encoding them as RGB images. 4 | % 5 | % CMAP = VOCLABELCOLORMAP(N) creates a label color map with N entries. 6 | function cmap = labelcolormap(N) 7 | 8 | if nargin==0 9 | N=256 10 | end 11 | cmap = zeros(N,3); 12 | for i=1:N 13 | id = i-1; r=0;g=0;b=0; 14 | for j=0:7 15 | r = bitor(r, bitshift(bitget(id,1),7 - j)); 16 | g = bitor(g, bitshift(bitget(id,2),7 - j)); 17 | b = bitor(b, bitshift(bitget(id,3),7 - j)); 18 | id = bitshift(id,-3); 19 | end 20 | cmap(i,1)=r; cmap(i,2)=g; cmap(i,3)=b; 21 | end 22 | cmap = cmap / 255; 23 | -------------------------------------------------------------------------------- /src/caffe/layers/silence_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/silence_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 10 | const vector<Blob<Dtype>*>& top) { 11 | // Do nothing. 
12 | } 13 | 14 | template <typename Dtype> 15 | void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 16 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 17 | for (int i = 0; i < bottom.size(); ++i) { 18 | if (propagate_down[i]) { 19 | caffe_gpu_set(bottom[i]->count(), Dtype(0), 20 | bottom[i]->mutable_gpu_diff()); 21 | } 22 | } 23 | } 24 | 25 | INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /src/caffe/solvers/sgd_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype momentum, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | g[i] = h[i] = momentum*h[i] + local_rate*g[i]; 11 | } 12 | } 13 | template <typename Dtype> 14 | void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, 15 | Dtype local_rate) { 16 | SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 17 | <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( 18 | N, g, h, momentum, local_rate); 19 | CUDA_POST_KERNEL_CHECK; 20 | } 21 | template void sgd_update_gpu<float>(int, float*, float*, float, float); 22 | template void sgd_update_gpu<double>(int, double*, double*, double, double); 23 | 24 | } // namespace caffe 25 | -------------------------------------------------------------------------------- /data/KITTI-car/create_data.sh: -------------------------------------------------------------------------------- 1 | cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd ) 2 | root_dir=$cur_dir/../.. 3 | 4 | cd $root_dir 5 | 6 | redo=1 7 | data_root_dir="$HOME/data/KITTI/" 8 | dataset_name="KITTI-car" 9 | mapfile="$root_dir/data/$dataset_name/labelmap_voc.prototxt" 10 | anno_type="detection" 11 | db="lmdb" 12 | min_dim=0 13 | max_dim=0 14 | width=0 15 | height=0 16 | 17 | extra_cmd="--encode-type=png --encoded" 18 | if [ $redo ] 19 | then 20 | extra_cmd="$extra_cmd --redo" 21 | fi 22 | for subset in training testing 23 | do 24 | python $root_dir/scripts/create_annoset.py --anno-type=$anno_type --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$subset.txt $data_root_dir/$db/$dataset_name"_"$subset"_"$db $root_dir/data/$dataset_name/lmdb 25 | done 26 | -------------------------------------------------------------------------------- /data/KITTI-val/create_data.sh: -------------------------------------------------------------------------------- 1 | cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd ) 2 | root_dir=$cur_dir/../.. 
3 | 4 | cd $root_dir 5 | 6 | redo=1 7 | data_root_dir="$HOME/data/KITTI/" 8 | dataset_name="KITTI-val" 9 | mapfile="$root_dir/data/$dataset_name/labelmap_voc.prototxt" 10 | anno_type="detection" 11 | db="lmdb" 12 | min_dim=0 13 | max_dim=0 14 | width=0 15 | height=0 16 | 17 | extra_cmd="--encode-type=png --encoded" 18 | if [ $redo ] 19 | then 20 | extra_cmd="$extra_cmd --redo" 21 | fi 22 | for subset in training testing 23 | do 24 | python $root_dir/scripts/create_annoset.py --anno-type=$anno_type --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$subset.txt $data_root_dir/$db/$dataset_name"_"$subset"_"$db $root_dir/data/$dataset_name/lmdb 25 | done 26 | -------------------------------------------------------------------------------- /tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Collect source files 2 | file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 3 | 4 | # Build each source file independently 5 | foreach(source ${srcs}) 6 | get_filename_component(name ${source} NAME_WE) 7 | 8 | # caffe target already exists 9 | if(name MATCHES "caffe") 10 | set(name ${name}.bin) 11 | endif() 12 | 13 | # target 14 | add_executable(${name} ${source}) 15 | target_link_libraries(${name} ${Caffe_LINK}) 16 | caffe_default_properties(${name}) 17 | 18 | # set back RUNTIME_OUTPUT_DIRECTORY 19 | caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools") 20 | caffe_set_solution_folder(${name} tools) 21 | 22 | # restore output name without suffix 23 | if(name MATCHES "caffe.bin") 24 | set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe) 25 | endif() 26 | 27 | # Install 28 | install(TARGETS ${name} DESTINATION bin) 29 | endforeach(source) 30 | -------------------------------------------------------------------------------- /src/caffe/solvers/adagrad_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | __global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta, 8 | Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = h[i] + gi*gi; 12 | g[i] = local_rate * gi / (sqrt(hi) + delta); 13 | } 14 | } 15 | template <typename Dtype> 16 | void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta, 17 | Dtype local_rate) { 18 | AdaGradUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( 20 | N, g, h, delta, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void adagrad_update_gpu<float>(int, float*, float*, float, float); 24 | template void adagrad_update_gpu<double>(int, double*, double*, double, double); 25 | 26 | } // namespace caffe 27 | -------------------------------------------------------------------------------- /src/caffe/test/test_protobuf.cpp: -------------------------------------------------------------------------------- 1 | // This is simply a script that tries serializing a protocol buffer in text 2 | // format. Nothing special here and no actual code is being tested. 
3 | #include <string> 4 | 5 | #include "google/protobuf/text_format.h" 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/test/test_caffe_main.hpp" 11 | 12 | namespace caffe { 13 | 14 | class ProtoTest : public ::testing::Test {}; 15 | 16 | TEST_F(ProtoTest, TestSerialization) { 17 | LayerParameter param; 18 | param.set_name("test"); 19 | param.set_type("Test"); 20 | std::cout << "Printing in binary format." << std::endl; 21 | std::cout << param.SerializeAsString() << std::endl; 22 | std::cout << "Printing in text format." << std::endl; 23 | std::string str; 24 | google::protobuf::TextFormat::PrintToString(param, &str); 25 | std::cout << str << std::endl; 26 | EXPECT_TRUE(true); 27 | } 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include <string> 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer. 12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | } // namespace caffe 25 | 26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 | -------------------------------------------------------------------------------- /src/caffe/layers/input_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/input_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template <typename Dtype> 8 | void InputLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom, 9 | const vector<Blob<Dtype>*>& top) { 10 | const int num_top = top.size(); 11 | const InputParameter& param = this->layer_param_.input_param(); 12 | const int num_shape = param.shape_size(); 13 | CHECK(num_shape == 0 || num_shape == 1 || num_shape == num_top) 14 | << "Must specify 'shape' once, once per top blob, or not at all: " 15 | << num_top << " tops vs. " << num_shape << " shapes."; 16 | if (num_shape > 0) { 17 | for (int i = 0; i < num_top; ++i) { 18 | const int shape_index = (param.shape_size() == 1) ? 
0 : i; 19 | top[i]->Reshape(param.shape(shape_index)); 20 | } 21 | } 22 | } 23 | 24 | INSTANTIATE_CLASS(InputLayer); 25 | REGISTER_LAYER_CLASS(Input); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /src/caffe/solvers/nesterov_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | __global__ void NesterovUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype momentum, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float hi = h[i]; 11 | float hi_new = h[i] = momentum * hi + local_rate * g[i]; 12 | g[i] = (1+momentum) * hi_new - momentum * hi; 13 | } 14 | } 15 | template <typename Dtype> 16 | void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, 17 | Dtype local_rate) { 18 | NesterovUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( 20 | N, g, h, momentum, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void nesterov_update_gpu<float>(int, float*, float*, float, float); 24 | template void nesterov_update_gpu<double>(int, double*, double*, double, 25 | double); 26 | 27 | } // namespace caffe 28 | -------------------------------------------------------------------------------- /matlab/+caffe/Layer.m: -------------------------------------------------------------------------------- 1 | classdef Layer < handle 2 | % Wrapper class of caffe::Layer in matlab 3 | 4 | properties (Access = private) 5 | hLayer_self 6 | attributes 7 | % attributes fields: 8 | % hBlob_blobs 9 | end 10 | properties (SetAccess = private) 11 | params 12 | end 13 | 14 | methods 15 | function self = Layer(hLayer_layer) 16 | CHECK(is_valid_handle(hLayer_layer), 'invalid Layer handle'); 17 | 18 | % setup self handle and attributes 19 | self.hLayer_self = hLayer_layer; 20 | self.attributes = caffe_('layer_get_attr', self.hLayer_self); 21 | 22 | % setup weights 23 | self.params = caffe.Blob.empty(); 24 | for n = 1:length(self.attributes.hBlob_blobs) 25 | self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n)); 26 | end 27 | end 28 | function layer_type = type(self) 29 | layer_type = caffe_('layer_get_type', self.hLayer_self); 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* NVIDIA CUDA */ 8 | #cmakedefine HAVE_CUDA 9 | 10 | /* NVIDIA cuDNN */ 11 | #cmakedefine HAVE_CUDNN 12 | #cmakedefine USE_CUDNN 13 | 14 | /* CPU-only mode */ 15 | #cmakedefine CPU_ONLY 16 | 17 | /* Test device */ 18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 19 | 20 | /* Temporary (TODO: remove) */ 21 | #if 1 22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 24 | #define CMAKE_EXT ".gen.cmake" 25 | #else 26 | #define CMAKE_SOURCE_DIR "src/" 27 | #define EXAMPLES_SOURCE_DIR "examples/" 28 | #define CMAKE_EXT "" 29 | #endif 30 | 31 | /* Matlab */ 32 | #cmakedefine HAVE_MATLAB 33 | 34 | /* IO libraries */ 35 | #cmakedefine USE_OPENCV 36 | #cmakedefine USE_LEVELDB 37 | #cmakedefine USE_LMDB 38 | #cmakedefine ALLOW_LMDB_NOLOCK 39 | -------------------------------------------------------------------------------- /src/caffe/util/db.cpp: 
-------------------------------------------------------------------------------- 1 | #include "caffe/util/db.hpp" 2 | #include "caffe/util/db_leveldb.hpp" 3 | #include "caffe/util/db_lmdb.hpp" 4 | 5 | #include <string> 6 | 7 | namespace caffe { namespace db { 8 | 9 | DB* GetDB(DataParameter::DB backend) { 10 | switch (backend) { 11 | #ifdef USE_LEVELDB 12 | case DataParameter_DB_LEVELDB: 13 | return new LevelDB(); 14 | #endif // USE_LEVELDB 15 | #ifdef USE_LMDB 16 | case DataParameter_DB_LMDB: 17 | return new LMDB(); 18 | #endif // USE_LMDB 19 | default: 20 | LOG(FATAL) << "Unknown database backend"; 21 | return NULL; 22 | } 23 | } 24 | 25 | DB* GetDB(const string& backend) { 26 | #ifdef USE_LEVELDB 27 | if (backend == "leveldb") { 28 | return new LevelDB(); 29 | } 30 | #endif // USE_LEVELDB 31 | #ifdef USE_LMDB 32 | if (backend == "lmdb") { 33 | return new LMDB(); 34 | } 35 | #endif // USE_LMDB 36 | LOG(FATAL) << "Unknown database backend"; 37 | return NULL; 38 | } 39 | 40 | } // namespace db 41 | } // namespace caffe 42 | -------------------------------------------------------------------------------- /src/caffe/layers/threshold_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template <typename Dtype> 8 | __global__ void ThresholdForward(const int n, const Dtype threshold, 9 | const Dtype* in, Dtype* out) { 10 | CUDA_KERNEL_LOOP(index, n) { 11 | out[index] = in[index] > threshold ? 1 : 0; 12 | } 13 | } 14 | 15 | template <typename Dtype> 16 | void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 17 | const vector<Blob<Dtype>*>& top) { 18 | const Dtype* bottom_data = bottom[0]->gpu_data(); 19 | Dtype* top_data = top[0]->mutable_gpu_data(); 20 | const int count = bottom[0]->count(); 21 | // NOLINT_NEXT_LINE(whitespace/operators) 22 | ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 23 | count, threshold_, bottom_data, top_data); 24 | CUDA_POST_KERNEL_CHECK; 25 | } 26 | 27 | 28 | INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer); 29 | 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /src/caffe/solvers/rmsprop_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | __global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h, 8 | Dtype rms_decay, Dtype delta, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi; 12 | g[i] = local_rate * g[i] / (sqrt(hi) + delta); 13 | } 14 | } 15 | template <typename Dtype> 16 | void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay, 17 | Dtype delta, Dtype local_rate) { 18 | RMSPropUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 19 | <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( 20 | N, g, h, rms_decay, delta, local_rate); 21 | CUDA_POST_KERNEL_CHECK; 22 | } 23 | template void rmsprop_update_gpu<float>(int, float*, float*, float, float, 24 | float); 25 | template void rmsprop_update_gpu<double>(int, double*, double*, double, double, 26 | double); 27 | 28 | } // namespace caffe 29 | -------------------------------------------------------------------------------- /include/caffe/layers/neuron_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_NEURON_LAYER_HPP_ 2 | #define CAFFE_NEURON_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace 
caffe { 11 | 12 | /** 13 | * @brief An interface for layers that take one blob as input (@f$ x @f$) 14 | * and produce one equally-sized blob as output (@f$ y @f$), where 15 | * each element of the output depends only on the corresponding input 16 | * element. 17 | */ 18 | template <typename Dtype> 19 | class NeuronLayer : public Layer<Dtype> { 20 | public: 21 | explicit NeuronLayer(const LayerParameter& param) 22 | : Layer<Dtype>(param) {} 23 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 24 | const vector<Blob<Dtype>*>& top); 25 | 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | }; 29 | 30 | } // namespace caffe 31 | 32 | #endif // CAFFE_NEURON_LAYER_HPP_ 33 | -------------------------------------------------------------------------------- /src/caffe/layers/threshold_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/threshold_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template <typename Dtype> 8 | void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom, 9 | const vector<Blob<Dtype>*>& top) { 10 | NeuronLayer<Dtype>::LayerSetUp(bottom, top); 11 | threshold_ = this->layer_param_.threshold_param().threshold(); 12 | } 13 | 14 | template <typename Dtype> 15 | void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 16 | const vector<Blob<Dtype>*>& top) { 17 | const Dtype* bottom_data = bottom[0]->cpu_data(); 18 | Dtype* top_data = top[0]->mutable_cpu_data(); 19 | const int count = bottom[0]->count(); 20 | for (int i = 0; i < count; ++i) { 21 | top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); 22 | } 23 | } 24 | 25 | #ifdef CPU_ONLY 26 | STUB_GPU_FORWARD(ThresholdLayer, Forward); 27 | #endif 28 | 29 | INSTANTIATE_CLASS(ThresholdLayer); 30 | REGISTER_LAYER_CLASS(Threshold); 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /matlab/+caffe/private/is_valid_handle.m: -------------------------------------------------------------------------------- 1 | function valid = is_valid_handle(hObj) 2 | % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key') 3 | % Check if a handle is valid (has the right data type and init_key matches) 4 | % Use is_valid_handle('get_new_init_key') to get new init_key from C++; 5 | 6 | % a handle is a struct array with the following fields 7 | % (uint64) ptr : the pointer to the C++ object 8 | % (double) init_key : caffe initialization key 9 | 10 | persistent init_key; 11 | if isempty(init_key) 12 | init_key = caffe_('get_init_key'); 13 | end 14 | 15 | % is_valid_handle('get_new_init_key') to get new init_key from C++; 16 | if ischar(hObj) && strcmp(hObj, 'get_new_init_key') 17 | init_key = caffe_('get_init_key'); 18 | return 19 | else 20 | % check whether data types are correct and init_key matches 21 | valid = isstruct(hObj) ... 22 | && isscalar(hObj.ptr) && isa(hObj.ptr, 'uint64') ... 23 | && isscalar(hObj.init_key) && isa(hObj.init_key, 'double') ... 
24 | && hObj.init_key == init_key; 25 | end 26 | 27 | end 28 | -------------------------------------------------------------------------------- /include/caffe/util/blocking_queue.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 2 | #define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ 3 | 4 | #include <queue> 5 | #include <string> 6 | 7 | namespace caffe { 8 | 9 | template<typename T> 10 | class BlockingQueue { 11 | public: 12 | explicit BlockingQueue(); 13 | 14 | void push(const T& t); 15 | 16 | bool try_pop(T* t); 17 | 18 | // This logs a message if the thread needs to be blocked, 19 | // useful for detecting e.g. when data feeding is too slow 20 | T pop(const string& log_on_wait = ""); 21 | 22 | bool try_peek(T* t); 23 | 24 | // Return element without removing it 25 | T peek(); 26 | 27 | size_t size() const; 28 | 29 | protected: 30 | /** 31 | Move synchronization fields out instead of including boost/thread.hpp 32 | to avoid boost/NVCC issues (#1009, #1010) on OSX. Also fails on 33 | Linux CUDA 7.0.18. 34 | */ 35 | class sync; 36 | 37 | std::queue<T> queue_; 38 | shared_ptr<sync> sync_; 39 | 40 | DISABLE_COPY_AND_ASSIGN(BlockingQueue); 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /src/caffe/layers/absval_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/absval_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void AbsValLayer<Dtype>::Forward_gpu( 10 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 11 | const int count = top[0]->count(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 14 | } 15 | 16 | template <typename Dtype> 17 | void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 18 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 19 | const int count = top[0]->count(); 20 | const Dtype* top_diff = top[0]->gpu_diff(); 21 | if (propagate_down[0]) { 22 | const Dtype* bottom_data = bottom[0]->gpu_data(); 23 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 24 | caffe_gpu_sign(count, bottom_data, bottom_diff); 25 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 26 | } 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); 30 | 31 | 32 | } // namespace caffe 33 | -------------------------------------------------------------------------------- /src/caffe/layers/base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/layers/base_data_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template <typename Dtype> 8 | void BasePrefetchingDataLayer<Dtype>::Forward_gpu( 9 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 10 | Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); 11 | // Reshape to loaded data. 12 | top[0]->ReshapeLike(batch->data_); 13 | // Copy the data 14 | caffe_copy(batch->data_.count(), batch->data_.gpu_data(), 15 | top[0]->mutable_gpu_data()); 16 | if (this->output_labels_) { 17 | // Reshape to loaded labels. 18 | top[1]->ReshapeLike(batch->label_); 19 | // Copy the labels. 20 | caffe_copy(batch->label_.count(), batch->label_.gpu_data(), 21 | top[1]->mutable_gpu_data()); 22 | } 23 | // Ensure the copy is synchronous wrt the host, so that the next batch isn't 24 | // copied in the meantime. 
25 | CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); 26 | prefetch_free_.push(batch); 27 | } 28 | 29 | INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); 30 | 31 | } // namespace caffe 32 | -------------------------------------------------------------------------------- /src/caffe/solvers/adadelta_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void AdaDeltaUpdate(int N, Dtype* g, Dtype* h, Dtype* h2, 8 | Dtype momentum, Dtype delta, Dtype local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float hi = h[i] = momentum * h[i] + (1-momentum) * gi * gi; 12 | gi = gi * sqrt((h2[i] + delta) / (hi + delta)); 13 | h2[i] = momentum * h2[i] + (1-momentum) * gi * gi; 14 | g[i] = local_rate * gi; 15 | } 16 | } 17 | template 18 | void adadelta_update_gpu(int N, Dtype* g, Dtype* h, Dtype* h2, Dtype momentum, 19 | Dtype delta, Dtype local_rate) { 20 | AdaDeltaUpdate // NOLINT_NEXT_LINE(whitespace/operators) 21 | <<>>( 22 | N, g, h, h2, momentum, delta, local_rate); 23 | CUDA_POST_KERNEL_CHECK; 24 | } 25 | template void adadelta_update_gpu(int , float*, float*, float*, 26 | float, float, float); 27 | template void adadelta_update_gpu(int, double*, double*, double*, 28 | double, double, double); 29 | 30 | } // namespace caffe 31 | -------------------------------------------------------------------------------- /src/caffe/solvers/adam_solver.cu: -------------------------------------------------------------------------------- 1 | #include "caffe/util/math_functions.hpp" 2 | 3 | 4 | namespace caffe { 5 | 6 | template 7 | __global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v, 8 | Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { 9 | CUDA_KERNEL_LOOP(i, N) { 10 | float gi = g[i]; 11 | float mi = m[i] = m[i]*beta1 + gi*(1-beta1); 12 | float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); 13 | g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); 14 | } 15 | } 16 | template 17 | void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, 18 | Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) { 19 | AdamUpdate // NOLINT_NEXT_LINE(whitespace/operators) 20 | <<>>( 21 | N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); 22 | CUDA_POST_KERNEL_CHECK; 23 | } 24 | template void adam_update_gpu(int, float*, float*, float*, 25 | float, float, float, float); 26 | template void adam_update_gpu(int, double*, double*, double*, 27 | double, double, double, double); 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # SNAPPY_FOUND 8 | # Snappy_INCLUDE_DIR 9 | # Snappy_LIBRARIES 10 | 11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h 12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) 13 | 14 | find_library(Snappy_LIBRARIES NAMES snappy 15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) 19 | 20 | if(SNAPPY_FOUND) 21 | message(STATUS 
"Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") 22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) 23 | 24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h 25 | SNAPPY_VERION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) 26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") 27 | endif() 28 | 29 | -------------------------------------------------------------------------------- /scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /scripts/travis/travis_setup_makefile_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mv Makefile.config.example Makefile.config 6 | 7 | if $WITH_CUDA; then 8 | # Only generate compute_50. 
9 | GENCODE="-gencode arch=compute_50,code=sm_50" 10 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" 11 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config 12 | fi 13 | 14 | # Remove IO library settings from Makefile.config 15 | # to avoid conflicts with CI configuration 16 | sed -i -e '/USE_LMDB/d' Makefile.config 17 | sed -i -e '/USE_LEVELDB/d' Makefile.config 18 | sed -i -e '/USE_OPENCV/d' Makefile.config 19 | 20 | cat << 'EOF' >> Makefile.config 21 | # Travis' nvcc doesn't like newer boost versions 22 | NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used 23 | ANACONDA_HOME := $(CONDA_DIR) 24 | PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ 25 | $(ANACONDA_HOME)/include/python2.7 \ 26 | $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include 27 | PYTHON_LIB := $(ANACONDA_HOME)/lib 28 | INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include 29 | LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib 30 | WITH_PYTHON_LAYER := 1 31 | EOF 32 | -------------------------------------------------------------------------------- /cmake/Modules/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMBD libraries and headers 2 | # LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license. 10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") 16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) 17 | 18 | include(FindPackageHandleStandardArgs) 19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 20 | 21 | if(LMDB_FOUND) 22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") 23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 24 | 25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h 26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) 27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") 28 | endif() 29 | -------------------------------------------------------------------------------- /include/caffe/util/gpu_util.cuh: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_GPU_UTIL_H_ 2 | #define CAFFE_UTIL_GPU_UTIL_H_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); 8 | 9 | template <> 10 | inline __device__ 11 | float caffe_gpu_atomic_add(const float val, float* address) { 12 | return atomicAdd(address, val); 13 | } 14 | 15 | // double atomicAdd implementation taken from: 16 | // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG 17 | template <> 18 | inline __device__ 19 | double caffe_gpu_atomic_add(const double val, double* address) { 20 | unsigned long long int* address_as_ull = // NOLINT(runtime/int) 21 | // NOLINT_NEXT_LINE(runtime/int) 22 | reinterpret_cast(address); 23 | unsigned long long int old = *address_as_ull; // NOLINT(runtime/int) 24 | unsigned long long int assumed; // NOLINT(runtime/int) 25 | do { 26 | assumed = old; 27 | old = 
atomicCAS(address_as_ull, assumed, 28 | __double_as_longlong(val + __longlong_as_double(assumed))); 29 | } while (assumed != old); 30 | return __longlong_as_double(old); 31 | } 32 | 33 | } // namespace caffe 34 | 35 | #endif // CAFFE_UTIL_GPU_UTIL_H_ 36 | -------------------------------------------------------------------------------- /include/caffe/util/hdf5.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_HDF5_H_ 2 | #define CAFFE_UTIL_HDF5_H_ 3 | 4 | #include 5 | 6 | #include "hdf5.h" 7 | #include "hdf5_hl.h" 8 | 9 | #include "caffe/blob.hpp" 10 | 11 | namespace caffe { 12 | 13 | template 14 | void hdf5_load_nd_dataset_helper( 15 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 16 | Blob* blob); 17 | 18 | template 19 | void hdf5_load_nd_dataset( 20 | hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, 21 | Blob* blob); 22 | 23 | template 24 | void hdf5_save_nd_dataset( 25 | const hid_t file_id, const string& dataset_name, const Blob& blob, 26 | bool write_diff = false); 27 | 28 | int hdf5_load_int(hid_t loc_id, const string& dataset_name); 29 | void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); 30 | string hdf5_load_string(hid_t loc_id, const string& dataset_name); 31 | void hdf5_save_string(hid_t loc_id, const string& dataset_name, 32 | const string& s); 33 | 34 | int hdf5_get_num_links(hid_t loc_id); 35 | string hdf5_get_name_by_idx(hid_t loc_id, int idx); 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_UTIL_HDF5_H_ 40 | -------------------------------------------------------------------------------- /scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 
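# Editor's note (added): copy_notebook.py (listed earlier in this dump) only
# copies a notebook whose metadata carries 'include_in_docs': true, and it
# prepends that metadata as YAML front-matter so Jekyll can render the page,
# e.g. (hypothetical values):
#   ---
#   title: 01-learning-lenet.ipynb
#   category: notebook
#   original_path: examples/01-learning-lenet.ipynb
#   ---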
25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /src/caffe/layers/split_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/split_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SplitLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | for (int i = 0; i < top.size(); ++i) { 12 | top[i]->ShareData(*bottom[0]); 13 | } 14 | } 15 | 16 | template 17 | void SplitLayer::Backward_gpu(const vector*>& top, 18 | const vector& propagate_down, const vector*>& bottom) { 19 | if (!propagate_down[0]) { return; } 20 | if (top.size() == 1) { 21 | caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); 22 | return; 23 | } 24 | caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), 25 | bottom[0]->mutable_gpu_diff()); 26 | // Add remaining top blob diffs. 27 | for (int i = 2; i < top.size(); ++i) { 28 | const Dtype* top_diff = top[i]->gpu_diff(); 29 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 30 | caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); 31 | } 32 | } 33 | 34 | 35 | INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); 36 | 37 | } // namespace caffe 38 | -------------------------------------------------------------------------------- /src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void SigmoidCrossEntropyLossLayer::Backward_gpu( 10 | const vector*>& top, const vector& propagate_down, 11 | const vector*>& bottom) { 12 | if (propagate_down[1]) { 13 | LOG(FATAL) << this->type() 14 | << " Layer cannot backpropagate to label inputs."; 15 | } 16 | if (propagate_down[0]) { 17 | // First, compute the diff 18 | const int count = bottom[0]->count(); 19 | const int num = bottom[0]->num(); 20 | const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); 21 | const Dtype* target = bottom[1]->gpu_data(); 22 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 23 | caffe_copy(count, sigmoid_output_data, bottom_diff); 24 | caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); 25 | // Scale down gradient 26 | const Dtype loss_weight = top[0]->cpu_diff()[0]; 27 | caffe_gpu_scal(count, loss_weight / num, bottom_diff); 28 | } 29 | } 30 | 31 | INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); 32 | 33 | 34 | } // namespace caffe 35 | -------------------------------------------------------------------------------- /include/caffe/util/db.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_DB_HPP 2 | #define CAFFE_UTIL_DB_HPP 3 | 4 | #include 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/proto/caffe.pb.h" 8 | 9 | namespace caffe { namespace db { 10 | 11 | enum Mode { READ, WRITE, NEW }; 12 | 13 | class Cursor { 14 | public: 15 | Cursor() { } 16 | virtual ~Cursor() { } 17 | virtual void SeekToFirst() = 0; 18 | virtual void Next() = 0; 19 | virtual string key() = 0; 20 | virtual string value() = 0; 21 | virtual bool 
valid() = 0; 22 | 23 | DISABLE_COPY_AND_ASSIGN(Cursor); 24 | }; 25 | 26 | class Transaction { 27 | public: 28 | Transaction() { } 29 | virtual ~Transaction() { } 30 | virtual void Put(const string& key, const string& value) = 0; 31 | virtual void Commit() = 0; 32 | 33 | DISABLE_COPY_AND_ASSIGN(Transaction); 34 | }; 35 | 36 | class DB { 37 | public: 38 | DB() { } 39 | virtual ~DB() { } 40 | virtual void Open(const string& source, Mode mode) = 0; 41 | virtual void Close() = 0; 42 | virtual Cursor* NewCursor() = 0; 43 | virtual Transaction* NewTransaction() = 0; 44 | 45 | DISABLE_COPY_AND_ASSIGN(DB); 46 | }; 47 | 48 | DB* GetDB(DataParameter::DB backend); 49 | DB* GetDB(const string& backend); 50 | 51 | } // namespace db 52 | } // namespace caffe 53 | 54 | #endif // CAFFE_UTIL_DB_HPP 55 | -------------------------------------------------------------------------------- /include/caffe/layers/data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_LAYER_HPP_ 2 | #define CAFFE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/data_reader.hpp" 8 | #include "caffe/data_transformer.hpp" 9 | #include "caffe/internal_thread.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layers/base_data_layer.hpp" 12 | #include "caffe/proto/caffe.pb.h" 13 | #include "caffe/util/db.hpp" 14 | 15 | namespace caffe { 16 | 17 | template 18 | class DataLayer : public BasePrefetchingDataLayer { 19 | public: 20 | explicit DataLayer(const LayerParameter& param); 21 | virtual ~DataLayer(); 22 | virtual void DataLayerSetUp(const vector*>& bottom, 23 | const vector*>& top); 24 | // DataLayer uses DataReader instead for sharing for parallelism 25 | virtual inline bool ShareInParallel() const { return false; } 26 | virtual inline const char* type() const { return "Data"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 0; } 28 | virtual inline int MinTopBlobs() const { return 1; } 29 | virtual inline int MaxTopBlobs() const { return 2; } 30 | 31 | protected: 32 | virtual void load_batch(Batch* batch); 33 | 34 | DataReader reader_; 35 | }; 36 | 37 | } // namespace caffe 38 | 39 | #endif // CAFFE_DATA_LAYER_HPP_ 40 | -------------------------------------------------------------------------------- /include/caffe/util/rng.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | } // namespace caffe 42 | 43 | #endif // 
CAFFE_RNG_HPP_ 44 | -------------------------------------------------------------------------------- /data/convert_labels/VOCcode/VOCwritexml.m: -------------------------------------------------------------------------------- 1 | function VOCwritexml(rec, path) 2 | 3 | fid=fopen(path,'w'); 4 | writexml(fid,rec,0); 5 | fclose(fid); 6 | 7 | function xml = writexml(fid,rec,depth) 8 | 9 | fn=fieldnames(rec); 10 | for i=1:length(fn) 11 | f=rec.(fn{i}); 12 | if ~isempty(f) 13 | if isstruct(f) 14 | for j=1:length(f) 15 | fprintf(fid,'%s',repmat(char(9),1,depth)); 16 | fprintf(fid,'<%s>\n',fn{i}); 17 | writexml(fid,rec.(fn{i})(j),depth+1); 18 | fprintf(fid,'%s',repmat(char(9),1,depth)); 19 | fprintf(fid,'\n',fn{i}); 20 | end 21 | else 22 | if ~iscell(f) 23 | f={f}; 24 | end 25 | for j=1:length(f) 26 | fprintf(fid,'%s',repmat(char(9),1,depth)); 27 | fprintf(fid,'<%s>',fn{i}); 28 | if ischar(f{j}) 29 | fprintf(fid,'%s',f{j}); 30 | elseif isnumeric(f{j})&&numel(f{j})==1 31 | fprintf(fid,'%s',num2str(f{j})); 32 | else 33 | error('unsupported type'); 34 | end 35 | fprintf(fid,'\n',fn{i}); 36 | end 37 | end 38 | end 39 | end 40 | 41 | -------------------------------------------------------------------------------- /include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 | 8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | virtual void Start(); 15 | virtual void Stop(); 16 | virtual float MilliSeconds(); 17 | virtual float MicroSeconds(); 18 | virtual float Seconds(); 19 | 20 | inline bool initted() { return initted_; } 21 | inline bool running() { return running_; } 22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 23 | 24 | protected: 25 | void Init(); 26 | 27 | bool initted_; 28 | bool running_; 29 | bool has_run_at_least_once_; 30 | #ifndef CPU_ONLY 31 | cudaEvent_t start_gpu_; 32 | cudaEvent_t stop_gpu_; 33 | #endif 34 | boost::posix_time::ptime start_cpu_; 35 | boost::posix_time::ptime stop_cpu_; 36 | float elapsed_milliseconds_; 37 | float elapsed_microseconds_; 38 | }; 39 | 40 | class CPUTimer : public Timer { 41 | public: 42 | explicit CPUTimer(); 43 | virtual ~CPUTimer() {} 44 | virtual void Start(); 45 | virtual void Stop(); 46 | virtual float MilliSeconds(); 47 | virtual float MicroSeconds(); 48 | }; 49 | 50 | } // namespace caffe 51 | 52 | #endif // CAFFE_UTIL_BENCHMARK_H_ 53 | -------------------------------------------------------------------------------- /scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! -f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh " 7 | echo " /readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " /readme.md must contain name field in the front-matter." 
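# Editor's note (added): like the usage check at the top of this script, this
# branch only prints a message and falls through without an 'exit 1', so the
# gist upload below still proceeds with an empty -d description name.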
18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your /readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!" 38 | fi 39 | -------------------------------------------------------------------------------- /matlab/+caffe/get_net.m: -------------------------------------------------------------------------------- 1 | function net = get_net(varargin) 2 | % net = get_net(model_file, phase_name) or 3 | % net = get_net(model_file, weights_file, phase_name) 4 | % Construct a net from model_file, and load weights from weights_file 5 | % phase_name can only be 'train' or 'test' 6 | 7 | CHECK(nargin == 2 || nargin == 3, ['usage: ' ... 8 | 'net = get_net(model_file, phase_name) or ' ... 9 | 'net = get_net(model_file, weights_file, phase_name)']); 10 | if nargin == 3 11 | model_file = varargin{1}; 12 | weights_file = varargin{2}; 13 | phase_name = varargin{3}; 14 | elseif nargin == 2 15 | model_file = varargin{1}; 16 | phase_name = varargin{2}; 17 | end 18 | 19 | CHECK(ischar(model_file), 'model_file must be a string'); 20 | CHECK(ischar(phase_name), 'phase_name must be a string'); 21 | CHECK_FILE_EXIST(model_file); 22 | CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ... 23 | sprintf('phase_name can only be %strain%s or %stest%s', ... 24 | char(39), char(39), char(39), char(39))); 25 | 26 | % construct caffe net from model_file 27 | hNet = caffe_('get_net', model_file, phase_name); 28 | net = caffe.Net(hNet); 29 | 30 | % load weights from weights_file 31 | if nargin == 3 32 | CHECK(ischar(weights_file), 'weights_file must be a string'); 33 | CHECK_FILE_EXIST(weights_file); 34 | net.copy_from(weights_file); 35 | end 36 | 37 | end 38 | -------------------------------------------------------------------------------- /src/caffe/layers/tanh_layer.cpp: -------------------------------------------------------------------------------- 1 | // TanH neuron activation function layer. 
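// Editor's note (added): Forward_cpu below computes y = tanh(x); Backward_cpu
// relies on the identity dy/dx = 1 - tanh^2(x) = 1 - y^2, which is why it
// reads the saved top_data instead of recomputing tanh on bottom_data.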
2 | // Adapted from ReLU layer code written by Yangqing Jia 3 | 4 | #include 5 | 6 | #include "caffe/layers/tanh_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void TanHLayer::Forward_cpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | const Dtype* bottom_data = bottom[0]->cpu_data(); 14 | Dtype* top_data = top[0]->mutable_cpu_data(); 15 | const int count = bottom[0]->count(); 16 | for (int i = 0; i < count; ++i) { 17 | top_data[i] = tanh(bottom_data[i]); 18 | } 19 | } 20 | 21 | template 22 | void TanHLayer::Backward_cpu(const vector*>& top, 23 | const vector& propagate_down, 24 | const vector*>& bottom) { 25 | if (propagate_down[0]) { 26 | const Dtype* top_data = top[0]->cpu_data(); 27 | const Dtype* top_diff = top[0]->cpu_diff(); 28 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 29 | const int count = bottom[0]->count(); 30 | Dtype tanhx; 31 | for (int i = 0; i < count; ++i) { 32 | tanhx = top_data[i]; 33 | bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); 34 | } 35 | } 36 | } 37 | 38 | #ifdef CPU_ONLY 39 | STUB_GPU(TanHLayer); 40 | #endif 41 | 42 | INSTANTIATE_CLASS(TanHLayer); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /data/convert_labels/readLabels.m: -------------------------------------------------------------------------------- 1 | function objects = readLabels(label_dir,img_idx) 2 | 3 | % parse input file 4 | fid = fopen(sprintf('%s/%06d.txt',label_dir,img_idx),'r'); 5 | C = textscan(fid,'%s %f %d %f %f %f %f %f %f %f %f %f %f %f %f','delimiter', ' '); 6 | fclose(fid); 7 | 8 | % for all objects do 9 | objects = []; 10 | for o = 1:numel(C{1}) 11 | 12 | % extract label, truncation, occlusion 13 | lbl = C{1}(o); % for converting: cell -> string 14 | objects(o).type = lbl{1}; % 'Car', 'Pedestrian', ... 
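% Editor's note (added): each KITTI label line carries 15 space-separated
% fields, matching the textscan format string above: type, truncation,
% occlusion, alpha, the 2D box (x1 y1 x2 y2), the 3D box dimensions
% (height, width, length), the 3D location (x, y, z), and rotation_y.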
15 | objects(o).truncation = C{2}(o); % truncated pixel ratio ([0..1]) 16 | objects(o).occlusion = C{3}(o); % 0 = visible, 1 = partly occluded, 2 = fully occluded, 3 = unknown 17 | objects(o).alpha = C{4}(o); % object observation angle ([-pi..pi]) 18 | 19 | % extract 2D bounding box in 0-based coordinates 20 | objects(o).x1 = C{5}(o); % left 21 | objects(o).y1 = C{6}(o); % top 22 | objects(o).x2 = C{7}(o); % right 23 | objects(o).y2 = C{8}(o); % bottom 24 | 25 | % extract 3D bounding box information 26 | objects(o).h = C{9} (o); % box height 27 | objects(o).w = C{10}(o); % box width 28 | objects(o).l = C{11}(o); % box length 29 | objects(o).t(1) = C{12}(o); % location (x) 30 | objects(o).t(2) = C{13}(o); % location (y) 31 | objects(o).t(3) = C{14}(o); % location (z) 32 | objects(o).ry = C{15}(o); % yaw angle (rotation around the camera Y-axis) 33 | end 34 | -------------------------------------------------------------------------------- /include/caffe/layers/cudnn_lrn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ 2 | #define CAFFE_CUDNN_LRN_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/lrn_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | template <typename Dtype> 16 | class CuDNNLRNLayer : public LRNLayer<Dtype> { 17 | public: 18 | explicit CuDNNLRNLayer(const LayerParameter& param) 19 | : LRNLayer<Dtype>(param), handles_setup_(false) {} 20 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 21 | const vector<Blob<Dtype>*>& top); 22 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 23 | const vector<Blob<Dtype>*>& top); 24 | virtual ~CuDNNLRNLayer(); 25 | 26 | protected: 27 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 28 | const vector<Blob<Dtype>*>& top); 29 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 30 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 31 | 32 | bool handles_setup_; 33 | cudnnHandle_t handle_; 34 | cudnnLRNDescriptor_t norm_desc_; 35 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 36 | 37 | int size_; 38 | Dtype alpha_, beta_, k_; 39 | }; 40 | #endif 41 | 42 | } // namespace caffe 43 | 44 | #endif // CAFFE_CUDNN_LRN_LAYER_HPP_ 45 | -------------------------------------------------------------------------------- /caffe.cloc: -------------------------------------------------------------------------------- 1 | Bourne Shell 2 | filter remove_matches ^\s*# 3 | filter remove_inline #.*$ 4 | extension sh 5 | script_exe sh 6 | C 7 | filter remove_matches ^\s*// 8 | filter call_regexp_common C 9 | filter remove_inline //.*$ 10 | extension c 11 | extension ec 12 | extension pgc 13 | C++ 14 | filter remove_matches ^\s*// 15 | filter remove_inline //.*$ 16 | filter call_regexp_common C 17 | extension C 18 | extension cc 19 | extension cpp 20 | extension cxx 21 | extension pcc 22 | C/C++ Header 23 | filter remove_matches ^\s*// 24 | filter call_regexp_common C 25 | filter remove_inline //.*$ 26 | extension H 27 | extension h 28 | extension hh 29 | extension hpp 30 | CUDA 31 | filter remove_matches ^\s*// 32 | filter remove_inline //.*$ 33 | filter call_regexp_common C 34 | extension cu 35 | Python 36 | filter remove_matches ^\s*# 37 | filter docstring_to_C 38 | filter call_regexp_common C 39 | filter remove_inline #.*$ 40 | extension py 41 | make 42 | filter remove_matches ^\s*# 43 | filter remove_inline #.*$ 44 | extension Gnumakefile 45 | extension Makefile 46 | extension am 47 | extension gnumakefile 48 | extension makefile 49 | filename
Gnumakefile 50 | filename Makefile 51 | filename gnumakefile 52 | filename makefile 53 | script_exe make 54 | -------------------------------------------------------------------------------- /src/caffe/test/test_internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include "glog/logging.h" 2 | #include "gtest/gtest.h" 3 | 4 | #include "caffe/internal_thread.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | #include "caffe/test/test_caffe_main.hpp" 8 | 9 | namespace caffe { 10 | 11 | 12 | class InternalThreadTest : public ::testing::Test {}; 13 | 14 | TEST_F(InternalThreadTest, TestStartAndExit) { 15 | InternalThread thread; 16 | EXPECT_FALSE(thread.is_started()); 17 | thread.StartInternalThread(); 18 | EXPECT_TRUE(thread.is_started()); 19 | thread.StopInternalThread(); 20 | EXPECT_FALSE(thread.is_started()); 21 | } 22 | 23 | class TestThreadA : public InternalThread { 24 | void InternalThreadEntry() { 25 | EXPECT_EQ(4244559767, caffe_rng_rand()); 26 | } 27 | }; 28 | 29 | class TestThreadB : public InternalThread { 30 | void InternalThreadEntry() { 31 | EXPECT_EQ(1726478280, caffe_rng_rand()); 32 | } 33 | }; 34 | 35 | TEST_F(InternalThreadTest, TestRandomSeed) { 36 | TestThreadA t1; 37 | Caffe::set_random_seed(9658361); 38 | t1.StartInternalThread(); 39 | t1.StopInternalThread(); 40 | 41 | TestThreadA t2; 42 | Caffe::set_random_seed(9658361); 43 | t2.StartInternalThread(); 44 | t2.StopInternalThread(); 45 | 46 | TestThreadB t3; 47 | Caffe::set_random_seed(3435563); 48 | t3.StartInternalThread(); 49 | t3.StopInternalThread(); 50 | } 51 | 52 | } // namespace caffe 53 | 54 | -------------------------------------------------------------------------------- /src/caffe/test/test_caffe_main.cpp: -------------------------------------------------------------------------------- 1 | // The main caffe test code. Your test cpp code should include this hpp 2 | // to allow a main function to be compiled into the binary. 3 | 4 | #include "caffe/caffe.hpp" 5 | #include "caffe/test/test_caffe_main.hpp" 6 | 7 | namespace caffe { 8 | #ifndef CPU_ONLY 9 | cudaDeviceProp CAFFE_TEST_CUDA_PROP; 10 | #endif 11 | } 12 | 13 | #ifndef CPU_ONLY 14 | using caffe::CAFFE_TEST_CUDA_PROP; 15 | #endif 16 | 17 | int main(int argc, char** argv) { 18 | ::testing::InitGoogleTest(&argc, argv); 19 | caffe::GlobalInit(&argc, &argv); 20 | #ifndef CPU_ONLY 21 | // Before starting testing, let's first print out some CUDA device info. 22 | int device; 23 | cudaGetDeviceCount(&device); 24 | cout << "Cuda number of devices: " << device << endl; 25 | if (argc > 1) { 26 | // Use the given device. 27 | device = atoi(argv[1]); 28 | cudaSetDevice(device); 29 | cout << "Setting to use device " << device << endl; 30 | } else if (CUDA_TEST_DEVICE >= 0) { 31 | // Use the device assigned in the build configuration, but with a lower priority. 32 | device = CUDA_TEST_DEVICE; 33 | } 34 | cudaGetDevice(&device); 35 | cout << "Current device id: " << device << endl; 36 | cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); 37 | cout << "Current device name: " << CAFFE_TEST_CUDA_PROP.name << endl; 38 | #endif 39 | // invoke the test.
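// Editor's note (added): RUN_ALL_TESTS() runs every registered gtest case
// and returns 0 only if all of them pass; returning its value from main()
// is what lets `make runtest` detect failures through the process exit code.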
40 | return RUN_ALL_TESTS(); 41 | } 42 | -------------------------------------------------------------------------------- /data/KITTI-car/create_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | root_dir="$HOME/data/KITTI/" #your path to kitti dataset 4 | bash_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | label_dir="label_2car" #path to labels of car 6 | name="image_2" 7 | for dataset in training testing 8 | do 9 | dst_file=$bash_dir/$dataset.txt 10 | if [ -f $dst_file ] 11 | then 12 | rm -f $dst_file 13 | fi 14 | 15 | echo "Create list for $name $dataset..." 16 | dataset_file=./main/$dataset.txt 17 | 18 | img_file=$bash_dir/$dataset"_img.txt" 19 | cp $dataset_file $img_file 20 | sed -i "s/^/training\/$name\//g" $img_file 21 | sed -i "s/$/.png/g" $img_file 22 | 23 | label_file=$bash_dir/$dataset"_label.txt" 24 | cp $dataset_file $label_file 25 | sed -i "s/^/training\/$label_dir\/xml\//g" $label_file 26 | sed -i "s/$/.xml/g" $label_file 27 | 28 | paste -d' ' $img_file $label_file >> $dst_file 29 | 30 | rm -f $label_file 31 | rm -f $img_file 32 | 33 | # Generate image name and size information. 34 | if [ "$dataset" == "testing" ] 35 | then 36 | $bash_dir/../../build/tools/get_image_size $root_dir $dst_file $bash_dir/$dataset"_name_size.txt" 37 | fi 38 | 39 | # Shuffle the trainval file. 40 | if [ $dataset == "training" ] 41 | then 42 | rand_file=$dst_file.random 43 | cat $dst_file | perl -MList::Util=shuffle -e 'print shuffle(<STDIN>);' > $rand_file 44 | mv $rand_file $dst_file 45 | fi 46 | done 47 | -------------------------------------------------------------------------------- /data/KITTI-val/create_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | root_dir="$HOME/data/KITTI/" #your path to kitti dataset 4 | bash_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | label_dir="label_2car" #path to labels of car 6 | name="image_2" 7 | for dataset in training testing 8 | do 9 | dst_file=$bash_dir/$dataset.txt 10 | if [ -f $dst_file ] 11 | then 12 | rm -f $dst_file 13 | fi 14 | 15 | echo "Create list for $name $dataset..." 16 | dataset_file=./main/$dataset.txt 17 | 18 | img_file=$bash_dir/$dataset"_img.txt" 19 | cp $dataset_file $img_file 20 | sed -i "s/^/training\/$name\//g" $img_file 21 | sed -i "s/$/.png/g" $img_file 22 | 23 | label_file=$bash_dir/$dataset"_label.txt" 24 | cp $dataset_file $label_file 25 | sed -i "s/^/training\/$label_dir\/xml\//g" $label_file 26 | sed -i "s/$/.xml/g" $label_file 27 | 28 | paste -d' ' $img_file $label_file >> $dst_file 29 | 30 | rm -f $label_file 31 | rm -f $img_file 32 | 33 | # Generate image name and size information. 34 | if [ "$dataset" == "testing" ] 35 | then 36 | $bash_dir/../../build/tools/get_image_size $root_dir $dst_file $bash_dir/$dataset"_name_size.txt" 37 | fi 38 | 39 | # Shuffle the trainval file. 40 | if [ $dataset == "training" ] 41 | then 42 | rand_file=$dst_file.random 43 | cat $dst_file | perl -MList::Util=shuffle -e 'print shuffle(<STDIN>);' > $rand_file 44 | mv $rand_file $dst_file 45 | fi 46 | done 47 | -------------------------------------------------------------------------------- /src/caffe/layers/sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <vector> 3 | 4 | #include "caffe/layers/sigmoid_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | inline Dtype sigmoid(Dtype x) { 10 | return 1. / (1.
+ exp(-x)); 11 | } 12 | 13 | template 14 | void SigmoidLayer::Forward_cpu(const vector*>& bottom, 15 | const vector*>& top) { 16 | const Dtype* bottom_data = bottom[0]->cpu_data(); 17 | Dtype* top_data = top[0]->mutable_cpu_data(); 18 | const int count = bottom[0]->count(); 19 | for (int i = 0; i < count; ++i) { 20 | top_data[i] = sigmoid(bottom_data[i]); 21 | } 22 | } 23 | 24 | template 25 | void SigmoidLayer::Backward_cpu(const vector*>& top, 26 | const vector& propagate_down, 27 | const vector*>& bottom) { 28 | if (propagate_down[0]) { 29 | const Dtype* top_data = top[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 32 | const int count = bottom[0]->count(); 33 | for (int i = 0; i < count; ++i) { 34 | const Dtype sigmoid_x = top_data[i]; 35 | bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); 36 | } 37 | } 38 | } 39 | 40 | #ifdef CPU_ONLY 41 | STUB_GPU(SigmoidLayer); 42 | #endif 43 | 44 | INSTANTIATE_CLASS(SigmoidLayer); 45 | 46 | 47 | } // namespace caffe 48 | -------------------------------------------------------------------------------- /cmake/Modules/FindvecLib.cmake: -------------------------------------------------------------------------------- 1 | # Find the vecLib libraries as part of Accelerate.framework or as standalon framework 2 | # 3 | # The following are set after configuration is done: 4 | # VECLIB_FOUND 5 | # vecLib_INCLUDE_DIR 6 | # vecLib_LINKER_LIBS 7 | 8 | 9 | if(NOT APPLE) 10 | return() 11 | endif() 12 | 13 | set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") 14 | 15 | find_path(vecLib_INCLUDE_DIR vecLib.h 16 | DOC "vecLib include directory" 17 | PATHS /System/Library/${__veclib_include_suffix} 18 | /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} 19 | /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) 20 | 21 | include(FindPackageHandleStandardArgs) 22 | find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) 23 | 24 | if(VECLIB_FOUND) 25 | if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") 26 | set(vecLib_LINKER_LIBS -lcblas "-framework vecLib") 27 | message(STATUS "Found standalone vecLib.framework") 28 | else() 29 | set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate") 30 | message(STATUS "Found vecLib as part of Accelerate.framework") 31 | endif() 32 | 33 | mark_as_advanced(vecLib_INCLUDE_DIR) 34 | endif() 35 | -------------------------------------------------------------------------------- /src/caffe/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # generate protobuf sources 2 | file(GLOB proto_files proto/*.proto) 3 | caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) 4 | 5 | # include python files either to force generation 6 | add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) 7 | set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 
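# Editor's note (added): prepending matters because CMake preserves this
# order on the link line, and with static archives the linker resolves
# symbols left to right -- the proto archive has to come before libprotobuf
# (already in Caffe_LINKER_LIBS) so that proto's references to the protobuf
# runtime can still be resolved.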
8 | caffe_default_properties(proto) 9 | 10 | # --[ Caffe library 11 | 12 | # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists 13 | caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) 14 | 15 | if(HAVE_CUDA) 16 | caffe_cuda_compile(cuda_objs ${cuda}) 17 | list(APPEND srcs ${cuda_objs} ${cuda}) 18 | endif() 19 | 20 | add_library(caffe ${srcs}) 21 | target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) 22 | caffe_default_properties(caffe) 23 | set_target_properties(caffe PROPERTIES 24 | VERSION ${CAFFE_TARGET_VERSION} 25 | SOVERSION ${CAFFE_TARGET_SOVERSION} 26 | ) 27 | 28 | # ---[ Tests 29 | add_subdirectory(test) 30 | 31 | # ---[ Install 32 | install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) 33 | install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) 34 | install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) 35 | 36 | file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) 37 | list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) 38 | install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) 39 | 40 | 41 | -------------------------------------------------------------------------------- /src/caffe/layers/exp_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/exp_layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void ExpLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const int count = bottom[0]->count(); 12 | const Dtype* bottom_data = bottom[0]->gpu_data(); 13 | Dtype* top_data = top[0]->mutable_gpu_data(); 14 | if (inner_scale_ == Dtype(1)) { 15 | caffe_gpu_exp(count, bottom_data, top_data); 16 | } else { 17 | caffe_gpu_scale(count, inner_scale_, bottom_data, top_data); 18 | caffe_gpu_exp(count, top_data, top_data); 19 | } 20 | if (outer_scale_ != Dtype(1)) { 21 | caffe_gpu_scal(count, outer_scale_, top_data); 22 | } 23 | } 24 | 25 | template 26 | void ExpLayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, const vector*>& bottom) { 28 | if (!propagate_down[0]) { return; } 29 | const int count = bottom[0]->count(); 30 | const Dtype* top_data = top[0]->gpu_data(); 31 | const Dtype* top_diff = top[0]->gpu_diff(); 32 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 33 | caffe_gpu_mul(count, top_data, top_diff, bottom_diff); 34 | if (inner_scale_ != Dtype(1)) { 35 | caffe_gpu_scal(count, inner_scale_, bottom_diff); 36 | } 37 | } 38 | 39 | INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer); 40 | 41 | 42 | } // namespace caffe 43 | -------------------------------------------------------------------------------- /include/caffe/layers/cudnn_softmax_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 2 | #define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/softmax_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | #ifdef USE_CUDNN 15 | /** 16 | * @brief cuDNN implementation of SoftmaxLayer. 17 | * Fallback to SoftmaxLayer for CPU mode. 
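 * (Editor's note, added): the implementation later in this listing reshapes
 * the blob into a 4-D N x C x H x W cuDNN tensor descriptor with
 * N = outer_num_, C = the size of the softmax axis, H = inner_num_ and
 * W = 1, so the reduction runs over channels for every outer/inner position.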
18 | */ 19 | template 20 | class CuDNNSoftmaxLayer : public SoftmaxLayer { 21 | public: 22 | explicit CuDNNSoftmaxLayer(const LayerParameter& param) 23 | : SoftmaxLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNSoftmaxLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | }; 41 | #endif 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /include/caffe/layers/tile_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_TILE_LAYER_HPP_ 2 | #define CAFFE_TILE_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Copy a Blob along specified dimensions. 14 | */ 15 | template 16 | class TileLayer : public Layer { 17 | public: 18 | explicit TileLayer(const LayerParameter& param) 19 | : Layer(param) {} 20 | virtual void Reshape(const vector*>& bottom, 21 | const vector*>& top); 22 | 23 | virtual inline const char* type() const { return "Tile"; } 24 | virtual inline int ExactNumBottomBlobs() const { return 1; } 25 | virtual inline int ExactNumTopBlobs() const { return 1; } 26 | 27 | protected: 28 | virtual void Forward_cpu(const vector*>& bottom, 29 | const vector*>& top); 30 | virtual void Forward_gpu(const vector*>& bottom, 31 | const vector*>& top); 32 | 33 | virtual void Backward_cpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | virtual void Backward_gpu(const vector*>& top, 36 | const vector& propagate_down, const vector*>& bottom); 37 | 38 | unsigned int axis_, tiles_, outer_dim_, inner_dim_; 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_TILE_LAYER_HPP_ 44 | -------------------------------------------------------------------------------- /include/caffe/layers/cudnn_relu_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ 2 | #define CAFFE_CUDNN_RELU_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/relu_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of ReLULayer. 
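 * (Editor's note, added): ReLU computes y = max(0, x). In stock Caffe this
 * cuDNN path falls back to the plain ReLULayer kernels whenever a nonzero
 * negative_slope (leaky ReLU) is configured, since the cuDNN activation
 * descriptor used here has no slope parameter.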
18 | */ 19 | template 20 | class CuDNNReLULayer : public ReLULayer { 21 | public: 22 | explicit CuDNNReLULayer(const LayerParameter& param) 23 | : ReLULayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNReLULayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | cudnnActivationDescriptor_t activ_desc_; 41 | }; 42 | #endif 43 | 44 | } // namespace caffe 45 | 46 | #endif // CAFFE_CUDNN_RELU_LAYER_HPP_ 47 | -------------------------------------------------------------------------------- /include/caffe/layers/cudnn_tanh_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ 2 | #define CAFFE_CUDNN_TANH_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | #include "caffe/layers/tanh_layer.hpp" 12 | 13 | namespace caffe { 14 | 15 | #ifdef USE_CUDNN 16 | /** 17 | * @brief CuDNN acceleration of TanHLayer. 18 | */ 19 | template 20 | class CuDNNTanHLayer : public TanHLayer { 21 | public: 22 | explicit CuDNNTanHLayer(const LayerParameter& param) 23 | : TanHLayer(param), handles_setup_(false) {} 24 | virtual void LayerSetUp(const vector*>& bottom, 25 | const vector*>& top); 26 | virtual void Reshape(const vector*>& bottom, 27 | const vector*>& top); 28 | virtual ~CuDNNTanHLayer(); 29 | 30 | protected: 31 | virtual void Forward_gpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Backward_gpu(const vector*>& top, 34 | const vector& propagate_down, const vector*>& bottom); 35 | 36 | bool handles_setup_; 37 | cudnnHandle_t handle_; 38 | cudnnTensorDescriptor_t bottom_desc_; 39 | cudnnTensorDescriptor_t top_desc_; 40 | cudnnActivationDescriptor_t activ_desc_; 41 | }; 42 | #endif 43 | 44 | } // namespace caffe 45 | 46 | #endif // CAFFE_CUDNN_TANH_LAYER_HPP_ 47 | -------------------------------------------------------------------------------- /include/caffe/layers/image_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_IMAGE_DATA_LAYER_HPP_ 2 | #define CAFFE_IMAGE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/data_transformer.hpp" 10 | #include "caffe/internal_thread.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/layers/base_data_layer.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | 15 | namespace caffe { 16 | 17 | /** 18 | * @brief Provides data to the Net from image files. 19 | * 20 | * TODO(dox): thorough documentation for Forward and proto params. 
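 * (Editor's note, added, toward the TODO above): the layer reads a plain
 * text source file with one "path/to/image.jpg label" pair per line, loads
 * the images with OpenCV, and can reshuffle the list each epoch -- see
 * ShuffleImages() and the lines_ / lines_id_ members below.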
21 | */ 22 | template <typename Dtype> 23 | class ImageDataLayer : public BasePrefetchingDataLayer<Dtype> { 24 | public: 25 | explicit ImageDataLayer(const LayerParameter& param) 26 | : BasePrefetchingDataLayer<Dtype>(param) {} 27 | virtual ~ImageDataLayer(); 28 | virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, 29 | const vector<Blob<Dtype>*>& top); 30 | 31 | virtual inline const char* type() const { return "ImageData"; } 32 | virtual inline int ExactNumBottomBlobs() const { return 0; } 33 | virtual inline int ExactNumTopBlobs() const { return 2; } 34 | 35 | protected: 36 | shared_ptr<Caffe::RNG> prefetch_rng_; 37 | virtual void ShuffleImages(); 38 | virtual void load_batch(Batch<Dtype>* batch); 39 | 40 | vector<std::pair<std::string, int> > lines_; 41 | int lines_id_; 42 | }; 43 | 44 | 45 | } // namespace caffe 46 | 47 | #endif // CAFFE_IMAGE_DATA_LAYER_HPP_ 48 | -------------------------------------------------------------------------------- /src/caffe/layers/cudnn_pooling_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include <vector> 3 | 4 | #include "caffe/layers/cudnn_pooling_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | void CuDNNPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 10 | const vector<Blob<Dtype>*>& top) { 11 | const Dtype* bottom_data = bottom[0]->gpu_data(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, 14 | cudnn::dataType<Dtype>::one, 15 | bottom_desc_, bottom_data, 16 | cudnn::dataType<Dtype>::zero, 17 | top_desc_, top_data)); 18 | } 19 | 20 | template <typename Dtype> 21 | void CuDNNPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 22 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 23 | if (!propagate_down[0]) { 24 | return; 25 | } 26 | const Dtype* top_diff = top[0]->gpu_diff(); 27 | const Dtype* top_data = top[0]->gpu_data(); 28 | const Dtype* bottom_data = bottom[0]->gpu_data(); 29 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 30 | CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, 31 | cudnn::dataType<Dtype>::one, 32 | top_desc_, top_data, top_desc_, top_diff, 33 | bottom_desc_, bottom_data, 34 | cudnn::dataType<Dtype>::zero, 35 | bottom_desc_, bottom_diff)); 36 | } 37 | 38 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /scripts/travis/travis_build_and_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script called by Travis to build and test Caffe. 3 | # Travis CI tests are CPU-only for lack of compatible hardware. 4 | 5 | set -e 6 | MAKE="make --jobs=$NUM_THREADS --keep-going" 7 | 8 | if $WITH_CMAKE; then 9 | mkdir build 10 | cd build 11 | CPU_ONLY=" -DCPU_ONLY=OFF" 12 | if ! $WITH_CUDA; then 13 | CPU_ONLY=" -DCPU_ONLY=ON" 14 | fi 15 | PYTHON_ARGS="" 16 | if [ "$PYTHON_VERSION" = "3" ]; then 17 | PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/" 18 | fi 19 | if $WITH_IO; then 20 | IO_ARGS="-DUSE_OPENCV=ON -DUSE_LMDB=ON -DUSE_LEVELDB=ON" 21 | else 22 | IO_ARGS="-DUSE_OPENCV=OFF -DUSE_LMDB=OFF -DUSE_LEVELDB=OFF" 23 | fi 24 | cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" $IO_ARGS .. 25 | $MAKE 26 | $MAKE pytest 27 | if ! $WITH_CUDA; then 28 | $MAKE runtest 29 | $MAKE lint 30 | fi 31 | $MAKE clean 32 | cd - 33 | else 34 | if !
$WITH_CUDA; then 35 | export CPU_ONLY=1 36 | fi 37 | if $WITH_IO; then 38 | export USE_LMDB=1 39 | export USE_LEVELDB=1 40 | export USE_OPENCV=1 41 | fi 42 | $MAKE all test pycaffe warn lint || true 43 | if ! $WITH_CUDA; then 44 | $MAKE runtest 45 | fi 46 | $MAKE all 47 | $MAKE test 48 | $MAKE pycaffe 49 | $MAKE pytest 50 | $MAKE warn 51 | if ! $WITH_CUDA; then 52 | $MAKE lint 53 | fi 54 | fi 55 | -------------------------------------------------------------------------------- /include/caffe/layers/annotated_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_DATA_LAYER_HPP_ 2 | #define CAFFE_DATA_LAYER_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/data_reader.hpp" 9 | #include "caffe/data_transformer.hpp" 10 | #include "caffe/internal_thread.hpp" 11 | #include "caffe/layer.hpp" 12 | #include "caffe/layers/base_data_layer.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | #include "caffe/util/db.hpp" 15 | 16 | namespace caffe { 17 | 18 | template 19 | class AnnotatedDataLayer : public BasePrefetchingDataLayer { 20 | public: 21 | explicit AnnotatedDataLayer(const LayerParameter& param); 22 | virtual ~AnnotatedDataLayer(); 23 | virtual void DataLayerSetUp(const vector*>& bottom, 24 | const vector*>& top); 25 | // AnnotatedDataLayer uses DataReader instead for sharing for parallelism 26 | virtual inline bool ShareInParallel() const { return false; } 27 | virtual inline const char* type() const { return "AnnotatedData"; } 28 | virtual inline int ExactNumBottomBlobs() const { return 0; } 29 | virtual inline int MinTopBlobs() const { return 1; } 30 | 31 | protected: 32 | virtual void load_batch(Batch* batch); 33 | 34 | DataReader reader_; 35 | bool has_anno_type_; 36 | AnnotatedDatum_AnnotationType anno_type_; 37 | vector batch_samplers_; 38 | string label_map_file_; 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_DATA_LAYER_HPP_ 44 | -------------------------------------------------------------------------------- /src/caffe/layers/cudnn_softmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "thrust/device_vector.h" 5 | 6 | #include "caffe/layers/cudnn_softmax_layer.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, 12 | const vector*>& top) { 13 | SoftmaxLayer::LayerSetUp(bottom, top); 14 | // Initialize CUDNN. 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | handles_setup_ = true; 19 | } 20 | 21 | template 22 | void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, 23 | const vector*>& top) { 24 | SoftmaxLayer::Reshape(bottom, top); 25 | int N = this->outer_num_; 26 | int K = bottom[0]->shape(this->softmax_axis_); 27 | int H = this->inner_num_; 28 | int W = 1; 29 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 30 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 31 | } 32 | 33 | template 34 | CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { 35 | // Check that handles have been setup before destroying. 
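// Editor's note (added): handles_setup_ only becomes true at the end of
// LayerSetUp, so this early return keeps the destructor from calling
// cudnnDestroy* on descriptors and handles that were never created.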
36 |   if (!handles_setup_) { return; }
37 | 
38 |   cudnnDestroyTensorDescriptor(bottom_desc_);
39 |   cudnnDestroyTensorDescriptor(top_desc_);
40 |   cudnnDestroy(handle_);
41 | }
42 | 
43 | INSTANTIATE_CLASS(CuDNNSoftmaxLayer);
44 | 
45 | }  // namespace caffe
46 | #endif
47 | 
--------------------------------------------------------------------------------
/src/caffe/layers/hdf5_output_layer.cu:
--------------------------------------------------------------------------------
 1 | #include <vector>
 2 | 
 3 | #include "hdf5.h"
 4 | #include "hdf5_hl.h"
 5 | 
 6 | #include "caffe/layers/hdf5_output_layer.hpp"
 7 | 
 8 | namespace caffe {
 9 | 
10 | template <typename Dtype>
11 | void HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   CHECK_GE(bottom.size(), 2);
14 |   CHECK_EQ(bottom[0]->num(), bottom[1]->num());
15 |   data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
16 |       bottom[0]->height(), bottom[0]->width());
17 |   label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
18 |       bottom[1]->height(), bottom[1]->width());
19 |   const int data_datum_dim = bottom[0]->count() / bottom[0]->num();
20 |   const int label_datum_dim = bottom[1]->count() / bottom[1]->num();
21 | 
22 |   for (int i = 0; i < bottom[0]->num(); ++i) {
23 |     caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim],
24 |         &data_blob_.mutable_cpu_data()[i * data_datum_dim]);
25 |     caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim],
26 |         &label_blob_.mutable_cpu_data()[i * label_datum_dim]);
27 |   }
28 |   SaveBlobs();
29 | }
30 | 
31 | template <typename Dtype>
32 | void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
33 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
34 |   return;
35 | }
36 | 
37 | INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer);
38 | 
39 | }  // namespace caffe
40 | 
--------------------------------------------------------------------------------
/src/caffe/layers/euclidean_loss_layer.cu:
--------------------------------------------------------------------------------
 1 | #include <vector>
 2 | 
 3 | #include "caffe/layers/euclidean_loss_layer.hpp"
 4 | #include "caffe/util/math_functions.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   int count = bottom[0]->count();
12 |   caffe_gpu_sub(
13 |       count,
14 |       bottom[0]->gpu_data(),
15 |       bottom[1]->gpu_data(),
16 |       diff_.mutable_gpu_data());
17 |   Dtype dot;
18 |   caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
19 |   Dtype loss = dot / bottom[0]->num() / Dtype(2);
20 |   top[0]->mutable_cpu_data()[0] = loss;
21 | }
22 | 
23 | template <typename Dtype>
24 | void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
25 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
26 |   for (int i = 0; i < 2; ++i) {
27 |     if (propagate_down[i]) {
28 |       const Dtype sign = (i == 0) ? 1 : -1;
29 |       const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
30 |       caffe_gpu_axpby(
31 |           bottom[i]->count(),              // count
32 |           alpha,                           // alpha
33 |           diff_.gpu_data(),                // a
34 |           Dtype(0),                        // beta
35 |           bottom[i]->mutable_gpu_diff());  // b
36 |     }
37 |   }
38 | }
39 | 
40 | INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);
41 | 
42 | }  // namespace caffe
43 | 
--------------------------------------------------------------------------------
/src/caffe/layers/bnll_layer.cpp:
--------------------------------------------------------------------------------
 1 | #include <algorithm>
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/bnll_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | const float kBNLL_THRESHOLD = 50.;
 9 | 
10 | template <typename Dtype>
11 | void BNLLLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   const Dtype* bottom_data = bottom[0]->cpu_data();
14 |   Dtype* top_data = top[0]->mutable_cpu_data();
15 |   const int count = bottom[0]->count();
16 |   for (int i = 0; i < count; ++i) {
17 |     top_data[i] = bottom_data[i] > 0 ?
18 |         bottom_data[i] + log(1. + exp(-bottom_data[i])) :
19 |         log(1. + exp(bottom_data[i]));
20 |   }
21 | }
22 | 
23 | template <typename Dtype>
24 | void BNLLLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
25 |     const vector<bool>& propagate_down,
26 |     const vector<Blob<Dtype>*>& bottom) {
27 |   if (propagate_down[0]) {
28 |     const Dtype* bottom_data = bottom[0]->cpu_data();
29 |     const Dtype* top_diff = top[0]->cpu_diff();
30 |     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
31 |     const int count = bottom[0]->count();
32 |     Dtype expval;
33 |     for (int i = 0; i < count; ++i) {
34 |       expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD)));
35 |       bottom_diff[i] = top_diff[i] * expval / (expval + 1.);
36 |     }
37 |   }
38 | }
39 | 
40 | #ifdef CPU_ONLY
41 | STUB_GPU(BNLLLayer);
42 | #endif
43 | 
44 | INSTANTIATE_CLASS(BNLLLayer);
45 | REGISTER_LAYER_CLASS(BNLL);
46 | 
47 | }  // namespace caffe
48 | 
--------------------------------------------------------------------------------
/data/convert_labels/VOCcode/VOCevalcls.m:
--------------------------------------------------------------------------------
 1 | function [rec,prec,ap] = VOCevalcls(VOCopts,id,cls,draw)
 2 | 
 3 | % load test set
 4 | [gtids,gt]=textread(sprintf(VOCopts.clsimgsetpath,cls,VOCopts.testset),'%s %d');
 5 | 
 6 | % load results
 7 | [ids,confidence]=textread(sprintf(VOCopts.clsrespath,id,cls),'%s %f');
 8 | 
 9 | % map results to ground truth images
10 | out=ones(size(gt))*-inf;
11 | tic;
12 | for i=1:length(ids)
13 |     % display progress
14 |     if toc>1
15 |         fprintf('%s: pr: %d/%d\n',cls,i,length(ids));
16 |         drawnow;
17 |         tic;
18 |     end
19 | 
20 |     % find ground truth image
21 |     j=strmatch(ids{i},gtids,'exact');
22 |     if isempty(j)
23 |         error('unrecognized image "%s"',ids{i});
24 |     elseif length(j)>1
25 |         error('multiple image "%s"',ids{i});
26 |     else
27 |         out(j)=confidence(i);
28 |     end
29 | end
30 | 
31 | % compute precision/recall
32 | 
33 | [so,si]=sort(-out);
34 | tp=gt(si)>0;
35 | fp=gt(si)<0;
36 | 
37 | fp=cumsum(fp);
38 | tp=cumsum(tp);
39 | rec=tp/sum(gt>0);
40 | prec=tp./(fp+tp);
41 | 
42 | % compute average precision
43 | 
44 | ap=0;
45 | for t=0:0.1:1
46 |     p=max(prec(rec>=t));
47 |     if isempty(p)
48 |         p=0;
49 |     end
50 |     ap=ap+p/11;
51 | end
52 | 
53 | if draw
54 |     % plot precision/recall
55 |     plot(rec,prec,'-');
56 |     grid;
57 |     xlabel 'recall'
58 |     ylabel 'precision'
59 |     title(sprintf('class: %s, subset: %s, AP = %.3f',cls,VOCopts.testset,ap));
60 | end
61 | 
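A note on the AP loop in VOCevalcls.m above: it computes the PASCAL VOC 11-point interpolated average precision. For readers porting it out of MATLAB, here is a minimal self-contained C++ sketch of the same computation (the function name and types are ours, not part of this tree), assuming rec and prec are the cumulative recall/precision arrays obtained after sorting detections by descending confidence:

#include <algorithm>
#include <vector>

// 11-point interpolated AP, mirroring the MATLAB loop above: for each
// threshold t in {0, 0.1, ..., 1}, take the best precision achieved at
// recall >= t (0 if none is reached), and average the 11 values.
double ElevenPointAP(const std::vector<double>& rec,
                     const std::vector<double>& prec) {
  double ap = 0.0;
  for (int k = 0; k <= 10; ++k) {
    const double t = k / 10.0;  // integer loop avoids float accumulation drift
    double p = 0.0;  // matches the MATLAB fallback p = 0 when no rec >= t
    for (std::size_t i = 0; i < rec.size(); ++i) {
      if (rec[i] >= t) p = std::max(p, prec[i]);
    }
    ap += p / 11.0;
  }
  return ap;
}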
--------------------------------------------------------------------------------
/include/caffe/layers/cudnn_sigmoid_layer.hpp:
--------------------------------------------------------------------------------
 1 | #ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_
 2 | #define CAFFE_CUDNN_SIGMOID_LAYER_HPP_
 3 | 
 4 | #include <vector>
 5 | 
 6 | #include "caffe/blob.hpp"
 7 | #include "caffe/layer.hpp"
 8 | #include "caffe/proto/caffe.pb.h"
 9 | 
10 | #include "caffe/layers/neuron_layer.hpp"
11 | #include "caffe/layers/sigmoid_layer.hpp"
12 | 
13 | namespace caffe {
14 | 
15 | #ifdef USE_CUDNN
16 | /**
17 |  * @brief CuDNN acceleration of SigmoidLayer.
18 |  */
19 | template <typename Dtype>
20 | class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
21 |  public:
22 |   explicit CuDNNSigmoidLayer(const LayerParameter& param)
23 |       : SigmoidLayer<Dtype>(param), handles_setup_(false) {}
24 |   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
25 |       const vector<Blob<Dtype>*>& top);
26 |   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
27 |       const vector<Blob<Dtype>*>& top);
28 |   virtual ~CuDNNSigmoidLayer();
29 | 
30 |  protected:
31 |   virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
32 |       const vector<Blob<Dtype>*>& top);
33 |   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
34 |       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
35 | 
36 |   bool handles_setup_;
37 |   cudnnHandle_t handle_;
38 |   cudnnTensorDescriptor_t bottom_desc_;
39 |   cudnnTensorDescriptor_t top_desc_;
40 |   cudnnActivationDescriptor_t activ_desc_;
41 | };
42 | #endif
43 | 
44 | }  // namespace caffe
45 | 
46 | #endif  // CAFFE_CUDNN_SIGMOID_LAYER_HPP_
47 | 
--------------------------------------------------------------------------------
/src/caffe/layers/absval_layer.cpp:
--------------------------------------------------------------------------------
 1 | #include <vector>
 2 | 
 3 | #include "caffe/layers/absval_layer.hpp"
 4 | #include "caffe/util/math_functions.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void AbsValLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   NeuronLayer<Dtype>::LayerSetUp(bottom, top);
12 |   CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
13 |     "allow in-place computation.";
14 | }
15 | 
16 | template <typename Dtype>
17 | void AbsValLayer<Dtype>::Forward_cpu(
18 |     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
19 |   const int count = top[0]->count();
20 |   Dtype* top_data = top[0]->mutable_cpu_data();
21 |   caffe_abs(count, bottom[0]->cpu_data(), top_data);
22 | }
23 | 
24 | template <typename Dtype>
25 | void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
26 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
27 |   const int count = top[0]->count();
28 |   const Dtype* top_diff = top[0]->cpu_diff();
29 |   if (propagate_down[0]) {
30 |     const Dtype* bottom_data = bottom[0]->cpu_data();
31 |     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
32 |     caffe_cpu_sign(count, bottom_data, bottom_diff);
33 |     caffe_mul(count, bottom_diff, top_diff, bottom_diff);
34 |   }
35 | }
36 | 
37 | #ifdef CPU_ONLY
38 | STUB_GPU(AbsValLayer);
39 | #endif
40 | 
41 | INSTANTIATE_CLASS(AbsValLayer);
42 | REGISTER_LAYER_CLASS(AbsVal);
43 | 
44 | }  // namespace caffe
45 | 
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_lrn_layer.cu:
--------------------------------------------------------------------------------
 1 | #ifdef USE_CUDNN
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/cudnn_lrn_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void CuDNNLRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   const Dtype* bottom_data = bottom[0]->gpu_data();
12 |   Dtype* top_data = top[0]->mutable_gpu_data();
13 | 
14 |   CUDNN_CHECK(cudnnLRNCrossChannelForward(
15 |       handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1,
16 |       cudnn::dataType<Dtype>::one,
17 |       bottom_desc_, bottom_data,
18 |       cudnn::dataType<Dtype>::zero,
19 |       top_desc_, top_data) );
20 | }
21 | 
22 | template <typename Dtype>
23 | void CuDNNLRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
24 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
25 |   const Dtype* top_diff = top[0]->gpu_diff();
26 |   const Dtype* top_data = top[0]->gpu_data();
27 |   const Dtype* bottom_data = bottom[0]->gpu_data();
28 |   Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
29 | 
30 |   CUDNN_CHECK(cudnnLRNCrossChannelBackward(
31 |       handle_, norm_desc_, CUDNN_LRN_CROSS_CHANNEL_DIM1,
32 |       cudnn::dataType<Dtype>::one,
33 |       top_desc_, top_data,
34 |       top_desc_, top_diff,
35 |       bottom_desc_, bottom_data,
36 |       cudnn::dataType<Dtype>::zero,
37 |       bottom_desc_, bottom_diff) );
38 | }
39 | 
40 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLRNLayer);
41 | 
42 | };  // namespace caffe
43 | 
44 | #endif
45 | 
--------------------------------------------------------------------------------
/src/caffe/layers/relu_layer.cpp:
--------------------------------------------------------------------------------
 1 | #include <algorithm>
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/relu_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   const Dtype* bottom_data = bottom[0]->cpu_data();
12 |   Dtype* top_data = top[0]->mutable_cpu_data();
13 |   const int count = bottom[0]->count();
14 |   Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
15 |   for (int i = 0; i < count; ++i) {
16 |     top_data[i] = std::max(bottom_data[i], Dtype(0))
17 |         + negative_slope * std::min(bottom_data[i], Dtype(0));
18 |   }
19 | }
20 | 
21 | template <typename Dtype>
22 | void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
23 |     const vector<bool>& propagate_down,
24 |     const vector<Blob<Dtype>*>& bottom) {
25 |   if (propagate_down[0]) {
26 |     const Dtype* bottom_data = bottom[0]->cpu_data();
27 |     const Dtype* top_diff = top[0]->cpu_diff();
28 |     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
29 |     const int count = bottom[0]->count();
30 |     Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
31 |     for (int i = 0; i < count; ++i) {
32 |       bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
33 |           + negative_slope * (bottom_data[i] <= 0));
34 |     }
35 |   }
36 | }
37 | 
38 | 
39 | #ifdef CPU_ONLY
40 | STUB_GPU(ReLULayer);
41 | #endif
42 | 
43 | INSTANTIATE_CLASS(ReLULayer);
44 | 
45 | }  // namespace caffe
46 | 
--------------------------------------------------------------------------------
/tools/extra/launch_resize_and_crop_images.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | #### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer
 3 | 
 4 | # If you encounter an "address already in use" error, kill the process.
 5 | # 11235 is the port of the server process
 6 | # https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py
 7 | # sudo netstat -ap | grep 11235
 8 | # The last column of the output is PID/Program name
 9 | # kill -9 PID
10 | # Second solution:
11 | # nmap localhost
12 | # fuser -k 11235/tcp
13 | # Or just wait a few seconds.
14 | 
15 | ## Launch your Mapreduce locally
16 | # num_clients: number of processes
17 | # image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV.
18 | # input: a file listing one image path per line, relative to input_folder
19 | # input_folder: the directory containing the original images
20 | # output_folder: where to save the resized and cropped images
21 | ./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/
22 | 
23 | ## Launch your Mapreduce with MPI
24 | # mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/
25 | 
--------------------------------------------------------------------------------
/python/draw_net.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | """
 3 | Draw a graph of the net architecture.
 4 | """
 5 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
 6 | from google.protobuf import text_format
 7 | 
 8 | import caffe
 9 | import caffe.draw
10 | from caffe.proto import caffe_pb2
11 | 
12 | 
13 | def parse_args():
14 |     """Parse input arguments
15 |     """
16 | 
17 |     parser = ArgumentParser(description=__doc__,
18 |                             formatter_class=ArgumentDefaultsHelpFormatter)
19 | 
20 |     parser.add_argument('input_net_proto_file',
21 |                         help='Input network prototxt file')
22 |     parser.add_argument('output_image_file',
23 |                         help='Output image file')
24 |     parser.add_argument('--rankdir',
25 |                         help=('One of TB (top-bottom, i.e., vertical), '
26 |                               'RL (right-left, i.e., horizontal), or another '
27 |                               'valid dot option; see '
28 |                               'http://www.graphviz.org/doc/info/'
29 |                               'attrs.html#k:rankdir'),
30 |                         default='LR')
31 | 
32 |     args = parser.parse_args()
33 |     return args
34 | 
35 | 
36 | def main():
37 |     args = parse_args()
38 |     net = caffe_pb2.NetParameter()
39 |     text_format.Merge(open(args.input_net_proto_file).read(), net)
40 |     print('Drawing net to %s' % args.output_image_file)
41 |     caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
42 | 
43 | 
44 | if __name__ == '__main__':
45 |     main()
46 | 
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_relu_layer.cpp:
--------------------------------------------------------------------------------
 1 | #ifdef USE_CUDNN
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/cudnn_relu_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   ReLULayer<Dtype>::LayerSetUp(bottom, top);
12 |   // initialize cuDNN
13 |   CUDNN_CHECK(cudnnCreate(&handle_));
14 |   cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
15 |   cudnn::createTensor4dDesc<Dtype>(&top_desc_);
16 |   cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_RELU);
17 |   handles_setup_ = true;
18 | }
19 | 
20 | template <typename Dtype>
21 | void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
22 |     const vector<Blob<Dtype>*>& top) {
23 |   ReLULayer<Dtype>::Reshape(bottom, top);
24 |   const int N = bottom[0]->num();
25 |   const int K = bottom[0]->channels();
26 |   const int H = bottom[0]->height();
27 |   const int W = bottom[0]->width();
28 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
29 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
30 | }
31 | 
32 | template <typename Dtype>
33 | CuDNNReLULayer<Dtype>::~CuDNNReLULayer() {
34 |   // Check that handles have been setup before destroying.
35 |   if (!handles_setup_) { return; }
36 | 
37 |   cudnnDestroyTensorDescriptor(this->bottom_desc_);
38 |   cudnnDestroyTensorDescriptor(this->top_desc_);
39 |   cudnnDestroy(this->handle_);
40 | }
41 | 
42 | INSTANTIATE_CLASS(CuDNNReLULayer);
43 | 
44 | }  // namespace caffe
45 | #endif
46 | 
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_tanh_layer.cpp:
--------------------------------------------------------------------------------
 1 | #ifdef USE_CUDNN
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/cudnn_tanh_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   TanHLayer<Dtype>::LayerSetUp(bottom, top);
12 |   // initialize cuDNN
13 |   CUDNN_CHECK(cudnnCreate(&handle_));
14 |   cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
15 |   cudnn::createTensor4dDesc<Dtype>(&top_desc_);
16 |   cudnn::createActivationDescriptor<Dtype>(&activ_desc_, CUDNN_ACTIVATION_TANH);
17 |   handles_setup_ = true;
18 | }
19 | 
20 | template <typename Dtype>
21 | void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
22 |     const vector<Blob<Dtype>*>& top) {
23 |   TanHLayer<Dtype>::Reshape(bottom, top);
24 |   const int N = bottom[0]->num();
25 |   const int K = bottom[0]->channels();
26 |   const int H = bottom[0]->height();
27 |   const int W = bottom[0]->width();
28 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
29 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
30 | }
31 | 
32 | template <typename Dtype>
33 | CuDNNTanHLayer<Dtype>::~CuDNNTanHLayer() {
34 |   // Check that handles have been setup before destroying.
35 |   if (!handles_setup_) { return; }
36 | 
37 |   cudnnDestroyTensorDescriptor(this->bottom_desc_);
38 |   cudnnDestroyTensorDescriptor(this->top_desc_);
39 |   cudnnDestroy(this->handle_);
40 | }
41 | 
42 | INSTANTIATE_CLASS(CuDNNTanHLayer);
43 | 
44 | }  // namespace caffe
45 | #endif
46 | 
--------------------------------------------------------------------------------
/include/caffe/layers/cudnn_lcn_layer.hpp:
--------------------------------------------------------------------------------
 1 | #ifndef CAFFE_CUDNN_LCN_LAYER_HPP_
 2 | #define CAFFE_CUDNN_LCN_LAYER_HPP_
 3 | 
 4 | #include <vector>
 5 | 
 6 | #include "caffe/blob.hpp"
 7 | #include "caffe/layer.hpp"
 8 | #include "caffe/proto/caffe.pb.h"
 9 | 
10 | #include "caffe/layers/lrn_layer.hpp"
11 | #include "caffe/layers/power_layer.hpp"
12 | 
13 | namespace caffe {
14 | 
15 | #ifdef USE_CUDNN
16 | template <typename Dtype>
17 | class CuDNNLCNLayer : public LRNLayer<Dtype> {
18 |  public:
19 |   explicit CuDNNLCNLayer(const LayerParameter& param)
20 |       : LRNLayer<Dtype>(param), handles_setup_(false), tempDataSize(0),
21 |         tempData1(NULL), tempData2(NULL) {}
22 |   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
23 |       const vector<Blob<Dtype>*>& top);
24 |   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
25 |       const vector<Blob<Dtype>*>& top);
26 |   virtual ~CuDNNLCNLayer();
27 | 
28 |  protected:
29 |   virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
30 |       const vector<Blob<Dtype>*>& top);
31 |   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
32 |       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
33 | 
34 |   bool handles_setup_;
35 |   cudnnHandle_t handle_;
36 |   cudnnLRNDescriptor_t norm_desc_;
37 |   cudnnTensorDescriptor_t bottom_desc_, top_desc_;
38 | 
39 |   int size_, pre_pad_;
40 |   Dtype alpha_, beta_, k_;
41 | 
42 |   size_t tempDataSize;
43 |   void *tempData1, *tempData2;
44 | };
45 | #endif
46 | 
47 | }  // namespace caffe
48 | 
49 | #endif  // CAFFE_CUDNN_LCN_LAYER_HPP_
50 | 
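The cuDNN layers above all share one lifecycle: LayerSetUp creates the handle and tensor descriptors and flips handles_setup_, Reshape re-derives the 4D descriptor shapes, and the destructor tears things down only when handles_setup_ is true. As a standalone sketch of that guard pattern against the raw cuDNN API (the struct name is ours; float data and NCHW layout assumed), roughly what the cudnn::createTensor4dDesc / setTensor4dDesc wrappers expand to:

#include <cudnn.h>

// Minimal RAII-style guard mirroring the handles_setup_ idiom used by the
// layers above: create a descriptor once, reshape it as often as needed,
// and destroy it only if creation actually succeeded.
struct CudnnTensorGuard {
  cudnnTensorDescriptor_t desc;
  bool setup;
  CudnnTensorGuard() : setup(false) {
    if (cudnnCreateTensorDescriptor(&desc) == CUDNN_STATUS_SUCCESS) {
      setup = true;
    }
  }
  void Set4d(int n, int c, int h, int w) {
    // NCHW float tensor, matching setTensor4dDesc<float> in the wrappers.
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                               n, c, h, w);
  }
  ~CudnnTensorGuard() {
    if (setup) cudnnDestroyTensorDescriptor(desc);  // skip if never created
  }
};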
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_softmax_layer.cu:
--------------------------------------------------------------------------------
 1 | #ifdef USE_CUDNN
 2 | #include <vector>
 3 | 
 4 | #include "thrust/device_vector.h"
 5 | 
 6 | #include "caffe/layers/cudnn_softmax_layer.hpp"
 7 | 
 8 | namespace caffe {
 9 | 
10 | template <typename Dtype>
11 | void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
12 |     const vector<Blob<Dtype>*>& top) {
13 |   const Dtype* bottom_data = bottom[0]->gpu_data();
14 |   Dtype* top_data = top[0]->mutable_gpu_data();
15 |   CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
16 |       CUDNN_SOFTMAX_MODE_CHANNEL,
17 |       cudnn::dataType<Dtype>::one,
18 |       bottom_desc_, bottom_data,
19 |       cudnn::dataType<Dtype>::zero,
20 |       top_desc_, top_data));
21 | }
22 | 
23 | template <typename Dtype>
24 | void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
25 |     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
26 |   if (propagate_down[0]) {
27 |     const Dtype* top_data = top[0]->gpu_data();
28 |     const Dtype* top_diff = top[0]->gpu_diff();
29 |     const Dtype* bottom_data = bottom[0]->gpu_data();
30 |     Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
31 | 
32 |     CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
33 |         CUDNN_SOFTMAX_MODE_CHANNEL,
34 |         cudnn::dataType<Dtype>::one,
35 |         top_desc_, top_data, top_desc_, top_diff,
36 |         cudnn::dataType<Dtype>::zero,
37 |         bottom_desc_, bottom_diff));
38 |   }
39 | }
40 | 
41 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSoftmaxLayer);
42 | 
43 | }  // namespace caffe
44 | #endif
45 | 
--------------------------------------------------------------------------------
/src/caffe/layers/elu_layer.cpp:
--------------------------------------------------------------------------------
 1 | #include <algorithm>
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/elu_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void ELULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   const Dtype* bottom_data = bottom[0]->cpu_data();
12 |   Dtype* top_data = top[0]->mutable_cpu_data();
13 |   const int count = bottom[0]->count();
14 |   Dtype alpha = this->layer_param_.elu_param().alpha();
15 |   for (int i = 0; i < count; ++i) {
16 |     top_data[i] = std::max(bottom_data[i], Dtype(0))
17 |         + alpha * (exp(std::min(bottom_data[i], Dtype(0))) - Dtype(1));
18 |   }
19 | }
20 | 
21 | template <typename Dtype>
22 | void ELULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
23 |     const vector<bool>& propagate_down,
24 |     const vector<Blob<Dtype>*>& bottom) {
25 |   if (propagate_down[0]) {
26 |     const Dtype* bottom_data = bottom[0]->cpu_data();
27 |     const Dtype* top_data = top[0]->cpu_data();
28 |     const Dtype* top_diff = top[0]->cpu_diff();
29 |     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
30 |     const int count = bottom[0]->count();
31 |     Dtype alpha = this->layer_param_.elu_param().alpha();
32 |     for (int i = 0; i < count; ++i) {
33 |       bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
34 |           + (alpha + top_data[i]) * (bottom_data[i] <= 0));
35 |     }
36 |   }
37 | }
38 | 
39 | 
40 | #ifdef CPU_ONLY
41 | STUB_GPU(ELULayer);
42 | #endif
43 | 
44 | INSTANTIATE_CLASS(ELULayer);
45 | REGISTER_LAYER_CLASS(ELU);
46 | 
47 | }  // namespace caffe
48 | 
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_sigmoid_layer.cpp:
--------------------------------------------------------------------------------
 1 | #ifdef USE_CUDNN
 2 | #include <vector>
 3 | 
 4 | #include "caffe/layers/cudnn_sigmoid_layer.hpp"
 5 | 
 6 | namespace caffe {
 7 | 
 8 | template <typename Dtype>
 9 | void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
10 |     const vector<Blob<Dtype>*>& top) {
11 |   SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
12 |   // initialize cuDNN
13 |   CUDNN_CHECK(cudnnCreate(&handle_));
14 |   cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
15 |   cudnn::createTensor4dDesc<Dtype>(&top_desc_);
16 |   cudnn::createActivationDescriptor<Dtype>(&activ_desc_,
17 |       CUDNN_ACTIVATION_SIGMOID);
18 |   handles_setup_ = true;
19 | }
20 | 
21 | template <typename Dtype>
22 | void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
23 |     const vector<Blob<Dtype>*>& top) {
24 |   SigmoidLayer<Dtype>::Reshape(bottom, top);
25 |   const int N = bottom[0]->num();
26 |   const int K = bottom[0]->channels();
27 |   const int H = bottom[0]->height();
28 |   const int W = bottom[0]->width();
29 |   cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
30 |   cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
31 | }
32 | 
33 | template <typename Dtype>
34 | CuDNNSigmoidLayer<Dtype>::~CuDNNSigmoidLayer() {
35 |   // Check that handles have been setup before destroying.
36 |   if (!handles_setup_) { return; }
37 | 
38 |   cudnnDestroyTensorDescriptor(this->bottom_desc_);
39 |   cudnnDestroyTensorDescriptor(this->top_desc_);
40 |   cudnnDestroy(this->handle_);
41 | }
42 | 
43 | INSTANTIATE_CLASS(CuDNNSigmoidLayer);
44 | 
45 | }  // namespace caffe
46 | #endif
47 | 
--------------------------------------------------------------------------------
/include/caffe/internal_thread.hpp:
--------------------------------------------------------------------------------
 1 | #ifndef CAFFE_INTERNAL_THREAD_HPP_
 2 | #define CAFFE_INTERNAL_THREAD_HPP_
 3 | 
 4 | #include "caffe/common.hpp"
 5 | 
 6 | /**
 7 |  Forward declare boost::thread instead of including boost/thread.hpp
 8 |  to avoid boost/NVCC issues (#1009, #1010) on OSX.
 9 |  */
10 | namespace boost { class thread; }
11 | 
12 | namespace caffe {
13 | 
14 | /**
15 |  * Virtual class encapsulating boost::thread for use as a base class.
16 |  * A child class acquires the ability to run a single thread by
17 |  * reimplementing the virtual function InternalThreadEntry.
18 |  */
19 | class InternalThread {
20 |  public:
21 |   InternalThread() : thread_() {}
22 |   virtual ~InternalThread();
23 | 
24 |   /**
25 |    * Caffe's thread local state will be initialized using the current
26 |    * thread values, e.g. device id, solver index etc. The random seed
27 |    * is initialized using caffe_rng_rand.
28 |    */
29 |   void StartInternalThread();
30 | 
31 |   /** Will not return until the internal thread has exited. */
32 |   void StopInternalThread();
33 | 
34 |   bool is_started() const;
35 | 
36 |  protected:
37 |   /* Implement this method in your subclass
38 |      with the code you want your thread to run. */
39 |   virtual void InternalThreadEntry() {}
40 | 
41 |   /* Should be tested when running loops to exit when requested. */
42 |   bool must_stop();
43 | 
44 |  private:
45 |   void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count,
46 |       bool root_solver);
47 | 
48 |   shared_ptr<boost::thread> thread_;
49 | };
50 | 
51 | }  // namespace caffe
52 | 
53 | #endif  // CAFFE_INTERNAL_THREAD_HPP_
54 | 
--------------------------------------------------------------------------------
/include/caffe/util/sampler.hpp:
--------------------------------------------------------------------------------
 1 | #ifndef CAFFE_UTIL_SAMPLER_H_
 2 | #define CAFFE_UTIL_SAMPLER_H_
 3 | 
 4 | #include <vector>
 5 | 
 6 | #include "glog/logging.h"
 7 | 
 8 | #include "caffe/caffe.hpp"
 9 | 
10 | namespace caffe {
11 | 
12 | // Find all annotated NormalizedBBox.
13 | void GroupObjectBBoxes(const AnnotatedDatum& anno_datum,
14 |     vector<NormalizedBBox>* object_bboxes);
15 | 
16 | // Check if a sampled bbox satisfies the constraints with all object bboxes.
17 | bool SatisfySampleConstraint(const NormalizedBBox& sampled_bbox,
18 |     const vector<NormalizedBBox>& object_bboxes,
19 |     const SampleConstraint& sample_constraint);
20 | 
21 | // Sample a NormalizedBBox given the specifications.
22 | void SampleBBox(const Sampler& sampler, NormalizedBBox* sampled_bbox);
23 | 
24 | // Generate samples from a NormalizedBBox using the BatchSampler.
25 | void GenerateSamples(const NormalizedBBox& source_bbox,
26 |     const vector<NormalizedBBox>& object_bboxes,
27 |     const BatchSampler& batch_sampler,
28 |     vector<NormalizedBBox>* sampled_bboxes);
29 | 
30 | // Generate samples from an AnnotatedDatum using the BatchSampler.
31 | // All sampled bboxes that satisfy the constraints defined in the BatchSampler
32 | // are stored in sampled_bboxes.
33 | void GenerateBatchSamples(const AnnotatedDatum& anno_datum,
34 |     const vector<BatchSampler>& batch_samplers,
35 |     vector<NormalizedBBox>* sampled_bboxes);
36 | 
37 | }  // namespace caffe
38 | 
39 | #endif  // CAFFE_UTIL_SAMPLER_H_
40 | 
--------------------------------------------------------------------------------
/matlab/+caffe/+test/test_solver.m:
--------------------------------------------------------------------------------
 1 | classdef test_solver < matlab.unittest.TestCase
 2 | 
 3 |   properties
 4 |     num_output
 5 |     solver
 6 |   end
 7 | 
 8 |   methods
 9 |     function self = test_solver()
10 |       self.num_output = 13;
11 |       model_file = caffe.test.test_net.simple_net_file(self.num_output);
12 |       solver_file = tempname();
13 | 
14 |       fid = fopen(solver_file, 'w');
15 |       fprintf(fid, [ ...
16 |         'net: "' model_file '"\n' ...
17 |         'test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n' ...
18 |         'weight_decay: 0.0005 lr_policy: "inv" gamma: 0.0001 power: 0.75\n' ...
19 |         'display: 100 max_iter: 100 snapshot_after_train: false\n' ]);
20 |       fclose(fid);
21 | 
22 |       self.solver = caffe.Solver(solver_file);
23 |       % also make sure get_solver runs
24 |       caffe.get_solver(solver_file);
25 |       caffe.set_mode_cpu();
26 |       % fill in valid labels
27 |       self.solver.net.blobs('label').set_data(randi( ...
28 |         self.num_output - 1, self.solver.net.blobs('label').shape));
29 |       self.solver.test_nets(1).blobs('label').set_data(randi( ...
30 |         self.num_output - 1, self.solver.test_nets(1).blobs('label').shape));
31 | 
32 |       delete(solver_file);
33 |       delete(model_file);
34 |     end
35 |   end
36 |   methods (Test)
37 |     function test_solve(self)
38 |       self.verifyEqual(self.solver.iter(), 0)
39 |       self.solver.step(30);
40 |       self.verifyEqual(self.solver.iter(), 30)
41 |       self.solver.solve()
42 |       self.verifyEqual(self.solver.iter(), 100)
43 |     end
44 |   end
45 | end
46 | 
--------------------------------------------------------------------------------
/src/caffe/test/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | # This option allows building only selected test files, excluding all others
 2 | # Usage example:
 3 | #   cmake -DBUILD_only_tests="common,net,blob,im2col_kernel"
 4 | set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extension")
 5 | caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests})
 6 | caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests})
 7 | 
 8 | # For the 'make runtest' target we don't need to embed test data paths into
 9 | # source files, because the test target is executed in the source directory.
10 | # That's why the lines below are commented.
TODO: remove them 11 | 12 | # definition needed to include CMake generated files 13 | #add_definitions(-DCMAKE_BUILD) 14 | 15 | # generates test_data/sample_data_list.txt.gen.cmake 16 | #caffe_configure_testdatafile(test_data/sample_data_list.txt) 17 | 18 | set(the_target test.testbin) 19 | set(test_args --gtest_shuffle) 20 | 21 | if(HAVE_CUDA) 22 | caffe_cuda_compile(test_cuda_objs ${test_cuda}) 23 | list(APPEND test_srcs ${test_cuda_objs} ${test_cuda}) 24 | else() 25 | list(APPEND test_args --gtest_filter="-*GPU*") 26 | endif() 27 | 28 | # ---[ Adding test target 29 | add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs}) 30 | target_link_libraries(${the_target} gtest ${Caffe_LINK}) 31 | caffe_default_properties(${the_target}) 32 | caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test") 33 | 34 | # ---[ Adding runtest 35 | add_custom_target(runtest COMMAND ${the_target} ${test_args} 36 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) 37 | -------------------------------------------------------------------------------- /src/caffe/layers/flatten_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layers/flatten_layer.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void FlattenLayer::Reshape(const vector*>& bottom, 9 | const vector*>& top) { 10 | CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not " 11 | "allow in-place computation."; 12 | const int start_axis = bottom[0]->CanonicalAxisIndex( 13 | this->layer_param_.flatten_param().axis()); 14 | const int end_axis = bottom[0]->CanonicalAxisIndex( 15 | this->layer_param_.flatten_param().end_axis()); 16 | vector top_shape; 17 | for (int i = 0; i < start_axis; ++i) { 18 | top_shape.push_back(bottom[0]->shape(i)); 19 | } 20 | const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); 21 | top_shape.push_back(flattened_dim); 22 | for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { 23 | top_shape.push_back(bottom[0]->shape(i)); 24 | } 25 | top[0]->Reshape(top_shape); 26 | CHECK_EQ(top[0]->count(), bottom[0]->count()); 27 | } 28 | 29 | template 30 | void FlattenLayer::Forward_cpu(const vector*>& bottom, 31 | const vector*>& top) { 32 | top[0]->ShareData(*bottom[0]); 33 | } 34 | 35 | template 36 | void FlattenLayer::Backward_cpu(const vector*>& top, 37 | const vector& propagate_down, const vector*>& bottom) { 38 | bottom[0]->ShareDiff(*top[0]); 39 | } 40 | 41 | INSTANTIATE_CLASS(FlattenLayer); 42 | REGISTER_LAYER_CLASS(Flatten); 43 | 44 | } // namespace caffe 45 | -------------------------------------------------------------------------------- /include/caffe/layers/input_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_INPUT_LAYER_HPP_ 2 | #define CAFFE_INPUT_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Provides data to the Net by assigning tops directly. 14 | * 15 | * This data layer is a container that merely holds the data assigned to it; 16 | * forward, backward, and reshape are all no-ops. 
17 | */ 18 | template 19 | class InputLayer : public Layer { 20 | public: 21 | explicit InputLayer(const LayerParameter& param) 22 | : Layer(param) {} 23 | virtual void LayerSetUp(const vector*>& bottom, 24 | const vector*>& top); 25 | // Data layers should be shared by multiple solvers in parallel 26 | virtual inline bool ShareInParallel() const { return true; } 27 | // Data layers have no bottoms, so reshaping is trivial. 28 | virtual void Reshape(const vector*>& bottom, 29 | const vector*>& top) {} 30 | 31 | virtual inline const char* type() const { return "Input"; } 32 | virtual inline int ExactNumBottomBlobs() const { return 0; } 33 | virtual inline int MinTopBlobs() const { return 1; } 34 | 35 | protected: 36 | virtual void Forward_cpu(const vector*>& bottom, 37 | const vector*>& top) {} 38 | virtual void Backward_cpu(const vector*>& top, 39 | const vector& propagate_down, const vector*>& bottom) {} 40 | }; 41 | 42 | } // namespace caffe 43 | 44 | #endif // CAFFE_INPUT_LAYER_HPP_ 45 | -------------------------------------------------------------------------------- /python/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(NOT HAVE_PYTHON) 2 | message(STATUS "Python interface is disabled or not all required dependencies found. Building without it...") 3 | return() 4 | endif() 5 | 6 | include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) 7 | file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) 8 | 9 | add_library(pycaffe SHARED ${python_srcs}) 10 | target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) 11 | set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") 12 | caffe_default_properties(pycaffe) 13 | 14 | if(UNIX OR APPLE) 15 | set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so") 16 | add_custom_command(TARGET pycaffe POST_BUILD 17 | COMMAND ln -sf $ "${__linkname}" 18 | COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/python/caffe/proto 19 | COMMAND touch ${PROJECT_SOURCE_DIR}/python/caffe/proto/__init__.py 20 | COMMAND cp ${proto_gen_folder}/*.py ${PROJECT_SOURCE_DIR}/python/caffe/proto/ 21 | COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/_caffe${Caffe_POSTFIX}.so") 22 | endif() 23 | 24 | # ---[ Install 25 | file(GLOB files1 *.py requirements.txt) 26 | install(FILES ${files1} DESTINATION python) 27 | 28 | file(GLOB files2 caffe/*.py) 29 | install(FILES ${files2} DESTINATION python/caffe) 30 | install(TARGETS pycaffe DESTINATION python/caffe) 31 | install(DIRECTORY caffe/imagenet caffe/proto caffe/test DESTINATION python/caffe) 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /include/caffe/layers/silence_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_SILENCE_LAYER_HPP_ 2 | #define CAFFE_SILENCE_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Ignores bottom blobs while producing no top blobs. (This is useful 14 | * to suppress outputs during testing.) 
15 | */ 16 | template 17 | class SilenceLayer : public Layer { 18 | public: 19 | explicit SilenceLayer(const LayerParameter& param) 20 | : Layer(param) {} 21 | virtual void Reshape(const vector*>& bottom, 22 | const vector*>& top) {} 23 | 24 | virtual inline const char* type() const { return "Silence"; } 25 | virtual inline int MinBottomBlobs() const { return 1; } 26 | virtual inline int ExactNumTopBlobs() const { return 0; } 27 | 28 | protected: 29 | virtual void Forward_cpu(const vector*>& bottom, 30 | const vector*>& top) {} 31 | // We can't define Forward_gpu here, since STUB_GPU will provide 32 | // its own definition for CPU_ONLY mode. 33 | virtual void Forward_gpu(const vector*>& bottom, 34 | const vector*>& top); 35 | virtual void Backward_cpu(const vector*>& top, 36 | const vector& propagate_down, const vector*>& bottom); 37 | virtual void Backward_gpu(const vector*>& top, 38 | const vector& propagate_down, const vector*>& bottom); 39 | }; 40 | 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_SILENCE_LAYER_HPP_ 44 | -------------------------------------------------------------------------------- /include/caffe/layers/split_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_SPLIT_LAYER_HPP_ 2 | #define CAFFE_SPLIT_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Creates a "split" path in the network by copying the bottom Blob 14 | * into multiple top Blob%s to be used by multiple consuming layers. 15 | * 16 | * TODO(dox): thorough documentation for Forward, Backward, and proto params. 17 | */ 18 | template 19 | class SplitLayer : public Layer { 20 | public: 21 | explicit SplitLayer(const LayerParameter& param) 22 | : Layer(param) {} 23 | virtual void Reshape(const vector*>& bottom, 24 | const vector*>& top); 25 | 26 | virtual inline const char* type() const { return "Split"; } 27 | virtual inline int ExactNumBottomBlobs() const { return 1; } 28 | virtual inline int MinTopBlobs() const { return 1; } 29 | 30 | protected: 31 | virtual void Forward_cpu(const vector*>& bottom, 32 | const vector*>& top); 33 | virtual void Forward_gpu(const vector*>& bottom, 34 | const vector*>& top); 35 | virtual void Backward_cpu(const vector*>& top, 36 | const vector& propagate_down, const vector*>& bottom); 37 | virtual void Backward_gpu(const vector*>& top, 38 | const vector& propagate_down, const vector*>& bottom); 39 | 40 | int count_; 41 | }; 42 | 43 | } // namespace caffe 44 | 45 | #endif // CAFFE_SPLIT_LAYER_HPP_ 46 | -------------------------------------------------------------------------------- /include/caffe/layers/video_data_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_VIDEO_DATA_LAYER_HPP_ 2 | #define CAFFE_VIDEO_DATA_LAYER_HPP_ 3 | 4 | #ifdef USE_OPENCV 5 | #if OPENCV_VERSION == 3 6 | #include 7 | #else 8 | #include 9 | #endif // OPENCV_VERSION == 3 10 | #endif // USE_OPENCV 11 | 12 | #include 13 | #include 14 | 15 | #include "caffe/blob.hpp" 16 | #include "caffe/data_transformer.hpp" 17 | #include "caffe/internal_thread.hpp" 18 | #include "caffe/layer.hpp" 19 | #include "caffe/layers/base_data_layer.hpp" 20 | #include "caffe/proto/caffe.pb.h" 21 | #include "caffe/util/db.hpp" 22 | 23 | namespace caffe { 24 | 25 | /** 26 | * @brief Provides data to the Net from webcam or video files. 
27 | * 28 | * TODO(weiliu89): thorough documentation for Forward and proto params. 29 | */ 30 | template 31 | class VideoDataLayer : public BasePrefetchingDataLayer { 32 | public: 33 | explicit VideoDataLayer(const LayerParameter& param); 34 | virtual ~VideoDataLayer(); 35 | virtual void DataLayerSetUp(const vector*>& bottom, 36 | const vector*>& top); 37 | virtual inline bool ShareInParallel() const { return false; } 38 | virtual inline const char* type() const { return "VideoData"; } 39 | virtual inline int ExactNumBottomBlobs() const { return 0; } 40 | virtual inline int MinTopBlobs() const { return 1; } 41 | 42 | protected: 43 | virtual void load_batch(Batch* batch); 44 | 45 | VideoDataParameter_VideoType video_type_; 46 | cv::VideoCapture cap_; 47 | }; 48 | 49 | } // namespace caffe 50 | 51 | #endif // CAFFE_VIDEO_DATA_LAYER_HPP_ 52 | -------------------------------------------------------------------------------- /cmake/Modules/FindGlog.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Glog 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GLOG_ROOT_DIR: Base directory where all GLOG components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GLOG_FOUND 8 | # GLOG_INCLUDE_DIRS 9 | # GLOG_LIBRARIES 10 | # GLOG_LIBRARYRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") 15 | 16 | if(WIN32) 17 | find_path(GLOG_INCLUDE_DIR glog/logging.h 18 | PATHS ${GLOG_ROOT_DIR}/src/windows) 19 | else() 20 | find_path(GLOG_INCLUDE_DIR glog/logging.h 21 | PATHS ${GLOG_ROOT_DIR}) 22 | endif() 23 | 24 | if(MSVC) 25 | find_library(GLOG_LIBRARY_RELEASE libglog_static 26 | PATHS ${GLOG_ROOT_DIR} 27 | PATH_SUFFIXES Release) 28 | 29 | find_library(GLOG_LIBRARY_DEBUG libglog_static 30 | PATHS ${GLOG_ROOT_DIR} 31 | PATH_SUFFIXES Debug) 32 | 33 | set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) 34 | else() 35 | find_library(GLOG_LIBRARY glog 36 | PATHS ${GLOG_ROOT_DIR} 37 | PATH_SUFFIXES lib lib64) 38 | endif() 39 | 40 | find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY) 41 | 42 | if(GLOG_FOUND) 43 | set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) 44 | set(GLOG_LIBRARIES ${GLOG_LIBRARY}) 45 | message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})") 46 | mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG 47 | GLOG_LIBRARY GLOG_INCLUDE_DIR) 48 | endif() 49 | -------------------------------------------------------------------------------- /src/caffe/layers/cudnn_lcn_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/layers/cudnn_lcn_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNLCNLayer::Forward_gpu(const vector*>& bottom, 10 | const vector*>& top) { 11 | const Dtype* bottom_data = bottom[0]->gpu_data(); 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | 14 | CUDNN_CHECK(cudnnDivisiveNormalizationForward( 15 | handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS, 16 | cudnn::dataType::one, 17 | bottom_desc_, bottom_data, 18 | NULL, // srcMeansData 19 | this->tempData1, this->tempData2, 20 | cudnn::dataType::zero, 21 | top_desc_, top_data) ); 22 | } 23 | 24 | template 25 | void CuDNNLCNLayer::Backward_gpu(const vector*>& top, 26 | const vector& propagate_down, const vector*>& bottom) { 27 | const 
Dtype* top_diff = top[0]->gpu_diff(); 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* bottom_data = bottom[0]->gpu_data(); 30 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 31 | 32 | CUDNN_CHECK(cudnnDivisiveNormalizationBackward( 33 | handle_, norm_desc_, CUDNN_DIVNORM_PRECOMPUTED_MEANS, 34 | cudnn::dataType::one, 35 | bottom_desc_, bottom_data, 36 | NULL, top_diff, // NULL - srcMeansData 37 | this->tempData1, this->tempData2, 38 | cudnn::dataType::zero, 39 | bottom_desc_, bottom_diff, 40 | NULL) ); 41 | } 42 | 43 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNLCNLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /include/caffe/layers/mvn_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_MVN_LAYER_HPP_ 2 | #define CAFFE_MVN_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | /** 13 | * @brief Normalizes the input to have 0-mean and/or unit (1) variance. 14 | * 15 | * TODO(dox): thorough documentation for Forward, Backward, and proto params. 16 | */ 17 | template 18 | class MVNLayer : public Layer { 19 | public: 20 | explicit MVNLayer(const LayerParameter& param) 21 | : Layer(param) {} 22 | virtual void Reshape(const vector*>& bottom, 23 | const vector*>& top); 24 | 25 | virtual inline const char* type() const { return "MVN"; } 26 | virtual inline int ExactNumBottomBlobs() const { return 1; } 27 | virtual inline int ExactNumTopBlobs() const { return 1; } 28 | 29 | protected: 30 | virtual void Forward_cpu(const vector*>& bottom, 31 | const vector*>& top); 32 | virtual void Forward_gpu(const vector*>& bottom, 33 | const vector*>& top); 34 | virtual void Backward_cpu(const vector*>& top, 35 | const vector& propagate_down, const vector*>& bottom); 36 | virtual void Backward_gpu(const vector*>& top, 37 | const vector& propagate_down, const vector*>& bottom); 38 | 39 | Blob mean_, variance_, temp_; 40 | 41 | /// sum_multiplier is used to carry out sum using BLAS 42 | Blob sum_multiplier_; 43 | Dtype eps_; 44 | }; 45 | 46 | } // namespace caffe 47 | 48 | #endif // CAFFE_MVN_LAYER_HPP_ 49 | -------------------------------------------------------------------------------- /cmake/lint.cmake: -------------------------------------------------------------------------------- 1 | 2 | set(CMAKE_SOURCE_DIR ..) 
3 | set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py) 4 | set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc) 5 | set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc) 6 | set(LINT_DIRS include src/caffe examples tools python matlab) 7 | 8 | cmake_policy(SET CMP0009 NEW) # suppress cmake warning 9 | 10 | # find all files of interest 11 | foreach(ext ${SRC_FILE_EXTENSIONS}) 12 | foreach(dir ${LINT_DIRS}) 13 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext}) 14 | set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES}) 15 | endforeach() 16 | endforeach() 17 | 18 | # find all files that should be excluded 19 | foreach(ext ${EXCLUDE_FILE_EXTENSTIONS}) 20 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext}) 21 | set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES}) 22 | endforeach() 23 | 24 | # exclude generated pb files 25 | list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES}) 26 | 27 | execute_process( 28 | COMMAND ${LINT_COMMAND} ${LINT_SOURCES} 29 | ERROR_VARIABLE LINT_OUTPUT 30 | ERROR_STRIP_TRAILING_WHITESPACE 31 | ) 32 | 33 | string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT}) 34 | 35 | list(GET LINT_OUTPUT -1 LINT_RESULT) 36 | list(REMOVE_AT LINT_OUTPUT -1) 37 | string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT}) 38 | list(GET LINT_RESULT -1 NUM_ERRORS) 39 | if(NUM_ERRORS GREATER 0) 40 | foreach(msg ${LINT_OUTPUT}) 41 | string(FIND ${msg} "Done" result) 42 | if(result LESS 0) 43 | message(STATUS ${msg}) 44 | endif() 45 | endforeach() 46 | message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!") 47 | else() 48 | message(STATUS "Lint did not find any errors!") 49 | endif() 50 | 51 | -------------------------------------------------------------------------------- /src/caffe/test/test_solver_factory.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "boost/scoped_ptr.hpp" 5 | #include "google/protobuf/text_format.h" 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/common.hpp" 9 | #include "caffe/solver.hpp" 10 | #include "caffe/solver_factory.hpp" 11 | 12 | #include "caffe/test/test_caffe_main.hpp" 13 | 14 | namespace caffe { 15 | 16 | template 17 | class SolverFactoryTest : public MultiDeviceTest { 18 | protected: 19 | SolverParameter simple_solver_param() { 20 | const string solver_proto = 21 | "train_net_param { " 22 | " layer { " 23 | " name: 'data' type: 'DummyData' top: 'data' " 24 | " dummy_data_param { shape { dim: 1 } } " 25 | " } " 26 | "} "; 27 | SolverParameter solver_param; 28 | CHECK(google::protobuf::TextFormat::ParseFromString( 29 | solver_proto, &solver_param)); 30 | return solver_param; 31 | } 32 | }; 33 | 34 | TYPED_TEST_CASE(SolverFactoryTest, TestDtypesAndDevices); 35 | 36 | TYPED_TEST(SolverFactoryTest, TestCreateSolver) { 37 | typedef typename TypeParam::Dtype Dtype; 38 | typename SolverRegistry::CreatorRegistry& registry = 39 | SolverRegistry::Registry(); 40 | shared_ptr > solver; 41 | SolverParameter solver_param = this->simple_solver_param(); 42 | for (typename SolverRegistry::CreatorRegistry::iterator iter = 43 | registry.begin(); iter != registry.end(); ++iter) { 44 | solver_param.set_type(iter->first); 45 | solver.reset(SolverRegistry::CreateSolver(solver_param)); 46 | EXPECT_EQ(iter->first, solver->type()); 47 | } 48 | } 49 | 50 | } // namespace caffe 51 | -------------------------------------------------------------------------------- /src/caffe/layers/cudnn_pooling_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 
| #include 3 | 4 | #include "caffe/layers/cudnn_pooling_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template 9 | void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, 10 | const vector*>& top) { 11 | PoolingLayer::LayerSetUp(bottom, top); 12 | CUDNN_CHECK(cudnnCreate(&handle_)); 13 | cudnn::createTensor4dDesc(&bottom_desc_); 14 | cudnn::createTensor4dDesc(&top_desc_); 15 | cudnn::createPoolingDesc(&pooling_desc_, 16 | this->layer_param_.pooling_param().pool(), &mode_, 17 | this->kernel_h_, this->kernel_w_, this->pad_h_, this->pad_w_, 18 | this->stride_h_, this->stride_w_); 19 | handles_setup_ = true; 20 | } 21 | 22 | template 23 | void CuDNNPoolingLayer::Reshape(const vector*>& bottom, 24 | const vector*>& top) { 25 | PoolingLayer::Reshape(bottom, top); 26 | cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), 27 | this->channels_, this->height_, this->width_); 28 | cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), 29 | this->channels_, this->pooled_height_, this->pooled_width_); 30 | } 31 | 32 | template 33 | CuDNNPoolingLayer::~CuDNNPoolingLayer() { 34 | // Check that handles have been setup before destroying. 35 | if (!handles_setup_) { return; } 36 | 37 | cudnnDestroyTensorDescriptor(bottom_desc_); 38 | cudnnDestroyTensorDescriptor(top_desc_); 39 | cudnnDestroyPoolingDescriptor(pooling_desc_); 40 | cudnnDestroy(handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNPoolingLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | 3 | # Compiled Object files 4 | *.slo 5 | *.lo 6 | *.o 7 | *.cuo 8 | 9 | # Compiled Dynamic libraries 10 | *.so 11 | *.dylib 12 | 13 | # Compiled Static libraries 14 | *.lai 15 | *.la 16 | *.a 17 | 18 | # Compiled protocol buffers 19 | *.pb.h 20 | *.pb.cc 21 | *_pb2.py 22 | 23 | # Compiled python 24 | *.pyc 25 | 26 | # Compiled MATLAB 27 | *.mex* 28 | 29 | # IPython notebook checkpoints 30 | .ipynb_checkpoints 31 | 32 | # Editor temporaries 33 | *.swp 34 | *~ 35 | 36 | # Sublime Text settings 37 | *.sublime-workspace 38 | *.sublime-project 39 | 40 | # Eclipse Project settings 41 | *.*project 42 | .settings 43 | 44 | # QtCreator files 45 | *.user 46 | 47 | # PyCharm files 48 | .idea 49 | 50 | # OSX dir files 51 | .DS_Store 52 | 53 | ## Caffe 54 | 55 | # User's build configuration 56 | Makefile.config 57 | 58 | # Data and models are either 59 | # 1. reference, and not casually committed 60 | # 2. 
custom, and live on their own unless they're deliberately contributed
61 | #data/*
62 | models/*
63 | *.caffemodel
64 | *.caffemodel.h5
65 | *.solverstate
66 | *.solverstate.h5
67 | *.binaryproto
68 | *leveldb
69 | *lmdb
70 | 
71 | # build, distribute, and bins (+ python proto bindings)
72 | build
73 | .build_debug/*
74 | .build_release/*
75 | distribute/*
76 | *.testbin
77 | *.bin
78 | python/caffe/proto/
79 | cmake_build
80 | .cmake_build
81 | 
82 | # Generated documentation
83 | docs/_site
84 | docs/gathered
85 | _site
86 | doxygen
87 | docs/dev
88 | 
89 | # LevelDB files
90 | *.sst
91 | *.ldb
92 | LOCK
93 | LOG*
94 | CURRENT
95 | MANIFEST-*
96 | 
97 | # temporary directories
98 | jobs
99 | temp
100 | examples/*/*lmdb
--------------------------------------------------------------------------------
/include/caffe/layers/cudnn_pooling_layer.hpp:
--------------------------------------------------------------------------------
 1 | #ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_
 2 | #define CAFFE_CUDNN_POOLING_LAYER_HPP_
 3 | 
 4 | #include <vector>
 5 | 
 6 | #include "caffe/blob.hpp"
 7 | #include "caffe/layer.hpp"
 8 | #include "caffe/proto/caffe.pb.h"
 9 | 
10 | #include "caffe/layers/pooling_layer.hpp"
11 | 
12 | namespace caffe {
13 | 
14 | #ifdef USE_CUDNN
15 | /*
16 |  * @brief cuDNN implementation of PoolingLayer.
17 |  *        Fallback to PoolingLayer for CPU mode.
18 |  */
19 | template <typename Dtype>
20 | class CuDNNPoolingLayer : public PoolingLayer<Dtype> {
21 |  public:
22 |   explicit CuDNNPoolingLayer(const LayerParameter& param)
23 |       : PoolingLayer<Dtype>(param), handles_setup_(false) {}
24 |   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
25 |       const vector<Blob<Dtype>*>& top);
26 |   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
27 |       const vector<Blob<Dtype>*>& top);
28 |   virtual ~CuDNNPoolingLayer();
29 |   // Currently, cuDNN does not support the extra top blob.
30 | virtual inline int MinTopBlobs() const { return -1; } 31 | virtual inline int ExactNumTopBlobs() const { return 1; } 32 | 33 | protected: 34 | virtual void Forward_gpu(const vector*>& bottom, 35 | const vector*>& top); 36 | virtual void Backward_gpu(const vector*>& top, 37 | const vector& propagate_down, const vector*>& bottom); 38 | 39 | bool handles_setup_; 40 | cudnnHandle_t handle_; 41 | cudnnTensorDescriptor_t bottom_desc_, top_desc_; 42 | cudnnPoolingDescriptor_t pooling_desc_; 43 | cudnnPoolingMode_t mode_; 44 | }; 45 | #endif 46 | 47 | } // namespace caffe 48 | 49 | #endif // CAFFE_CUDNN_POOLING_LAYER_HPP_ 50 | -------------------------------------------------------------------------------- /src/caffe/test/test_layer_factory.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "boost/scoped_ptr.hpp" 5 | #include "gtest/gtest.h" 6 | 7 | #include "caffe/common.hpp" 8 | #include "caffe/layer.hpp" 9 | #include "caffe/layer_factory.hpp" 10 | #include "caffe/util/db.hpp" 11 | #include "caffe/util/io.hpp" 12 | 13 | #include "caffe/test/test_caffe_main.hpp" 14 | 15 | namespace caffe { 16 | 17 | template 18 | class LayerFactoryTest : public MultiDeviceTest {}; 19 | 20 | TYPED_TEST_CASE(LayerFactoryTest, TestDtypesAndDevices); 21 | 22 | TYPED_TEST(LayerFactoryTest, TestCreateLayer) { 23 | typedef typename TypeParam::Dtype Dtype; 24 | typename LayerRegistry::CreatorRegistry& registry = 25 | LayerRegistry::Registry(); 26 | shared_ptr > layer; 27 | for (typename LayerRegistry::CreatorRegistry::iterator iter = 28 | registry.begin(); iter != registry.end(); ++iter) { 29 | // Special case: PythonLayer is checked by pytest 30 | if (iter->first == "Python") { continue; } 31 | LayerParameter layer_param; 32 | // Data layers expect a DB 33 | if (iter->first == "Data" || iter->first == "AnnotatedData") { 34 | #ifdef USE_LEVELDB 35 | string tmp; 36 | MakeTempDir(&tmp); 37 | boost::scoped_ptr db(db::GetDB(DataParameter_DB_LEVELDB)); 38 | db->Open(tmp, db::NEW); 39 | db->Close(); 40 | layer_param.mutable_data_param()->set_source(tmp); 41 | #else 42 | continue; 43 | #endif // USE_LEVELDB 44 | } 45 | layer_param.set_type(iter->first); 46 | layer = LayerRegistry::CreateLayer(layer_param); 47 | EXPECT_EQ(iter->first, layer->type()); 48 | } 49 | } 50 | 51 | } // namespace caffe 52 | -------------------------------------------------------------------------------- /cmake/Modules/FindGFlags.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find GFLAGS 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GFLAGS_FOUND 8 | # GFLAGS_INCLUDE_DIRS 9 | # GFLAGS_LIBRARIES 10 | # GFLAGS_LIBRARYRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") 15 | 16 | # We are testing only a couple of files in the include directories 17 | if(WIN32) 18 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 19 | PATHS ${GFLAGS_ROOT_DIR}/src/windows) 20 | else() 21 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 22 | PATHS ${GFLAGS_ROOT_DIR}) 23 | endif() 24 | 25 | if(MSVC) 26 | find_library(GFLAGS_LIBRARY_RELEASE 27 | NAMES libgflags 28 | PATHS ${GFLAGS_ROOT_DIR} 29 | PATH_SUFFIXES Release) 30 | 31 | find_library(GFLAGS_LIBRARY_DEBUG 32 | NAMES libgflags-debug 33 | PATHS 
${GFLAGS_ROOT_DIR} 34 | PATH_SUFFIXES Debug) 35 | 36 | set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) 37 | else() 38 | find_library(GFLAGS_LIBRARY gflags) 39 | endif() 40 | 41 | find_package_handle_standard_args(GFlags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) 42 | 43 | 44 | if(GFLAGS_FOUND) 45 | set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) 46 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) 47 | message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})") 48 | mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE 49 | GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) 50 | endif() 51 | -------------------------------------------------------------------------------- /tools/upgrade_net_proto_binary.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 2 | // Usage: 3 | // upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out 4 | 5 | #include 6 | #include // NOLINT(readability/streams) 7 | #include // NOLINT(readability/streams) 8 | #include 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging) 20 | ::google::InitGoogleLogging(argv[0]); 21 | if (argc != 3) { 22 | LOG(ERROR) << "Usage: " 23 | << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out"; 24 | return 1; 25 | } 26 | 27 | NetParameter net_param; 28 | string input_filename(argv[1]); 29 | if (!ReadProtoFromBinaryFile(input_filename, &net_param)) { 30 | LOG(ERROR) << "Failed to parse input binary file as NetParameter: " 31 | << input_filename; 32 | return 2; 33 | } 34 | bool need_upgrade = NetNeedsUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | WriteProtoToBinaryFile(net_param, argv[2]); 47 | 48 | LOG(INFO) << "Wrote upgraded NetParameter binary proto to " << argv[2]; 49 | return !success; 50 | } 51 | -------------------------------------------------------------------------------- /tools/upgrade_net_proto_text.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 
--------------------------------------------------------------------------------
/tools/upgrade_net_proto_text.cpp:
--------------------------------------------------------------------------------
// This is a script to upgrade "V0" network prototxts to the new format.
// Usage:
//    upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out

#include <cstring>
#include <fstream>  // NOLINT(readability/streams)
#include <iostream>  // NOLINT(readability/streams)
#include <string>

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"

using std::ofstream;

using namespace caffe;  // NOLINT(build/namespaces)

int main(int argc, char** argv) {
  FLAGS_alsologtostderr = 1;  // Print output to stderr (while still logging)
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: "
        << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out";
    return 1;
  }

  NetParameter net_param;
  string input_filename(argv[1]);
  if (!ReadProtoFromTextFile(input_filename, &net_param)) {
    LOG(ERROR) << "Failed to parse input text file as NetParameter: "
               << input_filename;
    return 2;
  }
  bool need_upgrade = NetNeedsUpgrade(net_param);
  bool success = true;
  if (need_upgrade) {
    success = UpgradeNetAsNeeded(input_filename, &net_param);
    if (!success) {
      LOG(ERROR) << "Encountered error(s) while upgrading prototxt; "
                 << "see details above.";
    }
  } else {
    LOG(ERROR) << "File already in latest proto format: " << input_filename;
  }

  // Save new format prototxt.
  WriteProtoToTextFile(net_param, argv[2]);

  LOG(INFO) << "Wrote upgraded NetParameter text proto to " << argv[2];
  return !success;
}
--------------------------------------------------------------------------------
/include/caffe/layers/softmax_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_SOFTMAX_LAYER_HPP_
#define CAFFE_SOFTMAX_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Computes the softmax function.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class SoftmaxLayer : public Layer<Dtype> {
 public:
  explicit SoftmaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Softmax"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int outer_num_;
  int inner_num_;
  int softmax_axis_;
  /// sum_multiplier is used to carry out sum using BLAS
  Blob<Dtype> sum_multiplier_;
  /// scale is an intermediate Blob to hold temporary results.
  Blob<Dtype> scale_;
};

}  // namespace caffe

#endif  // CAFFE_SOFTMAX_LAYER_HPP_
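// Editor's note: a minimal usage sketch, not taken from this repo (the layer
// and blob names are assumptions). A SoftmaxLayer is typically attached to
// the logits of a net in a prototxt as:
//
//   layer {
//     name: "prob"
//     type: "Softmax"
//     bottom: "fc8"
//     top: "prob"
//     softmax_param { axis: 1 }  # normalize over channels (the default axis)
//   }
//
// For each of the outer_num_ * inner_num_ positions, Forward computes
// prob_i = exp(x_i - max_j x_j) / sum_k exp(x_k - max_j x_j); subtracting the
// per-position max (staged in scale_) keeps exp() from overflowing.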
--------------------------------------------------------------------------------
/src/caffe/layers/hdf5_data_layer.cu:
--------------------------------------------------------------------------------
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/

#include <stdint.h>
#include <vector>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/layers/hdf5_data_layer.hpp"

namespace caffe {

template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
  for (int i = 0; i < batch_size; ++i, ++current_row_) {
    if (current_row_ == hdf_blobs_[0]->shape(0)) {
      if (num_files_ > 1) {
        current_file_ += 1;
        if (current_file_ == num_files_) {
          current_file_ = 0;
          if (this->layer_param_.hdf5_data_param().shuffle()) {
            std::random_shuffle(file_permutation_.begin(),
                                file_permutation_.end());
          }
          DLOG(INFO) << "Looping around to first file.";
        }
        LoadHDF5FileData(
            hdf_filenames_[file_permutation_[current_file_]].c_str());
      }
      current_row_ = 0;
      if (this->layer_param_.hdf5_data_param().shuffle())
        std::random_shuffle(data_permutation_.begin(), data_permutation_.end());
    }
    for (int j = 0; j < this->layer_param_.top_size(); ++j) {
      int data_dim = top[j]->count() / top[j]->shape(0);
      caffe_copy(data_dim,
          &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
            * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);

}  // namespace caffe
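// Editor's note: a hedged usage sketch (the file names are assumptions). The
// GPU forward above serves batches for an HDF5Data layer declared like:
//
//   layer {
//     name: "data"
//     type: "HDF5Data"
//     top: "data"
//     top: "label"
//     hdf5_data_param {
//       source: "train_h5_list.txt"  # text file with one .h5 path per line
//       batch_size: 64
//       shuffle: true
//     }
//   }
//
// Each top name must match a dataset stored in the listed HDF5 files.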
--------------------------------------------------------------------------------
/src/caffe/layers/cudnn_lrn_layer.cpp:
--------------------------------------------------------------------------------
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_lrn_layer.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  LRNLayer<Dtype>::LayerSetUp(bottom, top);

  CUDNN_CHECK(cudnnCreate(&handle_));
  CUDNN_CHECK(cudnnCreateLRNDescriptor(&norm_desc_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);

  // create a LRN handle
  handles_setup_ = true;

  size_ = this->layer_param().lrn_param().local_size();
  alpha_ = this->layer_param().lrn_param().alpha();
  beta_ = this->layer_param().lrn_param().beta();
  k_ = this->layer_param().lrn_param().k();
}

template <typename Dtype>
void CuDNNLRNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  LRNLayer<Dtype>::Reshape(bottom, top);
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, bottom[0]->num(),
      this->channels_, this->height_, this->width_);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, bottom[0]->num(),
      this->channels_, this->height_, this->width_);
  CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_));
}

template <typename Dtype>
CuDNNLRNLayer<Dtype>::~CuDNNLRNLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensorDescriptor(bottom_desc_);
  cudnnDestroyTensorDescriptor(top_desc_);

  // destroy LRN handle
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNLRNLayer);

}  // namespace caffe
#endif
--------------------------------------------------------------------------------
/include/caffe/layers/eltwise_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_ELTWISE_LAYER_HPP_
#define CAFFE_ELTWISE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Compute elementwise operations, such as product and sum,
 *        along multiple input Blobs.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class EltwiseLayer : public Layer<Dtype> {
 public:
  explicit EltwiseLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Eltwise"; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  EltwiseParameter_EltwiseOp op_;
  vector<Dtype> coeffs_;
  Blob<int> max_idx_;

  bool stable_prod_grad_;
};

}  // namespace caffe

#endif  // CAFFE_ELTWISE_LAYER_HPP_
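// Editor's note: an illustrative prototxt sketch (the blob names are
// assumptions). A residual-style sum of two branches:
//
//   layer {
//     name: "fuse"
//     type: "Eltwise"
//     bottom: "branch1"
//     bottom: "branch2"
//     top: "fuse"
//     eltwise_param { operation: SUM }  # SUM (default), PROD, or MAX;
//   }                                   # SUM also accepts per-bottom coeff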
--------------------------------------------------------------------------------
/include/caffe/layers/slice_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_SLICE_LAYER_HPP_
#define CAFFE_SLICE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Takes a Blob and slices it along either the num or channel dimension,
 *        outputting multiple sliced Blob results.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class SliceLayer : public Layer<Dtype> {
 public:
  explicit SliceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Slice"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int count_;
  int num_slices_;
  int slice_size_;
  int slice_axis_;
  vector<int> slice_point_;
};

}  // namespace caffe

#endif  // CAFFE_SLICE_LAYER_HPP_
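// Editor's note: an illustrative sketch (the names and sizes are assumptions).
// Splitting a 10-channel blob into channels 0-3 and 4-9:
//
//   layer {
//     name: "slice"
//     type: "Slice"
//     bottom: "conv_out"
//     top: "part1"
//     top: "part2"
//     slice_param { axis: 1 slice_point: 4 }
//   }
//
// With N tops, slice_param takes N-1 slice_point values; omitting them cuts
// the axis into N equal pieces.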
--------------------------------------------------------------------------------
/tools/upgrade_solver_proto_text.cpp:
--------------------------------------------------------------------------------
// This is a script to upgrade old solver prototxts to the new format.
// Usage:
//    upgrade_solver_proto_text old_solver_proto_file_in solver_proto_file_out

#include <cstring>
#include <fstream>  // NOLINT(readability/streams)
#include <iostream>  // NOLINT(readability/streams)
#include <string>

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"

using std::ofstream;

using namespace caffe;  // NOLINT(build/namespaces)

int main(int argc, char** argv) {
  FLAGS_alsologtostderr = 1;  // Print output to stderr (while still logging)
  ::google::InitGoogleLogging(argv[0]);
  if (argc != 3) {
    LOG(ERROR) << "Usage: upgrade_solver_proto_text "
        << "old_solver_proto_file_in solver_proto_file_out";
    return 1;
  }

  SolverParameter solver_param;
  string input_filename(argv[1]);
  if (!ReadProtoFromTextFile(input_filename, &solver_param)) {
    LOG(ERROR) << "Failed to parse input text file as SolverParameter: "
               << input_filename;
    return 2;
  }
  bool need_upgrade = SolverNeedsTypeUpgrade(solver_param);
  bool success = true;
  if (need_upgrade) {
    success = UpgradeSolverAsNeeded(input_filename, &solver_param);
    if (!success) {
      LOG(ERROR) << "Encountered error(s) while upgrading prototxt; "
                 << "see details above.";
    }
  } else {
    LOG(ERROR) << "File already in latest proto format: " << input_filename;
  }

  // Save new format prototxt.
  WriteProtoToTextFile(solver_param, argv[2]);

  LOG(INFO) << "Wrote upgraded SolverParameter text proto to " << argv[2];
  return !success;
}
--------------------------------------------------------------------------------
/include/caffe/layers/dummy_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_DUMMY_DATA_LAYER_HPP_
#define CAFFE_DUMMY_DATA_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net generated by a Filler.
 *
 * TODO(dox): thorough documentation for Forward and proto params.
 */
template <typename Dtype>
class DummyDataLayer : public Layer<Dtype> {
 public:
  explicit DummyDataLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Data layers should be shared by multiple solvers in parallel
  virtual inline bool ShareInParallel() const { return true; }
  // Data layers have no bottoms, so reshaping is trivial.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  virtual inline const char* type() const { return "DummyData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}

  vector<shared_ptr<Filler<Dtype> > > fillers_;
  vector<bool> refill_;
};

}  // namespace caffe

#endif  // CAFFE_DUMMY_DATA_LAYER_HPP_
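// Editor's note: a hedged usage sketch. DummyData is convenient for
// benchmarking or debugging a net without any real data source:
//
//   layer {
//     name: "data"
//     type: "DummyData"
//     top: "data"
//     dummy_data_param {
//       shape { dim: 32 dim: 3 dim: 224 dim: 224 }
//       data_filler { type: "gaussian" std: 0.01 }
//     }
//   }
//
// Constant fillers are run once and their output reused (refill_ stays
// false); other filler types are re-run on every Forward pass.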
--------------------------------------------------------------------------------
/cmake/Modules/FindOpenBLAS.cmake:
--------------------------------------------------------------------------------


SET(Open_BLAS_INCLUDE_SEARCH_PATHS
  /usr/include
  /usr/include/openblas
  /usr/include/openblas-base
  /usr/local/include
  /usr/local/include/openblas
  /usr/local/include/openblas-base
  /opt/OpenBLAS/include
  $ENV{OpenBLAS_HOME}
  $ENV{OpenBLAS_HOME}/include
)

SET(Open_BLAS_LIB_SEARCH_PATHS
  /lib/
  /lib/openblas-base
  /lib64/
  /usr/lib
  /usr/lib/openblas-base
  /usr/lib64
  /usr/local/lib
  /usr/local/lib64
  /opt/OpenBLAS/lib
  $ENV{OpenBLAS}
  $ENV{OpenBLAS}/lib
  $ENV{OpenBLAS_HOME}
  $ENV{OpenBLAS_HOME}/lib
)

FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS})
FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})

SET(OpenBLAS_FOUND ON)

# Check include files
IF(NOT OpenBLAS_INCLUDE_DIR)
  SET(OpenBLAS_FOUND OFF)
  MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off")
ENDIF()

# Check libraries
IF(NOT OpenBLAS_LIB)
  SET(OpenBLAS_FOUND OFF)
  MESSAGE(STATUS "Could not find OpenBLAS lib. Turning OpenBLAS_FOUND off")
ENDIF()

IF (OpenBLAS_FOUND)
  IF (NOT OpenBLAS_FIND_QUIETLY)
    MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}")
    MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}")
  ENDIF (NOT OpenBLAS_FIND_QUIETLY)
ELSE (OpenBLAS_FOUND)
  IF (OpenBLAS_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find OpenBLAS")
  ENDIF (OpenBLAS_FIND_REQUIRED)
ENDIF (OpenBLAS_FOUND)

MARK_AS_ADVANCED(
  OpenBLAS_INCLUDE_DIR
  OpenBLAS_LIB
  OpenBLAS
)
--------------------------------------------------------------------------------
/scripts/deploy_docs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Publish documentation to the gh-pages site.

# The remote for pushing the docs (defaults to origin).
# This is where you will submit the PR to BVLC:gh-pages from.
REMOTE=${1:-origin}

echo "Generating docs and pushing to $REMOTE:gh-pages..."
echo "To build and view docs when not on master, simply do 'jekyll serve -s docs'."
echo

REMOTE_URL=`git config --get remote.${REMOTE}.url`
BRANCH=`git rev-parse --abbrev-ref HEAD`
MSG=`git log --oneline -1`

if [[ $BRANCH = 'master' ]]; then
    # Find the docs dir, no matter where the script is called
    DIR="$( cd "$(dirname "$0")" ; pwd -P )"
    DOCS_SITE_DIR=$DIR/../docs/_site

    # Make sure that docs/_site tracks remote:gh-pages.
    # If not, then we make a new repo and check out just that branch.
    mkdir -p $DOCS_SITE_DIR
    cd $DOCS_SITE_DIR
    SITE_REMOTE_URL=`git config --get remote.${REMOTE}.url`
    SITE_BRANCH=`git rev-parse --abbrev-ref HEAD`

    echo $SITE_REMOTE_URL
    echo $SITE_BRANCH
    echo `pwd`

    if [[ ( $SITE_REMOTE_URL = $REMOTE_URL ) && ( $SITE_BRANCH = 'gh-pages' ) ]]; then
        echo "Confirmed that docs/_site has same remote as main repo, and is on gh-pages."
    else
        echo "Checking out $REMOTE:gh-pages into docs/_site (will take a little time)."
        git init .
        git remote add -t gh-pages -f $REMOTE $REMOTE_URL
        git checkout gh-pages
    fi

    echo "Building the site into docs/_site, and committing the changes."
    jekyll build -s .. -d .
    git add --all .
    git commit -m "$MSG"
    git push $REMOTE gh-pages

    echo "All done!"
    cd ../..
else echo "You must run this deployment script from the 'master' branch."
fi
--------------------------------------------------------------------------------
/cmake/Modules/FindAtlas.cmake:
--------------------------------------------------------------------------------
# Find the Atlas (and Lapack) libraries
#
# The following variables are optionally searched for defaults
#  Atlas_ROOT_DIR:  Base directory where all Atlas components are found
#
# The following are set after configuration is done:
#  Atlas_FOUND
#  Atlas_INCLUDE_DIRS
#  Atlas_LIBRARIES
#  Atlas_LIBRARY_DIRS

set(Atlas_INCLUDE_SEARCH_PATHS
  /usr/include/atlas
  /usr/include/atlas-base
  $ENV{Atlas_ROOT_DIR}
  $ENV{Atlas_ROOT_DIR}/include
)

set(Atlas_LIB_SEARCH_PATHS
  /usr/lib/atlas
  /usr/lib/atlas-base
  $ENV{Atlas_ROOT_DIR}
  $ENV{Atlas_ROOT_DIR}/lib
)

find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})
find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS})

find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS})
find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS})

set(LOOKED_FOR
  Atlas_CBLAS_INCLUDE_DIR
  Atlas_CLAPACK_INCLUDE_DIR

  Atlas_CBLAS_LIBRARY
  Atlas_BLAS_LIBRARY
  Atlas_LAPACK_LIBRARY
)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR})

if(ATLAS_FOUND)
  set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR})
  set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY})
  mark_as_advanced(${LOOKED_FOR})

  message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})")
endif(ATLAS_FOUND)
--------------------------------------------------------------------------------
/matlab/+caffe/io.m:
--------------------------------------------------------------------------------
classdef io
  % a class for input and output functions

  methods (Static)
    function im_data = load_image(im_file)
      % im_data = load_image(im_file)
      %   load an image from disk into Caffe-supported data format
      %   switch channels from RGB to BGR, make width the fastest dimension
      %   and convert to single
      %   returns im_data in W x H x C. For colored images, C = 3 in BGR
      %   channels, and for grayscale images, C = 1
      CHECK(ischar(im_file), 'im_file must be a string');
      CHECK_FILE_EXIST(im_file);
      im_data = imread(im_file);
      % permute channels from RGB to BGR for colored images
      if size(im_data, 3) == 3
        im_data = im_data(:, :, [3, 2, 1]);
      end
      % flip width and height to make width the fastest dimension
      im_data = permute(im_data, [2, 1, 3]);
      % convert from uint8 to single
      im_data = single(im_data);
    end
    function mean_data = read_mean(mean_proto_file)
      % mean_data = read_mean(mean_proto_file)
      %   read image mean data from binaryproto file
      %   returns mean_data in W x H x C with BGR channels
      CHECK(ischar(mean_proto_file), 'mean_proto_file must be a string');
      CHECK_FILE_EXIST(mean_proto_file);
      mean_data = caffe_('read_mean', mean_proto_file);
    end
    function write_mean(mean_data, mean_proto_file)
      % write_mean(mean_data, mean_proto_file)
      %   write image mean data to binaryproto file
      %   mean_data should be W x H x C with BGR channels
      CHECK(ischar(mean_proto_file), 'mean_proto_file must be a string');
      CHECK(isa(mean_data, 'single'), 'mean_data must be a SINGLE matrix');
      caffe_('write_mean', mean_data, mean_proto_file);
    end
  end
end
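% Editor's note: a minimal usage sketch (the file paths are assumptions):
%
%   im_data = caffe.io.load_image('examples/images/cat.jpg');    % W x H x 3, BGR, single
%   mean_data = caffe.io.read_mean('ilsvrc_2012_mean.binaryproto');
%
% Note the W x H (not H x W) layout: width is the fastest dimension, matching
% Caffe's row-major memory order.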
--------------------------------------------------------------------------------
/include/caffe/layers/normalize_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_NORMALIZE_LAYER_HPP_
#define CAFFE_NORMALIZE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Normalizes the input to have L_p norm of 1 with scale learnable.
 *
 * TODO(weiliu89): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class NormalizeLayer : public Layer<Dtype> {
 public:
  explicit NormalizeLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Normalize"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  Blob<Dtype> norm_;
  Blob<Dtype> sum_channel_multiplier_, sum_spatial_multiplier_;
  Blob<Dtype> buffer_, buffer_channel_, buffer_spatial_;
  bool across_spatial_;
  bool channel_shared_;
  Dtype eps_;
};

}  // namespace caffe

#endif  // CAFFE_NORMALIZE_LAYER_HPP_
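// Editor's note: an illustrative sketch modeled on common SSD-style usage;
// the names and the initial scale value 20 are assumptions, not repo
// defaults. L2-normalizing a feature map with a learned per-channel scale:
//
//   layer {
//     name: "conv4_3_norm"
//     type: "Normalize"
//     bottom: "conv4_3"
//     top: "conv4_3_norm"
//     norm_param {
//       across_spatial: false                        # normalize each position
//       scale_filler { type: "constant" value: 20 }
//       channel_shared: false                        # one scale per channel
//     }
//   }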
--------------------------------------------------------------------------------
/docs/install_yum.md:
--------------------------------------------------------------------------------
---
title: "Installation: RHEL / Fedora / CentOS"
---

# RHEL / Fedora / CentOS Installation

**General dependencies**

    sudo yum install protobuf-devel leveldb-devel snappy-devel opencv-devel boost-devel hdf5-devel

**Remaining dependencies, recent OS**

    sudo yum install gflags-devel glog-devel lmdb-devel

**Remaining dependencies, if not found**

    # glog
    wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz
    tar zxvf glog-0.3.3.tar.gz
    cd glog-0.3.3
    ./configure
    make && make install
    # gflags
    wget https://github.com/schuhschuh/gflags/archive/master.zip
    unzip master.zip
    cd gflags-master
    mkdir build && cd build
    export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
    make && make install
    # lmdb
    git clone https://github.com/LMDB/lmdb
    cd lmdb/libraries/liblmdb
    make && make install

Note that glog does not compile with the most recent gflags version (2.1), so until that is resolved you will need to build glog first.

**CUDA**: Install via the NVIDIA package instead of `yum` to be certain of the library and driver versions.
Install the library and the latest driver separately; the driver bundled with the library is usually out-of-date.
+ CentOS/RHEL/Fedora:

**BLAS**: install ATLAS by `sudo yum install atlas-devel` or install OpenBLAS or MKL for better CPU performance. For the Makefile build, uncomment and set `BLAS_LIB` accordingly, as ATLAS is usually installed under `/usr/lib[64]/atlas`.

**Python** (optional): if you use the default Python you will need to `sudo yum install` the `python-devel` package to have the Python headers for building the pycaffe wrapper.

Continue with [compilation](installation.html#compilation).
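**Makefile build sketch** (an editor's illustration, not the official recipe; the compilation page linked above is authoritative):

    cp Makefile.config.example Makefile.config
    # edit Makefile.config: BLAS choice, CUDA paths, and Python paths
    make all -j$(nproc)
    make test -j$(nproc) && make runtest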
--------------------------------------------------------------------------------
/src/caffe/internal_thread.cpp:
--------------------------------------------------------------------------------
#include <boost/thread.hpp>
#include <exception>

#include "caffe/internal_thread.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

InternalThread::~InternalThread() {
  StopInternalThread();
}

bool InternalThread::is_started() const {
  return thread_ && thread_->joinable();
}

bool InternalThread::must_stop() {
  return thread_ && thread_->interruption_requested();
}

void InternalThread::StartInternalThread() {
  CHECK(!is_started()) << "Threads should persist and not be restarted.";

  int device = 0;
#ifndef CPU_ONLY
  CUDA_CHECK(cudaGetDevice(&device));
#endif
  Caffe::Brew mode = Caffe::mode();
  int rand_seed = caffe_rng_rand();
  int solver_count = Caffe::solver_count();
  bool root_solver = Caffe::root_solver();

  try {
    thread_.reset(new boost::thread(&InternalThread::entry, this, device, mode,
          rand_seed, solver_count, root_solver));
  } catch (std::exception& e) {
    LOG(FATAL) << "Thread exception: " << e.what();
  }
}

void InternalThread::entry(int device, Caffe::Brew mode, int rand_seed,
    int solver_count, bool root_solver) {
#ifndef CPU_ONLY
  CUDA_CHECK(cudaSetDevice(device));
#endif
  Caffe::set_mode(mode);
  Caffe::set_random_seed(rand_seed);
  Caffe::set_solver_count(solver_count);
  Caffe::set_root_solver(root_solver);

  InternalThreadEntry();
}

void InternalThread::StopInternalThread() {
  if (is_started()) {
    thread_->interrupt();
    try {
      thread_->join();
    } catch (boost::thread_interrupted&) {
    } catch (std::exception& e) {
      LOG(FATAL) << "Thread exception: " << e.what();
    }
  }
}

}  // namespace caffe
--------------------------------------------------------------------------------
/cmake/Modules/FindLevelDB.cmake:
--------------------------------------------------------------------------------
# - Find LevelDB
#
#  LevelDB_INCLUDES  - List of LevelDB includes
#  LevelDB_LIBRARIES - List of libraries when using LevelDB.
#  LevelDB_FOUND     - True if LevelDB found.

# Look for the header file.
find_path(LevelDB_INCLUDE NAMES leveldb/db.h
          PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include
          DOC "Path in which the file leveldb/db.h is located.")

# Look for the library.
find_library(LevelDB_LIBRARY NAMES leveldb
             PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib
             DOC "Path to leveldb library.")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY)

if(LEVELDB_FOUND)
  message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})")
  set(LevelDB_INCLUDES ${LevelDB_INCLUDE})
  set(LevelDB_LIBRARIES ${LevelDB_LIBRARY})
  mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY)

  if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h")
    file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines
         REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;")

    foreach(__line ${__version_lines})
      if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);")
        set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1})
      elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);")
        set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1})
      endif()
    endforeach()

    if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR)
      set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}")
    endif()

    caffe_clear_vars(__line __version_lines)
  endif()
endif()
--------------------------------------------------------------------------------
/include/caffe/layers/embed_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_EMBED_LAYER_HPP_
#define CAFFE_EMBED_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief A layer for learning "embeddings" of one-hot vector input.
 *        Equivalent to an InnerProductLayer with one-hot vectors as input, but
 *        for efficiency the input is the "hot" index of each column itself.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class EmbedLayer : public Layer<Dtype> {
 public:
  explicit EmbedLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Embed"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int M_;
  int K_;
  int N_;
  bool bias_term_;
  Blob<Dtype> bias_multiplier_;
};

}  // namespace caffe

#endif  // CAFFE_EMBED_LAYER_HPP_
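// Editor's note: an illustrative sketch (the names and sizes are
// assumptions). Mapping integer word ids in [0, 9999] to 128-d vectors:
//
//   layer {
//     name: "embed"
//     type: "Embed"
//     bottom: "word_ids"   # each value is a "hot" index, not a one-hot vector
//     top: "word_vecs"
//     embed_param {
//       input_dim: 10000   # K_: one-hot dimension; input values must be < this
//       num_output: 128    # N_: embedding dimension
//       bias_term: false
//     }
//   }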
--------------------------------------------------------------------------------
/src/gtest/gtest_main.cc:
--------------------------------------------------------------------------------
// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <iostream>

#include "gtest/gtest.h"

GTEST_API_ int main(int argc, char **argv) {
  std::cout << "Running main() from gtest_main.cc\n";

  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
--------------------------------------------------------------------------------
/python/caffe/test/test_io.py:
--------------------------------------------------------------------------------
import numpy as np
import unittest

import caffe


class TestBlobProtoToArray(unittest.TestCase):

    def test_old_format(self):
        data = np.zeros((10, 10))
        blob = caffe.proto.caffe_pb2.BlobProto()
        blob.data.extend(list(data.flatten()))
        shape = (1, 1, 10, 10)
        blob.num, blob.channels, blob.height, blob.width = shape

        arr = caffe.io.blobproto_to_array(blob)
        self.assertEqual(arr.shape, shape)

    def test_new_format(self):
        data = np.zeros((10, 10))
        blob = caffe.proto.caffe_pb2.BlobProto()
        blob.data.extend(list(data.flatten()))
        blob.shape.dim.extend(list(data.shape))

        arr = caffe.io.blobproto_to_array(blob)
        self.assertEqual(arr.shape, data.shape)

    def test_no_shape(self):
        data = np.zeros((10, 10))
        blob = caffe.proto.caffe_pb2.BlobProto()
        blob.data.extend(list(data.flatten()))

        with self.assertRaises(ValueError):
            caffe.io.blobproto_to_array(blob)

    def test_scalar(self):
        data = np.ones(1) * 123
        blob = caffe.proto.caffe_pb2.BlobProto()
        blob.data.extend(list(data.flatten()))

        arr = caffe.io.blobproto_to_array(blob)
        self.assertEqual(arr, 123)


class TestArrayToDatum(unittest.TestCase):

    def test_label_none_size(self):
        # Set label
        d1 = caffe.io.array_to_datum(
            np.ones((10, 10, 3)), label=1)
        # Don't set label
        d2 = caffe.io.array_to_datum(
            np.ones((10, 10, 3)))
        # Not setting the label should result in a smaller object
        self.assertGreater(
                len(d1.SerializeToString()),
                len(d2.SerializeToString()))
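# Editor's note: a round-trip sketch of the conversion APIs exercised above
# (illustrative only):
#
#   datum = caffe.io.array_to_datum(np.zeros((3, 10, 10)), label=1)  # C x H x W
#   arr = caffe.io.datum_to_array(datum)
#   assert arr.shape == (3, 10, 10) and datum.label == 1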
--------------------------------------------------------------------------------
/include/caffe/layers/window_data_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_WINDOW_DATA_LAYER_HPP_
#define CAFFE_WINDOW_DATA_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Provides data to the Net from windows of images files, specified
 *        by a window data file.
 *
 * TODO(dox): thorough documentation for Forward and proto params.
 */
template <typename Dtype>
class WindowDataLayer : public BasePrefetchingDataLayer<Dtype> {
 public:
  explicit WindowDataLayer(const LayerParameter& param)
      : BasePrefetchingDataLayer<Dtype>(param) {}
  virtual ~WindowDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "WindowData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

 protected:
  virtual unsigned int PrefetchRand();
  virtual void load_batch(Batch<Dtype>* batch);

  shared_ptr<Caffe::RNG> prefetch_rng_;
  vector<std::pair<std::string, vector<int> > > image_database_;
  enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM };
  vector<vector<float> > fg_windows_;
  vector<vector<float> > bg_windows_;
  Blob<Dtype> data_mean_;
  vector<Dtype> mean_values_;
  bool has_mean_file_;
  bool has_mean_values_;
  bool cache_images_;
  vector<std::pair<std::string, Datum > > image_database_cache_;
};

}  // namespace caffe

#endif  // CAFFE_WINDOW_DATA_LAYER_HPP_
--------------------------------------------------------------------------------
/cmake/Modules/FindMatlabMex.cmake:
--------------------------------------------------------------------------------
# This module looks for MatlabMex compiler
# Defines variables:
#   Matlab_DIR - Matlab root dir
#   Matlab_mex - path to mex compiler
#   Matlab_mexext - path to mexext

if(MSVC)
  foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7")
    get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE)
    if(__matlab_root)
      break()
    endif()
  endforeach()
endif()

if(APPLE)
  foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" "R2010a")
    if(EXISTS /Applications/MATLAB_${__ver}.app)
      set(__matlab_root /Applications/MATLAB_${__ver}.app)
      break()
    endif()
  endforeach()
endif()

if(UNIX)
  execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE
                  OUTPUT_VARIABLE __out RESULT_VARIABLE __res)

  if(__res MATCHES 0)  # Suppress `readlink` warning if `which` returned nothing
    execute_process(COMMAND which matlab COMMAND xargs readlink
                    COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n
                    OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE)
  endif()
endif()


find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root}
          DOC "Matlab directory" NO_DEFAULT_PATH)

find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)
find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext)

if(MATLABMEX_FOUND)
  mark_as_advanced(Matlab_mex Matlab_mexext)
endif()
--------------------------------------------------------------------------------
/include/caffe/layers/inner_product_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_INNER_PRODUCT_LAYER_HPP_
#define CAFFE_INNER_PRODUCT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Also known as a "fully-connected" layer, computes an inner product
 *        with a set of learned weights, and (optionally) adds biases.
 *
 * TODO(dox): thorough documentation for Forward, Backward, and proto params.
 */
template <typename Dtype>
class InnerProductLayer : public Layer<Dtype> {
 public:
  explicit InnerProductLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "InnerProduct"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int M_;
  int K_;
  int N_;
  bool bias_term_;
  Blob<Dtype> bias_multiplier_;
  bool transpose_;  ///< if true, assume transposed weights
};

}  // namespace caffe

#endif  // CAFFE_INNER_PRODUCT_LAYER_HPP_
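// Editor's note: an illustrative sketch (the names are assumptions). A
// 1000-way fully-connected classifier head:
//
//   layer {
//     name: "fc8"
//     type: "InnerProduct"
//     bottom: "fc7"
//     top: "fc8"
//     inner_product_param {
//       num_output: 1000                             # N_
//       weight_filler { type: "gaussian" std: 0.01 }
//       bias_filler { type: "constant" value: 0 }
//     }
//   }
//
// Forward computes an M_ x N_ top from an M_ x K_ bottom and a weight matrix
// stored as N_ x K_ (or K_ x N_ when transpose_ is true).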
--------------------------------------------------------------------------------
/cmake/External/glog.cmake:
--------------------------------------------------------------------------------
# glog depends on gflags
include("cmake/External/gflags.cmake")

if (NOT __GLOG_INCLUDED)
  set(__GLOG_INCLUDED TRUE)

  # try the system-wide glog first
  find_package(Glog)
  if (GLOG_FOUND)
    set(GLOG_EXTERNAL FALSE)
  else()
    # fetch and build glog from github

    # build directory
    set(glog_PREFIX ${CMAKE_BINARY_DIR}/external/glog-prefix)
    # install directory
    set(glog_INSTALL ${CMAKE_BINARY_DIR}/external/glog-install)

    # we build glog statically, but want to link it into the caffe shared library
    # this requires position-independent code
    if (UNIX)
      set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC")
    endif()

    set(GLOG_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})
    set(GLOG_C_FLAGS ${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS})

    # depend on gflags if we're also building it
    if (GFLAGS_EXTERNAL)
      set(GLOG_DEPENDS gflags)
    endif()

    ExternalProject_Add(glog
      DEPENDS ${GLOG_DEPENDS}
      PREFIX ${glog_PREFIX}
      GIT_REPOSITORY "https://github.com/google/glog"
      GIT_TAG "v0.3.4"
      UPDATE_COMMAND ""
      INSTALL_DIR ${glog_INSTALL}
      CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/..
      LOG_DOWNLOAD 1
      LOG_CONFIGURE 1
      LOG_INSTALL 1
      )

    set(GLOG_FOUND TRUE)
    set(GLOG_INCLUDE_DIRS ${glog_INSTALL}/include)
    set(GLOG_LIBRARIES ${GFLAGS_LIBRARIES} ${glog_INSTALL}/lib/libglog.a)
    set(GLOG_LIBRARY_DIRS ${glog_INSTALL}/lib)
    set(GLOG_EXTERNAL TRUE)

    list(APPEND external_project_dependencies glog)
  endif()

endif()
--------------------------------------------------------------------------------
/include/caffe/layers/python_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_PYTHON_LAYER_HPP_
#define CAFFE_PYTHON_LAYER_HPP_

#include <boost/python.hpp>
#include <vector>

#include "caffe/layer.hpp"

namespace bp = boost::python;

namespace caffe {

template <typename Dtype>
class PythonLayer : public Layer<Dtype> {
 public:
  PythonLayer(PyObject* self, const LayerParameter& param)
      : Layer<Dtype>(param), self_(bp::handle<>(bp::borrowed(self))) { }

  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // Disallow PythonLayer in MultiGPU training stage, due to GIL issues
    // Details: https://github.com/BVLC/caffe/issues/2936
    if (this->phase_ == TRAIN && Caffe::solver_count() > 1
        && !ShareInParallel()) {
      LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training";
    }
    self_.attr("param_str") = bp::str(
        this->layer_param_.python_param().param_str());
    self_.attr("setup")(bottom, top);
  }
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("reshape")(bottom, top);
  }

  virtual inline bool ShareInParallel() const {
    return this->layer_param_.python_param().share_in_parallel();
  }

  virtual inline const char* type() const { return "Python"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    self_.attr("forward")(bottom, top);
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    self_.attr("backward")(top, propagate_down, bottom);
  }

 private:
  bp::object self_;
};

}  // namespace caffe

#endif
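// Editor's note: a minimal Python-side counterpart (the module, class, and
// blob names here are assumptions, not part of this repo). The prototxt
// would reference it as:
//
//   layer {
//     name: "double"
//     type: "Python"
//     bottom: "in"
//     top: "out"
//     python_param { module: "my_layers" layer: "DoubleLayer" }
//   }
//
// and my_layers.py (found on PYTHONPATH) would define:
//
//   import caffe
//
//   class DoubleLayer(caffe.Layer):
//       def setup(self, bottom, top):
//           pass
//       def reshape(self, bottom, top):
//           top[0].reshape(*bottom[0].data.shape)
//       def forward(self, bottom, top):
//           top[0].data[...] = 2.0 * bottom[0].data
//       def backward(self, top, propagate_down, bottom):
//           if propagate_down[0]:
//               bottom[0].diff[...] = 2.0 * top[0].diff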
--------------------------------------------------------------------------------
/src/caffe/layers/euclidean_loss_layer.cpp:
--------------------------------------------------------------------------------
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.cpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_cpu_diff());  // b
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(EuclideanLossLayer);
#endif

INSTANTIATE_CLASS(EuclideanLossLayer);
REGISTER_LAYER_CLASS(EuclideanLoss);

}  // namespace caffe
--------------------------------------------------------------------------------
/cmake/Misc.cmake:
--------------------------------------------------------------------------------
# ---[ Configuration types
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE)
mark_as_advanced(CMAKE_CONFIGURATION_TYPES)

if(DEFINED CMAKE_BUILD_TYPE)
  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES})
endif()

# --[ If user doesn't specify build type then assume release
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
  set(CMAKE_BUILD_TYPE Release)
endif()

if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  set(CMAKE_COMPILER_IS_CLANGXX TRUE)
endif()

# ---[ Solution folders
caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode))

if(USE_PROJECT_FOLDERS)
  set_property(GLOBAL PROPERTY USE_FOLDERS ON)
  set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets")
endif()

# ---[ Install options
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE)
endif()

# ---[ RPATH settings
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOL "Use link paths for shared library rpath")
set(CMAKE_MACOSX_RPATH TRUE)

list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_system_dir)
if(${__is_system_dir} STREQUAL -1)
  set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib)
endif()

# ---[ Funny target
if(UNIX OR APPLE)
  add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build"
                                     COMMENT "Adding symlink: /build -> ${PROJECT_BINARY_DIR}")
endif()

# ---[ Set debug postfix
set(Caffe_DEBUG_POSTFIX "-d")

set(Caffe_POSTFIX "")
if(CMAKE_BUILD_TYPE MATCHES "Debug")
  set(Caffe_POSTFIX ${Caffe_DEBUG_POSTFIX})
endif()
--------------------------------------------------------------------------------
/docs/install_apt.md:
--------------------------------------------------------------------------------
---
title: "Installation: Ubuntu"
---

# Ubuntu Installation

**General dependencies**

    sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
    sudo apt-get install --no-install-recommends libboost-all-dev

**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions.
Install the library and the latest driver separately; the driver bundled with the library is usually out-of-date.
This can be skipped for CPU-only installation.

**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance.

**Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface.

**Remaining dependencies, 14.04**

Everything is packaged in 14.04.

    sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev

**Remaining dependencies, 12.04**

These dependencies need manual installation in 12.04.

    # glog
    wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz
    tar zxvf glog-0.3.3.tar.gz
    cd glog-0.3.3
    ./configure
    make && make install
    # gflags
    wget https://github.com/schuhschuh/gflags/archive/master.zip
    unzip master.zip
    cd gflags-master
    mkdir build && cd build
    export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
    make && make install
    # lmdb
    git clone https://github.com/LMDB/lmdb
    cd lmdb/libraries/liblmdb
    make && make install

Note that glog does not compile with the most recent gflags version (2.1), so until that is resolved you will need to build glog first.

Continue with [compilation](installation.html#compilation).
--------------------------------------------------------------------------------
/include/caffe/layers/bias_layer.hpp:
--------------------------------------------------------------------------------
#ifndef CAFFE_BIAS_LAYER_HPP_
#define CAFFE_BIAS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Computes a sum of two input Blobs, with the shape of the
 *        latter Blob "broadcast" to match the shape of the former.
 *        Equivalent to tiling the latter Blob, then computing the elementwise
 *        sum.
 *
 * The second input may be omitted, in which case it's learned as a parameter
 * of the layer.
 */
template <typename Dtype>
class BiasLayer : public Layer<Dtype> {
 public:
  explicit BiasLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Bias"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int MaxBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

 private:
  Blob<Dtype> bias_multiplier_;
  int outer_dim_, bias_dim_, inner_dim_, dim_;
};

}  // namespace caffe

#endif  // CAFFE_BIAS_LAYER_HPP_
--------------------------------------------------------------------------------