├── LICENSE.txt ├── LICENSE_FRCNN.txt ├── README.md ├── caffe-fast-rcnn ├── .Doxyfile ├── .travis.yml ├── CMakeLists.txt ├── CONTRIBUTORS.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── Makefile.config ├── README.md ├── caffe.cloc ├── cmake │ ├── ConfigGen.cmake │ ├── Cuda.cmake │ ├── Dependencies.cmake │ ├── Misc.cmake │ ├── Modules │ │ ├── FindAtlas.cmake │ │ ├── FindGFlags.cmake │ │ ├── FindGlog.cmake │ │ ├── FindLAPACK.cmake │ │ ├── FindLMDB.cmake │ │ ├── FindLevelDB.cmake │ │ ├── FindMKL.cmake │ │ ├── FindMatlabMex.cmake │ │ ├── FindNumPy.cmake │ │ ├── FindOpenBLAS.cmake │ │ ├── FindSnappy.cmake │ │ └── FindvecLib.cmake │ ├── ProtoBuf.cmake │ ├── Summary.cmake │ ├── Targets.cmake │ ├── Templates │ │ ├── CaffeConfig.cmake.in │ │ ├── CaffeConfigVersion.cmake.in │ │ └── caffe_config.h.in │ ├── Utils.cmake │ └── lint.cmake ├── docs │ ├── CMakeLists.txt │ ├── CNAME │ ├── README.md │ ├── _config.yml │ ├── _layouts │ │ └── default.html │ ├── development.md │ ├── images │ │ ├── GitHub-Mark-64px.png │ │ └── caffeine-icon.png │ ├── index.md │ ├── install_apt.md │ ├── install_osx.md │ ├── install_yum.md │ ├── installation.md │ ├── model_zoo.md │ ├── performance_hardware.md │ ├── stylesheets │ │ ├── pygment_trac.css │ │ ├── reset.css │ │ └── styles.css │ └── tutorial │ │ ├── convolution.md │ │ ├── data.md │ │ ├── fig │ │ ├── .gitignore │ │ ├── backward.jpg │ │ ├── forward.jpg │ │ ├── forward_backward.png │ │ ├── layer.jpg │ │ └── logreg.jpg │ │ ├── forward_backward.md │ │ ├── index.md │ │ ├── interfaces.md │ │ ├── layers.md │ │ ├── loss.md │ │ ├── net_layer_blob.md │ │ └── solver.md ├── include │ └── caffe │ │ ├── blob.hpp │ │ ├── caffe.hpp │ │ ├── common.hpp │ │ ├── common_layers.hpp │ │ ├── data_layers.hpp │ │ ├── data_transformer.hpp │ │ ├── fast_rcnn_layers.hpp │ │ ├── filler.hpp │ │ ├── internal_thread.hpp │ │ ├── layer.hpp │ │ ├── layer_factory.hpp │ │ ├── loss_layers.hpp │ │ ├── net.hpp │ │ ├── neuron_layers.hpp │ │ ├── python_layer.hpp │ │ ├── solver.hpp │ │ ├── syncedmem.hpp │ │ ├── test │ │ ├── test_caffe_main.hpp │ │ └── test_gradient_check_util.hpp │ │ ├── util │ │ ├── benchmark.hpp │ │ ├── cudnn.hpp │ │ ├── db.hpp │ │ ├── device_alternate.hpp │ │ ├── im2col.hpp │ │ ├── insert_splits.hpp │ │ ├── io.hpp │ │ ├── math_functions.hpp │ │ ├── mkl_alternate.hpp │ │ ├── rng.hpp │ │ └── upgrade_proto.hpp │ │ └── vision_layers.hpp ├── python │ ├── CMakeLists.txt │ ├── caffe │ │ ├── __init__.py │ │ ├── _caffe.cpp │ │ ├── classifier.py │ │ ├── detector.py │ │ ├── draw.py │ │ ├── imagenet │ │ │ └── ilsvrc_2012_mean.npy │ │ ├── io.py │ │ ├── pycaffe.py │ │ └── test │ │ │ ├── test_net.py │ │ │ ├── test_python_layer.py │ │ │ └── test_solver.py │ ├── classify.py │ ├── detect.py │ ├── draw_net.py │ └── requirements.txt ├── scripts │ ├── build_docs.sh │ ├── copy_notebook.py │ ├── cpp_lint.py │ ├── deploy_docs.sh │ ├── download_model_binary.py │ ├── download_model_from_gist.sh │ ├── gather_examples.sh │ ├── travis │ │ ├── travis_build_and_test.sh │ │ ├── travis_install.sh │ │ └── travis_setup_makefile_config.sh │ └── upload_model_to_gist.sh ├── src │ ├── caffe │ │ ├── CMakeLists.txt │ │ ├── blob.cpp │ │ ├── common.cpp │ │ ├── data_transformer.cpp │ │ ├── internal_thread.cpp │ │ ├── layer_factory.cpp │ │ ├── layers │ │ │ ├── absval_layer.cpp │ │ │ ├── absval_layer.cu │ │ │ ├── accuracy_layer.cpp │ │ │ ├── argmax_layer.cpp │ │ │ ├── base_conv_layer.cpp │ │ │ ├── base_data_layer.cpp │ │ │ ├── base_data_layer.cu │ │ │ ├── bnll_layer.cpp │ │ │ ├── bnll_layer.cu │ │ │ ├── concat_layer.cpp │ │ │ ├── 
concat_layer.cu │ │ │ ├── contrastive_loss_layer.cpp │ │ │ ├── contrastive_loss_layer.cu │ │ │ ├── conv_layer.cpp │ │ │ ├── conv_layer.cu │ │ │ ├── cudnn_conv_layer.cpp │ │ │ ├── cudnn_conv_layer.cu │ │ │ ├── cudnn_pooling_layer.cpp │ │ │ ├── cudnn_pooling_layer.cu │ │ │ ├── cudnn_relu_layer.cpp │ │ │ ├── cudnn_relu_layer.cu │ │ │ ├── cudnn_sigmoid_layer.cpp │ │ │ ├── cudnn_sigmoid_layer.cu │ │ │ ├── cudnn_softmax_layer.cpp │ │ │ ├── cudnn_softmax_layer.cu │ │ │ ├── cudnn_tanh_layer.cpp │ │ │ ├── cudnn_tanh_layer.cu │ │ │ ├── data_layer.cpp │ │ │ ├── deconv_layer.cpp │ │ │ ├── deconv_layer.cu │ │ │ ├── dropout_layer.cpp │ │ │ ├── dropout_layer.cu │ │ │ ├── dummy_data_layer.cpp │ │ │ ├── eltwise_layer.cpp │ │ │ ├── eltwise_layer.cu │ │ │ ├── euclidean_loss_layer.cpp │ │ │ ├── euclidean_loss_layer.cu │ │ │ ├── exp_layer.cpp │ │ │ ├── exp_layer.cu │ │ │ ├── flatten_layer.cpp │ │ │ ├── hdf5_data_layer.cpp │ │ │ ├── hdf5_data_layer.cu │ │ │ ├── hdf5_output_layer.cpp │ │ │ ├── hdf5_output_layer.cu │ │ │ ├── hinge_loss_layer.cpp │ │ │ ├── im2col_layer.cpp │ │ │ ├── im2col_layer.cu │ │ │ ├── image_data_layer.cpp │ │ │ ├── infogain_loss_layer.cpp │ │ │ ├── inner_product_layer.cpp │ │ │ ├── inner_product_layer.cu │ │ │ ├── loss_layer.cpp │ │ │ ├── lrn_layer.cpp │ │ │ ├── lrn_layer.cu │ │ │ ├── memory_data_layer.cpp │ │ │ ├── multinomial_logistic_loss_layer.cpp │ │ │ ├── mvn_layer.cpp │ │ │ ├── mvn_layer.cu │ │ │ ├── neuron_layer.cpp │ │ │ ├── pooling_layer.cpp │ │ │ ├── pooling_layer.cu │ │ │ ├── power_layer.cpp │ │ │ ├── power_layer.cu │ │ │ ├── prelu_layer.cpp │ │ │ ├── prelu_layer.cu │ │ │ ├── relu_layer.cpp │ │ │ ├── relu_layer.cu │ │ │ ├── roi_pooling_layer.cpp │ │ │ ├── roi_pooling_layer.cu │ │ │ ├── sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── sigmoid_cross_entropy_loss_layer.cu │ │ │ ├── sigmoid_layer.cpp │ │ │ ├── sigmoid_layer.cu │ │ │ ├── silence_layer.cpp │ │ │ ├── silence_layer.cu │ │ │ ├── slice_layer.cpp │ │ │ ├── slice_layer.cu │ │ │ ├── smooth_L1_loss_layer.cpp │ │ │ ├── smooth_L1_loss_layer.cu │ │ │ ├── softmax_layer.cpp │ │ │ ├── softmax_layer.cu │ │ │ ├── softmax_loss_layer.cpp │ │ │ ├── softmax_loss_layer.cu │ │ │ ├── split_layer.cpp │ │ │ ├── split_layer.cu │ │ │ ├── tanh_layer.cpp │ │ │ ├── tanh_layer.cu │ │ │ ├── threshold_layer.cpp │ │ │ ├── threshold_layer.cu │ │ │ └── window_data_layer.cpp │ │ ├── net.cpp │ │ ├── proto │ │ │ └── caffe.proto │ │ ├── solver.cpp │ │ ├── syncedmem.cpp │ │ ├── test │ │ │ ├── CMakeLists.txt │ │ │ ├── test_accuracy_layer.cpp │ │ │ ├── test_argmax_layer.cpp │ │ │ ├── test_benchmark.cpp │ │ │ ├── test_blob.cpp │ │ │ ├── test_caffe_main.cpp │ │ │ ├── test_common.cpp │ │ │ ├── test_concat_layer.cpp │ │ │ ├── test_contrastive_loss_layer.cpp │ │ │ ├── test_convolution_layer.cpp │ │ │ ├── test_data │ │ │ │ ├── generate_sample_data.py │ │ │ │ ├── sample_data.h5 │ │ │ │ ├── sample_data_2_gzip.h5 │ │ │ │ └── sample_data_list.txt │ │ │ ├── test_data_layer.cpp │ │ │ ├── test_data_transformer.cpp │ │ │ ├── test_db.cpp │ │ │ ├── test_deconvolution_layer.cpp │ │ │ ├── test_dummy_data_layer.cpp │ │ │ ├── test_eltwise_layer.cpp │ │ │ ├── test_euclidean_loss_layer.cpp │ │ │ ├── test_filler.cpp │ │ │ ├── test_flatten_layer.cpp │ │ │ ├── test_gradient_based_solver.cpp │ │ │ ├── test_hdf5_output_layer.cpp │ │ │ ├── test_hdf5data_layer.cpp │ │ │ ├── test_hinge_loss_layer.cpp │ │ │ ├── test_im2col_kernel.cu │ │ │ ├── test_im2col_layer.cpp │ │ │ ├── test_image_data_layer.cpp │ │ │ ├── test_infogain_loss_layer.cpp │ │ │ ├── test_inner_product_layer.cpp │ │ │ ├── 
test_internal_thread.cpp │ │ │ ├── test_io.cpp │ │ │ ├── test_layer_factory.cpp │ │ │ ├── test_lrn_layer.cpp │ │ │ ├── test_math_functions.cpp │ │ │ ├── test_maxpool_dropout_layers.cpp │ │ │ ├── test_memory_data_layer.cpp │ │ │ ├── test_multinomial_logistic_loss_layer.cpp │ │ │ ├── test_mvn_layer.cpp │ │ │ ├── test_net.cpp │ │ │ ├── test_neuron_layer.cpp │ │ │ ├── test_platform.cpp │ │ │ ├── test_pooling_layer.cpp │ │ │ ├── test_power_layer.cpp │ │ │ ├── test_protobuf.cpp │ │ │ ├── test_random_number_generator.cpp │ │ │ ├── test_roi_pooling_layer.cpp │ │ │ ├── test_sigmoid_cross_entropy_loss_layer.cpp │ │ │ ├── test_slice_layer.cpp │ │ │ ├── test_softmax_layer.cpp │ │ │ ├── test_softmax_with_loss_layer.cpp │ │ │ ├── test_solver.cpp │ │ │ ├── test_split_layer.cpp │ │ │ ├── test_stochastic_pooling.cpp │ │ │ ├── test_syncedmem.cpp │ │ │ ├── test_tanh_layer.cpp │ │ │ ├── test_threshold_layer.cpp │ │ │ ├── test_upgrade_proto.cpp │ │ │ └── test_util_blas.cpp │ │ └── util │ │ │ ├── benchmark.cpp │ │ │ ├── db.cpp │ │ │ ├── im2col.cpp │ │ │ ├── im2col.cu │ │ │ ├── insert_splits.cpp │ │ │ ├── io.cpp │ │ │ ├── math_functions.cpp │ │ │ ├── math_functions.cu │ │ │ └── upgrade_proto.cpp │ └── gtest │ │ ├── CMakeLists.txt │ │ ├── gtest-all.cpp │ │ ├── gtest.h │ │ └── gtest_main.cc └── tools │ ├── CMakeLists.txt │ ├── caffe.cpp │ ├── compute_image_mean.cpp │ ├── convert_imageset.cpp │ ├── device_query.cpp │ ├── extra │ ├── extract_seconds.py │ ├── launch_resize_and_crop_images.sh │ ├── parse_log.py │ ├── parse_log.sh │ ├── plot_log.gnuplot.example │ ├── plot_training_log.py.example │ └── resize_and_crop_images.py │ ├── extract_features.cpp │ ├── finetune_net.cpp │ ├── net_speed_benchmark.cpp │ ├── test_net.cpp │ ├── train_net.cpp │ ├── upgrade_net_proto_binary.cpp │ └── upgrade_net_proto_text.cpp ├── data ├── .tgz ├── cache │ └── .cache ├── demo │ ├── demo_boxes.mat │ ├── im0.jpg │ ├── im1.jpg │ ├── im2.jpg │ └── im3.jpg └── scripts │ ├── fetch_coco_matlab_data.sh │ ├── fetch_edge_box_data.sh │ ├── fetch_fast_dbox_data.sh │ ├── fetch_imagenet_model.sh │ └── fetch_slid_window_data.sh ├── eval ├── eval_fast_dbox.m ├── evalbbox.m ├── plot_fast_dbox.m └── scorebboxes.m ├── models └── DboxNet │ ├── solver.prototxt │ ├── test.prototxt │ └── train.prototxt ├── output └── default │ └── scripts │ └── fetch_fast_dbox_models.sh ├── src ├── Makefile ├── datasets │ ├── __init__.py │ ├── coco_imdb.py │ ├── factory.py │ └── imdb_coco.py ├── fast_dbox_config.py ├── fast_dbox_test.py ├── fast_dbox_train.py ├── finetuning.py ├── setup.py └── utils │ ├── .gitignore │ ├── __init__.py │ ├── bbox.pyx │ ├── blob.py │ └── timer.py └── tools ├── .train_net.py.swp ├── demo.py ├── test_net.py └── train_net.py /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Fast DeepBox 2 | 3 | Copyright (c) UC Berkeley 4 | 5 | All rights reserved. 6 | 7 | MIT License 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a 10 | copy of this software and associated documentation files (the "Software"), 11 | to deal in the Software without restriction, including without limitation 12 | the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 | and/or sell copies of the Software, and to permit persons to whom the 14 | Software is furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included 17 | in all copies or substantial portions of the Software. 
18 | 19 | THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /LICENSE_FRCNN.txt: -------------------------------------------------------------------------------- 1 | Fast R-CNN 2 | 3 | Copyright (c) Microsoft Corporation 4 | 5 | All rights reserved. 6 | 7 | MIT License 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a 10 | copy of this software and associated documentation files (the "Software"), 11 | to deal in the Software without restriction, including without limitation 12 | the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 | and/or sell copies of the Software, and to permit persons to whom the 14 | Software is furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included 17 | in all copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/.travis.yml: -------------------------------------------------------------------------------- 1 | # Use a build matrix to do two builds in parallel: 2 | # one using CMake, and one using make. 3 | env: 4 | matrix: 5 | - WITH_CUDA=false WITH_CMAKE=false 6 | - WITH_CUDA=false WITH_CMAKE=true 7 | - WITH_CUDA=true WITH_CMAKE=false 8 | - WITH_CUDA=true WITH_CMAKE=true 9 | 10 | language: cpp 11 | 12 | # Cache Ubuntu apt packages. 13 | cache: apt 14 | 15 | compiler: gcc 16 | 17 | before_install: 18 | - export NUM_THREADS=4 19 | - export SCRIPTS=./scripts/travis 20 | 21 | install: 22 | - sudo -E $SCRIPTS/travis_install.sh 23 | 24 | before_script: 25 | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64 26 | - export PATH=/home/travis/miniconda/bin:$PATH 27 | - if ! $WITH_CMAKE; then $SCRIPTS/travis_setup_makefile_config.sh; fi 28 | 29 | script: $SCRIPTS/travis_build_and_test.sh 30 | 31 | notifications: 32 | # Emails are sent to the committer's git-configured email address by default, 33 | # but only if they have access to the repository. To enable Travis on your 34 | # public fork of Caffe, just go to travis-ci.org and flip the switch on for 35 | # your Caffe fork. To configure your git email address, use: 36 | # git config --global user.email me@example.com 37 | email: 38 | on_success: always 39 | on_failure: always 40 | 41 | # IRC notifications disabled by default. 
42 | # Uncomment next 5 lines to send notifications to chat.freenode.net#caffe 43 | # irc: 44 | # channels: 45 | # - "chat.freenode.net#caffe" 46 | # template: 47 | # - "%{repository}/%{branch} (%{commit} - %{author}): %{message}" 48 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.7) 2 | 3 | # ---[ Caffe project 4 | project(Caffe C CXX) 5 | 6 | # ---[ Using cmake scripts and modules 7 | list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules) 8 | 9 | include(cmake/Utils.cmake) 10 | include(cmake/Targets.cmake) 11 | include(cmake/Misc.cmake) 12 | include(cmake/Summary.cmake) 13 | include(cmake/ConfigGen.cmake) 14 | 15 | # ---[ Options 16 | caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA 17 | caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY) 18 | caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) 19 | caffe_option(BUILD_python "Build Python wrapper" ON) 20 | set(python_version "2" CACHE STRING "Specify which python version to use") 21 | caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE) 22 | caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE) 23 | caffe_option(BUILD_python_layer "Build the caffe python layer" ON) 24 | 25 | # ---[ Dependencies 26 | include(cmake/Dependencies.cmake) 27 | 28 | # ---[ Flags 29 | if(UNIX OR APPLE) 30 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall") 31 | endif() 32 | 33 | if(USE_libstdcpp) 34 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++") 35 | message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)") 36 | endif() 37 | 38 | add_definitions(-DGTEST_USE_OWN_TR1_TUPLE) 39 | 40 | # ---[ Warnings 41 | caffe_warnings_disable(CMAKE_CXX_FLAGS -Wno-sign-compare -Wno-uninitialized) 42 | 43 | # ---[ Config generation 44 | configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_config.h") 45 | 46 | # ---[ Includes 47 | set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include) 48 | include_directories(${Caffe_INCLUDE_DIR} ${PROJECT_BINARY_DIR}) 49 | include_directories(BEFORE src) # This is needed for gtest. 50 | 51 | # ---[ Subdirectories 52 | add_subdirectory(src/gtest) 53 | add_subdirectory(src/caffe) 54 | add_subdirectory(tools) 55 | add_subdirectory(examples) 56 | add_subdirectory(python) 57 | add_subdirectory(matlab) 58 | add_subdirectory(docs) 59 | 60 | # ---[ Linter target 61 | add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake) 62 | 63 | # ---[ Configuration summary 64 | caffe_print_configuration_summary() 65 | 66 | # ---[ Export configs generation 67 | caffe_generate_export_configs() 68 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Caffe is developed by a core set of BVLC members and the open-source community. 4 | 5 | We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! 6 | 7 | **For the detailed history of contributions** of a given file, try 8 | 9 | git blame file 10 | 11 | to see line-by-line credits and 12 | 13 | git log --follow file 14 | 15 | to see the change log even across renames and rewrites.
16 | 17 | Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. 18 | 19 | **Copyright** is held by the original contributor according to the versioning history; see LICENSE. 20 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | See http://caffe.berkeleyvision.org/installation.html for the latest 4 | installation instructions. 5 | 6 | Check the issue tracker in case you need help: 7 | https://github.com/BVLC/caffe/issues 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Caffe 2 | 3 | Caffe is a deep learning framework made with expression, speed, and modularity in mind. 4 | It is developed by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu)) and community contributors. 5 | 6 | Check out the [project site](http://caffe.berkeleyvision.org) for all the details like 7 | 8 | - [DIY Deep Learning for Vision with Caffe](https://docs.google.com/presentation/d/1UeKXVgRvvxg9OUdh_UiC5G71UMscNPlvArsWER41PsU/edit#slide=id.p) 9 | - [Tutorial Documentation](http://caffe.berkeleyvision.org/tutorial/) 10 | - [BVLC reference models](http://caffe.berkeleyvision.org/model_zoo.html) and the [community model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo) 11 | - [Installation instructions](http://caffe.berkeleyvision.org/installation.html) 12 | 13 | and step-by-step examples. 14 | 15 | [![Join the chat at https://gitter.im/BVLC/caffe](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/BVLC/caffe?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 16 | 17 | Please join the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users) or [gitter chat](https://gitter.im/BVLC/caffe) to ask questions and talk about methods and models. 18 | Framework development discussions and thorough bug reports are collected on [Issues](https://github.com/BVLC/caffe/issues). 19 | 20 | Happy brewing! 21 | 22 | ## License and Citation 23 | 24 | Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). 25 | The BVLC reference models are released for unrestricted use. 
26 | 27 | Please cite Caffe in your publications if it helps your research: 28 | 29 | @article{jia2014caffe, 30 | Author = {Jia, Yangqing and Shelhamer, Evan and Donahue, Jeff and Karayev, Sergey and Long, Jonathan and Girshick, Ross and Guadarrama, Sergio and Darrell, Trevor}, 31 | Journal = {arXiv preprint arXiv:1408.5093}, 32 | Title = {Caffe: Convolutional Architecture for Fast Feature Embedding}, 33 | Year = {2014} 34 | } 35 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/caffe.cloc: -------------------------------------------------------------------------------- 1 | Bourne Shell 2 | filter remove_matches ^\s*# 3 | filter remove_inline #.*$ 4 | extension sh 5 | script_exe sh 6 | C 7 | filter remove_matches ^\s*// 8 | filter call_regexp_common C 9 | filter remove_inline //.*$ 10 | extension c 11 | extension ec 12 | extension pgc 13 | C++ 14 | filter remove_matches ^\s*// 15 | filter remove_inline //.*$ 16 | filter call_regexp_common C 17 | extension C 18 | extension cc 19 | extension cpp 20 | extension cxx 21 | extension pcc 22 | C/C++ Header 23 | filter remove_matches ^\s*// 24 | filter call_regexp_common C 25 | filter remove_inline //.*$ 26 | extension H 27 | extension h 28 | extension hh 29 | extension hpp 30 | CUDA 31 | filter remove_matches ^\s*// 32 | filter remove_inline //.*$ 33 | filter call_regexp_common C 34 | extension cu 35 | Python 36 | filter remove_matches ^\s*# 37 | filter docstring_to_C 38 | filter call_regexp_common C 39 | filter remove_inline #.*$ 40 | extension py 41 | make 42 | filter remove_matches ^\s*# 43 | filter remove_inline #.*$ 44 | extension Gnumakefile 45 | extension Makefile 46 | extension am 47 | extension gnumakefile 48 | extension makefile 49 | filename Gnumakefile 50 | filename Makefile 51 | filename gnumakefile 52 | filename makefile 53 | script_exe make 54 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Misc.cmake: -------------------------------------------------------------------------------- 1 | # ---[ Configurations types 2 | set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Possible configurations" FORCE) 3 | mark_as_advanced(CMAKE_CONFIGURATION_TYPES) 4 | 5 | if(DEFINED CMAKE_BUILD_TYPE) 6 | set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES}) 7 | endif() 8 | 9 | # --[ If user doesn't specify build type then assume release 10 | if("${CMAKE_BUILD_TYPE}" STREQUAL "") 11 | set(CMAKE_BUILD_TYPE Release) 12 | endif() 13 | 14 | if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") 15 | set(CMAKE_COMPILER_IS_CLANGXX TRUE) 16 | endif() 17 | 18 | # ---[ Solution folders 19 | caffe_option(USE_PROJECT_FOLDERS "IDE Solution folders" (MSVC_IDE OR CMAKE_GENERATOR MATCHES Xcode) ) 20 | 21 | if(USE_PROJECT_FOLDERS) 22 | set_property(GLOBAL PROPERTY USE_FOLDERS ON) 23 | set_property(GLOBAL PROPERTY PREDEFINED_TARGETS_FOLDER "CMakeTargets") 24 | endif() 25 | 26 | # ---[ Install options 27 | if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) 28 | set(CMAKE_INSTALL_PREFIX "${PROJECT_BINARY_DIR}/install" CACHE PATH "Default install path" FORCE) 29 | endif() 30 | 31 | # ---[ RPATH settings 32 | set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for shared library rpath") 33 | set(CMAKE_MACOSX_RPATH TRUE) 34 | 35 | list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_systtem_dir) 36 | if(${__is_systtem_dir} STREQUAL -1) 37 | set(CMAKE_INSTALL_RPATH 
${CMAKE_INSTALL_PREFIX}/lib) 38 | endif() 39 | 40 | # ---[ Funny target 41 | if(UNIX OR APPLE) 42 | add_custom_target(symlink_to_build COMMAND "ln" "-sf" "${PROJECT_BINARY_DIR}" "${PROJECT_SOURCE_DIR}/build" 43 | COMMENT "Adding symlink: /build -> ${PROJECT_BINARY_DIR}" ) 44 | endif() 45 | 46 | # ---[ Set debug postfix 47 | set(Caffe_DEBUG_POSTFIX "-d") 48 | 49 | set(Caffe_POSTFIX "") 50 | if(CMAKE_BUILD_TYPE MATCHES "Debug") 51 | set(Caffe_POSTFIX ${Caffe_DEBUG_POSTFIX}) 52 | endif() 53 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindAtlas.cmake: -------------------------------------------------------------------------------- 1 | # Find the Atlas (and Lapack) libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Atlas_ROOT_DIR: Base directory where all Atlas components are found 5 | # 6 | # The following are set after configuration is done: 7 | # Atlas_FOUND 8 | # Atlas_INCLUDE_DIRS 9 | # Atlas_LIBRARIES 10 | # Atlas_LIBRARY_DIRS 11 | 12 | set(Atlas_INCLUDE_SEARCH_PATHS 13 | /usr/include/atlas 14 | /usr/include/atlas-base 15 | $ENV{Atlas_ROOT_DIR} 16 | $ENV{Atlas_ROOT_DIR}/include 17 | ) 18 | 19 | set(Atlas_LIB_SEARCH_PATHS 20 | /usr/lib/atlas 21 | /usr/lib/atlas-base 22 | $ENV{Atlas_ROOT_DIR} 23 | $ENV{Atlas_ROOT_DIR}/lib 24 | ) 25 | 26 | find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) 27 | find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) 28 | 29 | find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) 30 | find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) 31 | find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) 32 | 33 | set(LOOKED_FOR 34 | Atlas_CBLAS_INCLUDE_DIR 35 | Atlas_CLAPACK_INCLUDE_DIR 36 | 37 | Atlas_CBLAS_LIBRARY 38 | Atlas_BLAS_LIBRARY 39 | Atlas_LAPACK_LIBRARY 40 | ) 41 | 42 | include(FindPackageHandleStandardArgs) 43 | find_package_handle_standard_args(Atlas DEFAULT_MSG ${LOOKED_FOR}) 44 | 45 | if(ATLAS_FOUND) 46 | set(Atlas_INCLUDE_DIR ${Atlas_CBLAS_INCLUDE_DIR} ${Atlas_CLAPACK_INCLUDE_DIR}) 47 | set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY}) 48 | mark_as_advanced(${LOOKED_FOR}) 49 | 50 | message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})") 51 | endif(ATLAS_FOUND) 52 | 53 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindGFlags.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find GFLAGS 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GFLAGS_FOUND 8 | # GFLAGS_INCLUDE_DIRS 9 | # GFLAGS_LIBRARIES 10 | # GFLAGS_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") 15 | 16 | # We are testing only a couple of files in the include directories 17 | if(WIN32) 18 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 19 | PATHS ${GFLAGS_ROOT_DIR}/src/windows) 20 | else() 21 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 22 | PATHS ${GFLAGS_ROOT_DIR}) 23 | endif() 24 | 25 | if(MSVC) 26 |
find_library(GFLAGS_LIBRARY_RELEASE 27 | NAMES libgflags 28 | PATHS ${GFLAGS_ROOT_DIR} 29 | PATH_SUFFIXES Release) 30 | 31 | find_library(GFLAGS_LIBRARY_DEBUG 32 | NAMES libgflags-debug 33 | PATHS ${GFLAGS_ROOT_DIR} 34 | PATH_SUFFIXES Debug) 35 | 36 | set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) 37 | else() 38 | find_library(GFLAGS_LIBRARY gflags) 39 | endif() 40 | 41 | find_package_handle_standard_args(GFLAGS DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) 42 | 43 | 44 | if(GFLAGS_FOUND) 45 | set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) 46 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) 47 | message(STATUS "Found gflags (include: ${GFLAGS_INCLUDE_DIR}, library: ${GFLAGS_LIBRARY})") 48 | mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE 49 | GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) 50 | endif() 51 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindGlog.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Glog 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GLOG_ROOT_DIR: Base directory where all GLOG components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GLOG_FOUND 8 | # GLOG_INCLUDE_DIRS 9 | # GLOG_LIBRARIES 10 | # GLOG_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") 15 | 16 | if(WIN32) 17 | find_path(GLOG_INCLUDE_DIR glog/logging.h 18 | PATHS ${GLOG_ROOT_DIR}/src/windows) 19 | else() 20 | find_path(GLOG_INCLUDE_DIR glog/logging.h 21 | PATHS ${GLOG_ROOT_DIR}) 22 | endif() 23 | 24 | if(MSVC) 25 | find_library(GLOG_LIBRARY_RELEASE libglog_static 26 | PATHS ${GLOG_ROOT_DIR} 27 | PATH_SUFFIXES Release) 28 | 29 | find_library(GLOG_LIBRARY_DEBUG libglog_static 30 | PATHS ${GLOG_ROOT_DIR} 31 | PATH_SUFFIXES Debug) 32 | 33 | set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) 34 | else() 35 | find_library(GLOG_LIBRARY glog 36 | PATHS ${GLOG_ROOT_DIR} 37 | PATH_SUFFIXES lib lib64) 38 | endif() 39 | 40 | find_package_handle_standard_args(GLOG DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY) 41 | 42 | if(GLOG_FOUND) 43 | set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) 44 | set(GLOG_LIBRARIES ${GLOG_LIBRARY}) 45 | message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})") 46 | mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG 47 | GLOG_LIBRARY GLOG_INCLUDE_DIR) 48 | endif() 49 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindLMDB.cmake: -------------------------------------------------------------------------------- 1 | # Try to find the LMDB libraries and headers 2 | # LMDB_FOUND - system has LMDB lib 3 | # LMDB_INCLUDE_DIR - the LMDB include directory 4 | # LMDB_LIBRARIES - Libraries needed to use LMDB 5 | 6 | # FindCWD based on FindGMP by: 7 | # Copyright (c) 2006, Laurent Montel, 8 | # 9 | # Redistribution and use is allowed according to the terms of the BSD license.
10 | 11 | # Adapted from FindCWD by: 12 | # Copyright 2013 Conrad Steenberg 13 | # Aug 31, 2013 14 | 15 | find_path(LMDB_INCLUDE_DIR NAMES lmdb.h PATHS "$ENV{LMDB_DIR}/include") 16 | find_library(LMDB_LIBRARIES NAMES lmdb PATHS "$ENV{LMDB_DIR}/lib" ) 17 | 18 | include(FindPackageHandleStandardArgs) 19 | find_package_handle_standard_args(LMDB DEFAULT_MSG LMDB_INCLUDE_DIR LMDB_LIBRARIES) 20 | 21 | if(LMDB_FOUND) 22 | message(STATUS "Found lmdb (include: ${LMDB_INCLUDE_DIR}, library: ${LMDB_LIBRARIES})") 23 | mark_as_advanced(LMDB_INCLUDE_DIR LMDB_LIBRARIES) 24 | 25 | caffe_parse_header(${LMDB_INCLUDE_DIR}/lmdb.h 26 | LMDB_VERSION_LINES MDB_VERSION_MAJOR MDB_VERSION_MINOR MDB_VERSION_PATCH) 27 | set(LMDB_VERSION "${MDB_VERSION_MAJOR}.${MDB_VERSION_MINOR}.${MDB_VERSION_PATCH}") 28 | endif() 29 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindLevelDB.cmake: -------------------------------------------------------------------------------- 1 | # - Find LevelDB 2 | # 3 | # LevelDB_INCLUDES - List of LevelDB includes 4 | # LevelDB_LIBRARIES - List of libraries when using LevelDB. 5 | # LevelDB_FOUND - True if LevelDB found. 6 | 7 | # Look for the header file. 8 | find_path(LevelDB_INCLUDE NAMES leveldb/db.h 9 | PATHS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include 10 | DOC "Path in which the file leveldb/db.h is located." ) 11 | 12 | # Look for the library. 13 | find_library(LevelDB_LIBRARY NAMES leveldb 14 | PATHS /usr/lib $ENV{LEVELDB_ROOT}/lib 15 | DOC "Path to leveldb library." ) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(LevelDB DEFAULT_MSG LevelDB_INCLUDE LevelDB_LIBRARY) 19 | 20 | if(LEVELDB_FOUND) 21 | message(STATUS "Found LevelDB (include: ${LevelDB_INCLUDE}, library: ${LevelDB_LIBRARY})") 22 | set(LevelDB_INCLUDES ${LevelDB_INCLUDE}) 23 | set(LevelDB_LIBRARIES ${LevelDB_LIBRARY}) 24 | mark_as_advanced(LevelDB_INCLUDE LevelDB_LIBRARY) 25 | 26 | if(EXISTS "${LevelDB_INCLUDE}/leveldb/db.h") 27 | file(STRINGS "${LevelDB_INCLUDE}/leveldb/db.h" __version_lines 28 | REGEX "static const int k[^V]+Version[ \t]+=[ \t]+[0-9]+;") 29 | 30 | foreach(__line ${__version_lines}) 31 | if(__line MATCHES "[^k]+kMajorVersion[ \t]+=[ \t]+([0-9]+);") 32 | set(LEVELDB_VERSION_MAJOR ${CMAKE_MATCH_1}) 33 | elseif(__line MATCHES "[^k]+kMinorVersion[ \t]+=[ \t]+([0-9]+);") 34 | set(LEVELDB_VERSION_MINOR ${CMAKE_MATCH_1}) 35 | endif() 36 | endforeach() 37 | 38 | if(LEVELDB_VERSION_MAJOR AND LEVELDB_VERSION_MINOR) 39 | set(LEVELDB_VERSION "${LEVELDB_VERSION_MAJOR}.${LEVELDB_VERSION_MINOR}") 40 | endif() 41 | 42 | caffe_clear_vars(__line __version_lines) 43 | endif() 44 | endif() 45 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindMatlabMex.cmake: -------------------------------------------------------------------------------- 1 | # This module looks for MatlabMex compiler 2 | # Defines variables: 3 | # Matlab_DIR - Matlab root dir 4 | # Matlab_mex - path to mex compiler 5 | # Matlab_mexext - path to mexext 6 | 7 | if(MSVC) 8 | foreach(__ver "9.30" "7.14" "7.11" "7.10" "7.9" "7.8" "7.7") 9 | get_filename_component(__matlab_root "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MathWorks\\MATLAB\\${__ver};MATLABROOT]" ABSOLUTE) 10 | if(__matlab_root) 11 | break() 12 | endif() 13 | endforeach() 14 | endif() 15 | 16 | if(APPLE) 17 | foreach(__ver "R2014b" "R2014a" "R2013b" "R2013a" "R2012b" "R2012a" "R2011b" "R2011a" "R2010b" 
"R2010a") 18 | if(EXISTS /Applications/MATLAB_${__ver}.app) 19 | set(__matlab_root /Applications/MATLAB_${__ver}.app) 20 | break() 21 | endif() 22 | endforeach() 23 | endif() 24 | 25 | if(UNIX) 26 | execute_process(COMMAND which matlab OUTPUT_STRIP_TRAILING_WHITESPACE 27 | OUTPUT_VARIABLE __out RESULT_VARIABLE __res) 28 | 29 | if(__res MATCHES 0) # Suppress `readlink` warning if `which` returned nothing 30 | execute_process(COMMAND which matlab COMMAND xargs readlink 31 | COMMAND xargs dirname COMMAND xargs dirname COMMAND xargs echo -n 32 | OUTPUT_VARIABLE __matlab_root OUTPUT_STRIP_TRAILING_WHITESPACE) 33 | endif() 34 | endif() 35 | 36 | 37 | find_path(Matlab_DIR NAMES bin/mex bin/mexext PATHS ${__matlab_root} 38 | DOC "Matlab directory" NO_DEFAULT_PATH) 39 | 40 | find_program(Matlab_mex NAMES mex mex.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH) 41 | find_program(Matlab_mexext NAMES mexext mexext.bat HINTS ${Matlab_DIR} PATH_SUFFIXES bin NO_DEFAULT_PATH) 42 | 43 | include(FindPackageHandleStandardArgs) 44 | find_package_handle_standard_args(MatlabMex DEFAULT_MSG Matlab_mex Matlab_mexext) 45 | 46 | if(MATLABMEX_FOUND) 47 | mark_as_advanced(Matlab_mex Matlab_mexext) 48 | endif() 49 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindNumPy.cmake: -------------------------------------------------------------------------------- 1 | # - Find the NumPy libraries 2 | # This module finds if NumPy is installed, and sets the following variables 3 | # indicating where it is. 4 | # 5 | # TODO: Update to provide the libraries and paths for linking npymath lib. 6 | # 7 | # NUMPY_FOUND - was NumPy found 8 | # NUMPY_VERSION - the version of NumPy found as a string 9 | # NUMPY_VERSION_MAJOR - the major version number of NumPy 10 | # NUMPY_VERSION_MINOR - the minor version number of NumPy 11 | # NUMPY_VERSION_PATCH - the patch version number of NumPy 12 | # NUMPY_VERSION_DECIMAL - e.g. 
version 1.6.1 is 10601 13 | # NUMPY_INCLUDE_DIR - path to the NumPy include files 14 | 15 | unset(NUMPY_VERSION) 16 | unset(NUMPY_INCLUDE_DIR) 17 | 18 | if(PYTHONINTERP_FOUND) 19 | execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" 20 | "import numpy as n; print(n.__version__); print(n.get_include());" 21 | RESULT_VARIABLE __result 22 | OUTPUT_VARIABLE __output 23 | OUTPUT_STRIP_TRAILING_WHITESPACE) 24 | 25 | if(__result MATCHES 0) 26 | string(REGEX REPLACE ";" "\\\\;" __values ${__output}) 27 | string(REGEX REPLACE "\r?\n" ";" __values ${__values}) 28 | list(GET __values 0 NUMPY_VERSION) 29 | list(GET __values 1 NUMPY_INCLUDE_DIR) 30 | 31 | string(REGEX MATCH "^([0-9])+\\.([0-9])+\\.([0-9])+" __ver_check "${NUMPY_VERSION}") 32 | if(NOT "${__ver_check}" STREQUAL "") 33 | set(NUMPY_VERSION_MAJOR ${CMAKE_MATCH_1}) 34 | set(NUMPY_VERSION_MINOR ${CMAKE_MATCH_2}) 35 | set(NUMPY_VERSION_PATCH ${CMAKE_MATCH_3}) 36 | math(EXPR NUMPY_VERSION_DECIMAL 37 | "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}") 38 | string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIR ${NUMPY_INCLUDE_DIR}) 39 | else() 40 | unset(NUMPY_VERSION) 41 | unset(NUMPY_INCLUDE_DIR) 42 | message(STATUS "Requested NumPy version and include path, but got instead:\n${__output}\n") 43 | endif() 44 | endif() 45 | else() 46 | message(STATUS "The Python interpreter must be found before NumPy can be located.") 47 | endif() 48 | 49 | include(FindPackageHandleStandardArgs) 50 | find_package_handle_standard_args(NumPy REQUIRED_VARS NUMPY_INCLUDE_DIR NUMPY_VERSION 51 | VERSION_VAR NUMPY_VERSION) 52 | 53 | if(NUMPY_FOUND) 54 | message(STATUS "NumPy ver. ${NUMPY_VERSION} found (include: ${NUMPY_INCLUDE_DIR})") 55 | endif() 56 | 57 | caffe_clear_vars(__result __output __error_value __values __ver_check) 58 | 59 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindOpenBLAS.cmake: -------------------------------------------------------------------------------- 1 | 2 | 3 | SET(Open_BLAS_INCLUDE_SEARCH_PATHS 4 | /usr/include 5 | /usr/include/openblas-base 6 | /usr/local/include 7 | /usr/local/include/openblas-base 8 | /opt/OpenBLAS/include 9 | $ENV{OpenBLAS_HOME} 10 | $ENV{OpenBLAS_HOME}/include 11 | ) 12 | 13 | SET(Open_BLAS_LIB_SEARCH_PATHS 14 | /lib/ 15 | /lib/openblas-base 16 | /lib64/ 17 | /usr/lib 18 | /usr/lib/openblas-base 19 | /usr/lib64 20 | /usr/local/lib 21 | /usr/local/lib64 22 | /opt/OpenBLAS/lib 23 | $ENV{OpenBLAS} 24 | $ENV{OpenBLAS}/lib 25 | $ENV{OpenBLAS_HOME} 26 | $ENV{OpenBLAS_HOME}/lib 27 | ) 28 | 29 | FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS}) 30 | FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS}) 31 | 32 | SET(OpenBLAS_FOUND ON) 33 | 34 | # Check include files 35 | IF(NOT OpenBLAS_INCLUDE_DIR) 36 | SET(OpenBLAS_FOUND OFF) 37 | MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off") 38 | ENDIF() 39 | 40 | # Check libraries 41 | IF(NOT OpenBLAS_LIB) 42 | SET(OpenBLAS_FOUND OFF) 43 | MESSAGE(STATUS "Could not find OpenBLAS lib.
Turning OpenBLAS_FOUND off") 44 | ENDIF() 45 | 46 | IF (OpenBLAS_FOUND) 47 | IF (NOT OpenBLAS_FIND_QUIETLY) 48 | MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}") 49 | MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}") 50 | ENDIF (NOT OpenBLAS_FIND_QUIETLY) 51 | ELSE (OpenBLAS_FOUND) 52 | IF (OpenBLAS_FIND_REQUIRED) 53 | MESSAGE(FATAL_ERROR "Could not find OpenBLAS") 54 | ENDIF (OpenBLAS_FIND_REQUIRED) 55 | ENDIF (OpenBLAS_FOUND) 56 | 57 | MARK_AS_ADVANCED( 58 | OpenBLAS_INCLUDE_DIR 59 | OpenBLAS_LIB 60 | OpenBLAS 61 | ) 62 | 63 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindSnappy.cmake: -------------------------------------------------------------------------------- 1 | # Find the Snappy libraries 2 | # 3 | # The following variables are optionally searched for defaults 4 | # Snappy_ROOT_DIR: Base directory where all Snappy components are found 5 | # 6 | # The following are set after configuration is done: 7 | # SNAPPY_FOUND 8 | # Snappy_INCLUDE_DIR 9 | # Snappy_LIBRARIES 10 | 11 | find_path(Snappy_INCLUDE_DIR NAMES snappy.h 12 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/include) 13 | 14 | find_library(Snappy_LIBRARIES NAMES snappy 15 | PATHS ${SNAPPY_ROOT_DIR} ${SNAPPY_ROOT_DIR}/lib) 16 | 17 | include(FindPackageHandleStandardArgs) 18 | find_package_handle_standard_args(Snappy DEFAULT_MSG Snappy_INCLUDE_DIR Snappy_LIBRARIES) 19 | 20 | if(SNAPPY_FOUND) 21 | message(STATUS "Found Snappy (include: ${Snappy_INCLUDE_DIR}, library: ${Snappy_LIBRARIES})") 22 | mark_as_advanced(Snappy_INCLUDE_DIR Snappy_LIBRARIES) 23 | 24 | caffe_parse_header(${Snappy_INCLUDE_DIR}/snappy-stubs-public.h 25 | SNAPPY_VERSION_LINES SNAPPY_MAJOR SNAPPY_MINOR SNAPPY_PATCHLEVEL) 26 | set(Snappy_VERSION "${SNAPPY_MAJOR}.${SNAPPY_MINOR}.${SNAPPY_PATCHLEVEL}") 27 | endif() 28 | 29 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Modules/FindvecLib.cmake: -------------------------------------------------------------------------------- 1 | # Find the vecLib libraries as part of Accelerate.framework or as a standalone framework 2 | # 3 | # The following are set after configuration is done: 4 | # VECLIB_FOUND 5 | # vecLib_INCLUDE_DIR 6 | # vecLib_LINKER_LIBS 7 | 8 | 9 | if(NOT APPLE) 10 | return() 11 | endif() 12 | 13 | set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") 14 | 15 | find_path(vecLib_INCLUDE_DIR vecLib.h 16 | DOC "vecLib include directory" 17 | PATHS /System/Library/${__veclib_include_suffix} 18 | /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} 19 | /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) 20 | 21 | include(FindPackageHandleStandardArgs) 22 | find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) 23 | 24 | if(VECLIB_FOUND) 25 | if(vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") 26 | set(vecLib_LINKER_LIBS -lcblas "-framework vecLib") 27 | message(STATUS "Found standalone vecLib.framework") 28 | else() 29 | set(vecLib_LINKER_LIBS -lcblas "-framework Accelerate") 30 | message(STATUS "Found vecLib as part of Accelerate.framework") 31 | endif() 32 | 33 | mark_as_advanced(vecLib_INCLUDE_DIR) 34 | endif() 35 | --------------------------------------------------------------------------------
/caffe-fast-rcnn/cmake/Templates/CaffeConfig.cmake.in: -------------------------------------------------------------------------------- 1 | # Config file for the Caffe package. 2 | # 3 | # Note: 4 | # Caffe and this config file depend on OpenCV, 5 | # so put `find_package(OpenCV)` before searching for Caffe 6 | # via `find_package(Caffe)`. All other library/include 7 | # dependencies are hard coded in this file. 8 | # 9 | # After successful configuration the following variables 10 | # will be defined: 11 | # 12 | # Caffe_INCLUDE_DIRS - Caffe include directories 13 | # Caffe_LIBRARIES - libraries to link against 14 | # Caffe_DEFINITIONS - a list of definitions to pass to compiler 15 | # 16 | # Caffe_HAVE_CUDA - signals about CUDA support 17 | # Caffe_HAVE_CUDNN - signals about cuDNN support 18 | 19 | 20 | # OpenCV dependency 21 | 22 | if(NOT OpenCV_FOUND) 23 | set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@") 24 | if(Caffe_OpenCV_CONFIG_PATH) 25 | get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE) 26 | 27 | if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) 28 | message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") 29 | include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) 30 | endif() 31 | 32 | else() 33 | find_package(OpenCV REQUIRED) 34 | endif() 35 | unset(Caffe_OpenCV_CONFIG_PATH) 36 | endif() 37 | 38 | # Compute paths 39 | get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) 40 | set(Caffe_INCLUDE_DIRS "@Caffe_INCLUDE_DIRS@") 41 | 42 | @Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND@ 43 | 44 | # Our library dependencies 45 | if(NOT TARGET caffe AND NOT caffe_BINARY_DIR) 46 | include("${Caffe_CMAKE_DIR}/CaffeTargets.cmake") 47 | endif() 48 | 49 | # List of IMPORTED libs created by CaffeTargets.cmake 50 | set(Caffe_LIBRARIES caffe) 51 | 52 | # Definitions 53 | set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@") 54 | 55 | # Cuda support variables 56 | set(Caffe_CPU_ONLY @CPU_ONLY@) 57 | set(Caffe_HAVE_CUDA @HAVE_CUDA@) 58 | set(Caffe_HAVE_CUDNN @HAVE_CUDNN@) 59 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Templates/CaffeConfigVersion.cmake.in: -------------------------------------------------------------------------------- 1 | set(PACKAGE_VERSION "@Caffe_VERSION@") 2 | 3 | # Check whether the requested PACKAGE_FIND_VERSION is compatible 4 | if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") 5 | set(PACKAGE_VERSION_COMPATIBLE FALSE) 6 | else() 7 | set(PACKAGE_VERSION_COMPATIBLE TRUE) 8 | if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") 9 | set(PACKAGE_VERSION_EXACT TRUE) 10 | endif() 11 | endif() 12 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/Templates/caffe_config.h.in: -------------------------------------------------------------------------------- 1 | /* Sources directory */ 2 | #define SOURCE_FOLDER "${PROJECT_SOURCE_DIR}" 3 | 4 | /* Binaries directory */ 5 | #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" 6 | 7 | /* NVIDIA CUDA */ 8 | #cmakedefine HAVE_CUDA 9 | 10 | /* NVIDIA cuDNN */ 11 | #cmakedefine HAVE_CUDNN 12 | #cmakedefine USE_CUDNN 13 | 14 | /* CPU-only build */ 15 | #cmakedefine CPU_ONLY 16 | 17 | /* Test device */ 18 | #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} 19 | 20 | /* Temporary (TODO: remove) */ 21 | #if 1 22 | #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" 23 | #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" 24 | #define CMAKE_EXT
".gen.cmake" 25 | #else 26 | #define CMAKE_SOURCE_DIR "src/" 27 | #define EXAMPLES_SOURCE_DIR "examples/" 28 | #define CMAKE_EXT "" 29 | #endif 30 | 31 | /* Matlab */ 32 | #cmakedefine HAVE_MATLAB 33 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/cmake/lint.cmake: -------------------------------------------------------------------------------- 1 | 2 | set(CMAKE_SOURCE_DIR ..) 3 | set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/cpp_lint.py) 4 | set(SRC_FILE_EXTENSIONS h hpp hu c cpp cu cc) 5 | set(EXCLUDE_FILE_EXTENSTIONS pb.h pb.cc) 6 | set(LINT_DIRS include src/caffe examples tools python matlab) 7 | 8 | cmake_policy(SET CMP0009 NEW) # supress cmake warning 9 | 10 | # find all files of interest 11 | foreach(ext ${SRC_FILE_EXTENSIONS}) 12 | foreach(dir ${LINT_DIRS}) 13 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/${dir}/*.${ext}) 14 | set(LINT_SOURCES ${LINT_SOURCES} ${FOUND_FILES}) 15 | endforeach() 16 | endforeach() 17 | 18 | # find all files that should be excluded 19 | foreach(ext ${EXCLUDE_FILE_EXTENSTIONS}) 20 | file(GLOB_RECURSE FOUND_FILES ${CMAKE_SOURCE_DIR}/*.${ext}) 21 | set(EXCLUDED_FILES ${EXCLUDED_FILES} ${FOUND_FILES}) 22 | endforeach() 23 | 24 | # exclude generated pb files 25 | list(REMOVE_ITEM LINT_SOURCES ${EXCLUDED_FILES}) 26 | 27 | execute_process( 28 | COMMAND ${LINT_COMMAND} ${LINT_SOURCES} 29 | ERROR_VARIABLE LINT_OUTPUT 30 | ERROR_STRIP_TRAILING_WHITESPACE 31 | ) 32 | 33 | string(REPLACE "\n" ";" LINT_OUTPUT ${LINT_OUTPUT}) 34 | 35 | list(GET LINT_OUTPUT -1 LINT_RESULT) 36 | list(REMOVE_AT LINT_OUTPUT -1) 37 | string(REPLACE " " ";" LINT_RESULT ${LINT_RESULT}) 38 | list(GET LINT_RESULT -1 NUM_ERRORS) 39 | if(NUM_ERRORS GREATER 0) 40 | foreach(msg ${LINT_OUTPUT}) 41 | string(FIND ${msg} "Done" result) 42 | if(result LESS 0) 43 | message(STATUS ${msg}) 44 | endif() 45 | endforeach() 46 | message(FATAL_ERROR "Lint found ${NUM_ERRORS} errors!") 47 | else() 48 | message(STATUS "Lint did not find any errors!") 49 | endif() 50 | 51 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/CNAME: -------------------------------------------------------------------------------- 1 | caffe.berkeleyvision.org 2 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/README.md: -------------------------------------------------------------------------------- 1 | # Caffe Documentation 2 | 3 | To generate the documentation, run `$CAFFE_ROOT/scripts/build_docs.sh`. 4 | 5 | To push your changes to the documentation to the gh-pages branch of your or the BVLC repo, run `$CAFFE_ROOT/scripts/deploy_docs.sh `. 6 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/_config.yml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - 3 | scope: 4 | path: "" # an empty string here means all files in the project 5 | values: 6 | layout: "default" 7 | 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/_layouts/default.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 10 | 11 | Caffe {% if page contains 'title' %}| {{ page.title }}{% endif %} 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 24 | 25 | 26 | 35 |
[The HTML markup of this Jekyll layout was lost in extraction; only the visible text survives:]
Caffe {% if page contains 'title' %}| {{ page.title }}{% endif %}
Caffe
Deep learning framework by the BVLC
Created by Yangqing Jia
Lead Developer Evan Shelhamer
{{ content }}
61 | 62 | 63 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/images/GitHub-Mark-64px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/images/GitHub-Mark-64px.png -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/images/caffeine-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/images/caffeine-icon.png -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/install_apt.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Installation: Ubuntu 3 | --- 4 | 5 | # Ubuntu Installation 6 | 7 | **General dependencies** 8 | 9 | sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev 10 | 11 | **Remaining dependencies, 14.04** 12 | 13 | sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler 14 | 15 | **Remaining dependencies, 12.04** 16 | 17 | # glog 18 | wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz 19 | tar zxvf glog-0.3.3.tar.gz 20 | cd glog-0.3.3 21 | ./configure 22 | make && make install 23 | # gflags 24 | wget https://github.com/schuhschuh/gflags/archive/master.zip 25 | unzip master.zip 26 | cd gflags-master 27 | mkdir build && cd build 28 | export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1 29 | make && make install 30 | # lmdb 31 | git clone git://gitorious.org/mdb/mdb.git 32 | cd mdb/libraries/liblmdb 33 | make && make install 34 | 35 | Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first. 36 | 37 | **CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. 38 | Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. 39 | 40 | **BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. 41 | 42 | **Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. 43 | 44 | Continue with [compilation](installation.html#compilation). 
45 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/install_yum.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Installation: RHEL / Fedora / CentOS 3 | --- 4 | 5 | # RHEL / Fedora / CentOS Installation 6 | 7 | **General dependencies** 8 | 9 | sudo yum install protobuf-devel leveldb-devel snappy-devel opencv-devel boost-devel hdf5-devel 10 | 11 | **Remaining dependencies, recent OS** 12 | 13 | sudo yum install gflags-devel glog-devel lmdb-devel 14 | 15 | **Remaining dependencies, if not found** 16 | 17 | # glog 18 | wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz 19 | tar zxvf glog-0.3.3.tar.gz 20 | cd glog-0.3.3 21 | ./configure 22 | make && make install 23 | # gflags 24 | wget https://github.com/schuhschuh/gflags/archive/master.zip 25 | unzip master.zip 26 | cd gflags-master 27 | mkdir build && cd build 28 | export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1 29 | make && make install 30 | # lmdb 31 | git clone git://gitorious.org/mdb/mdb.git 32 | cd mdb/libraries/liblmdb 33 | make && make install 34 | 35 | Note that glog does not compile with the most recent gflags version (2.1), so before that is resolved you will need to build with glog first. 36 | 37 | **CUDA**: Install via the NVIDIA package instead of `yum` to be certain of the library and driver versions. 38 | Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. 39 | + CentOS/RHEL/Fedora: 40 | 41 | **BLAS**: install ATLAS by `sudo yum install atlas-devel` or install OpenBLAS or MKL for better CPU performance. For the Makefile build, uncomment and set `BLAS_LIB` accordingly as ATLAS is usually installed under `/usr/lib[64]/atlas`). 42 | 43 | **Python** (optional): if you use the default Python you will need to `sudo yum install` the `python-devel` package to have the Python headers for building the pycaffe wrapper. 44 | 45 | Continue with [compilation](installation.html#compilation). 46 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/stylesheets/reset.css: -------------------------------------------------------------------------------- 1 | /* MeyerWeb Reset */ 2 | 3 | html, body, div, span, applet, object, iframe, 4 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 5 | a, abbr, acronym, address, big, cite, code, 6 | del, dfn, em, img, ins, kbd, q, s, samp, 7 | small, strike, strong, sub, sup, tt, var, 8 | b, u, i, center, 9 | dl, dt, dd, ol, ul, li, 10 | fieldset, form, label, legend, 11 | table, caption, tbody, tfoot, thead, tr, th, td, 12 | article, aside, canvas, details, embed, 13 | figure, figcaption, footer, header, hgroup, 14 | menu, nav, output, ruby, section, summary, 15 | time, mark, audio, video { 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/convolution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convolution 3 | --- 4 | # Caffeinated Convolution 5 | 6 | The Caffe strategy for convolution is to reduce the problem to matrix-matrix multiplication. 7 | This linear algebra computation is highly-tuned in BLAS libraries and efficiently computed on GPU devices. 
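
To make the reduction concrete, below is a minimal C++ sketch of the im2col idea for one input channel with stride 1 and no padding; the function name, memory layout, and simplifications are illustrative assumptions, not the framework's actual implementation (see `src/caffe/util/im2col.cpp` for that).

```cpp
#include <cstddef>
#include <vector>

// Unroll every k x k patch of an h x w single-channel image into a column of
// a (k*k) x (out_h*out_w) matrix, so convolving with any number of filters
// becomes a single matrix-matrix product (e.g. one BLAS sgemm call).
std::vector<float> im2col(const std::vector<float>& im,
                          std::size_t h, std::size_t w, std::size_t k) {
  const std::size_t out_h = h - k + 1, out_w = w - k + 1;
  std::vector<float> col(k * k * out_h * out_w);
  for (std::size_t ki = 0; ki < k; ++ki)         // row inside the patch
    for (std::size_t kj = 0; kj < k; ++kj)       // column inside the patch
      for (std::size_t i = 0; i < out_h; ++i)    // output row
        for (std::size_t j = 0; j < out_w; ++j)  // output column
          col[((ki * k + kj) * out_h + i) * out_w + j] =
              im[(i + ki) * w + (j + kj)];
  return col;
}
```

Each output pixel is then the dot product of a flattened filter with one column, so stacking the filters as rows of a weight matrix computes the whole output map in one multiplication.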
8 | 9 | For more details read Yangqing's [Convolution in Caffe: a memo](https://github.com/Yangqing/caffe/wiki/Convolution-in-Caffe:-a-memo). 10 | 11 | As it turns out, this same reduction was independently explored in the context of conv. nets by 12 | 13 | > K. Chellapilla, S. Puri, P. Simard, et al. High performance convolutional neural networks for document processing. In Tenth International Workshop on Frontiers in Handwriting Recognition, 2006. 14 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/.gitignore -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/backward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/backward.jpg -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/forward.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/forward.jpg -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/forward_backward.png -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/layer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/layer.jpg -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/fig/logreg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/docs/tutorial/fig/logreg.jpg -------------------------------------------------------------------------------- /caffe-fast-rcnn/docs/tutorial/forward_backward.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Forward and Backward for Inference and Learning 3 | --- 4 | # Forward and Backward 5 | 6 | The forward and backward passes are the essential computations of a [Net](net_layer_blob.html). 7 | 8 | Forward and Backward 9 | 10 | Let's consider a simple logistic regression classifier. 11 | 12 | The **forward** pass computes the output given the input for inference. 13 | In forward Caffe composes the computation of each layer to compute the "function" represented by the model. 14 | This pass goes from bottom to top. 15 | 16 | Forward pass 17 | 18 | The data $$x$$ is passed through an inner product layer for $$g(x)$$ then through a softmax for $$h(g(x))$$ and softmax loss to give $$f_W(x)$$. 
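
A minimal scalar sketch may help make this composition, and the backward pass described next, concrete; the sizes, values, and loop structure here are illustrative assumptions, not Caffe's Blob-based implementation.

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const int D = 3, C = 2;  // input dimension and number of classes (toy sizes)
  double x[D] = {1.0, 2.0, -1.0};
  double W[C][D] = {{0.1, -0.2, 0.3}, {0.0, 0.4, -0.1}};
  const int label = 1;

  // Forward: g(x) = W x (inner product), h = softmax(g), f_W = -log h[label].
  double g[C] = {0.0, 0.0}, h[C], norm = 0.0;
  for (int c = 0; c < C; ++c) {
    for (int d = 0; d < D; ++d) g[c] += W[c][d] * x[d];
    norm += std::exp(g[c]);
  }
  for (int c = 0; c < C; ++c) h[c] = std::exp(g[c]) / norm;
  const double loss = -std::log(h[label]);

  // Backward (chain rule): df/dg = h - onehot(label),
  // so df/dW[c][d] = (h[c] - onehot[c]) * x[d].
  double dW[C][D];
  for (int c = 0; c < C; ++c)
    for (int d = 0; d < D; ++d)
      dW[c][d] = (h[c] - (c == label ? 1.0 : 0.0)) * x[d];

  std::printf("loss = %f, dW[0][0] = %f\n", loss, dW[0][0]);
  return 0;
}
```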
19 | 20 | The **backward** pass computes the gradient given the loss for learning. 21 | In backward Caffe reverse-composes the gradient of each layer to compute the gradient of the whole model by automatic differentiation. 22 | This is back-propagation. 23 | This pass goes from top to bottom. 24 | 25 | Backward pass 26 | 27 | The backward pass begins with the loss and computes the gradient with respect to the output $$\frac{\partial f_W}{\partial h}$$. The gradient with respect to the rest of the model is computed layer-by-layer through the chain rule. Layers with parameters, like the `INNER_PRODUCT` layer, compute the gradient with respect to their parameters $$\frac{\partial f_W}{\partial W_{\text{ip}}}$$ during the backward step. 28 | 29 | These computations follow immediately from defining the model: Caffe plans and carries out the forward and backward passes for you. 30 | 31 | - The `Net::Forward()` and `Net::Backward()` methods carry out the respective passes while `Layer::Forward()` and `Layer::Backward()` compute each step. 32 | - Every layer type has `forward_{cpu,gpu}()` and `backward_{cpu,gpu}()` methods to compute its steps according to the mode of computation. A layer may implement only the CPU mode or only the GPU mode due to constraints or convenience. 33 | 34 | The [Solver](solver.html) optimizes a model by first calling forward to yield the output and loss, then calling backward to generate the gradient of the model, and then incorporating the gradient into a weight update that attempts to minimize the loss. This division of labor between the Solver, Net, and Layer keeps Caffe modular and open to development. 35 | 36 | For the details of the forward and backward steps of Caffe's layer types, refer to the [layer catalogue](layers.html). 37 | 38 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/caffe.hpp: -------------------------------------------------------------------------------- 1 | // caffe.hpp is the header file that you need to include in your code. It wraps 2 | // all the internal caffe header files into one for simpler inclusion. 3 | 4 | #ifndef CAFFE_CAFFE_HPP_ 5 | #define CAFFE_CAFFE_HPP_ 6 | 7 | #include "caffe/blob.hpp" 8 | #include "caffe/common.hpp" 9 | #include "caffe/filler.hpp" 10 | #include "caffe/layer.hpp" 11 | #include "caffe/layer_factory.hpp" 12 | #include "caffe/net.hpp" 13 | #include "caffe/proto/caffe.pb.h" 14 | #include "caffe/solver.hpp" 15 | #include "caffe/util/benchmark.hpp" 16 | #include "caffe/util/io.hpp" 17 | #include "caffe/vision_layers.hpp" 18 | 19 | #endif  // CAFFE_CAFFE_HPP_ 20 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/internal_thread.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_INTERNAL_THREAD_HPP_ 2 | #define CAFFE_INTERNAL_THREAD_HPP_ 3 | 4 | #include "caffe/common.hpp" 5 | 6 | /** 7 | Forward declare boost::thread instead of including boost/thread.hpp 8 | to avoid boost/NVCC issues (#1009, #1010) on OSX. 9 | */ 10 | namespace boost { class thread; } 11 | 12 | namespace caffe { 13 | 14 | /** 15 | * Virtual class encapsulating boost::thread for use in a base class. 16 | * The child class acquires the ability to run a single thread 17 | * by reimplementing the virtual function InternalThreadEntry.
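 * (A typical use, as a sketch: the subclass puts its work loop in
 * InternalThreadEntry(), callers launch it with StartInternalThread(), and
 * the destructor joins the thread via WaitForInternalThreadToExit().)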
18 | */ 19 | class InternalThread { 20 | public: 21 | InternalThread() : thread_() {} 22 | virtual ~InternalThread(); 23 | 24 | /** Returns true if the thread was successfully started. **/ 25 | bool StartInternalThread(); 26 | 27 | /** Will not return until the internal thread has exited. */ 28 | bool WaitForInternalThreadToExit(); 29 | 30 | bool is_started() const; 31 | 32 | protected: 33 | /* Implement this method in your subclass 34 | with the code you want your thread to run. */ 35 | virtual void InternalThreadEntry() {} 36 | 37 | shared_ptr<boost::thread> thread_; 38 | }; 39 | 40 | }  // namespace caffe 41 | 42 | #endif  // CAFFE_INTERNAL_THREAD_HPP_ 43 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/python_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_PYTHON_LAYER_HPP_ 2 | #define CAFFE_PYTHON_LAYER_HPP_ 3 | 4 | #include <boost/python.hpp> 5 | #include <vector> 6 | 7 | #include "caffe/layer.hpp" 8 | 9 | namespace bp = boost::python; 10 | 11 | namespace caffe { 12 | 13 | template <typename Dtype> 14 | class PythonLayer : public Layer<Dtype> { 15 | public: 16 | PythonLayer(PyObject* self, const LayerParameter& param) 17 | : Layer<Dtype>(param), self_(self) { } 18 | 19 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 20 | const vector<Blob<Dtype>*>& top) { 21 | try { 22 | bp::call_method<bp::object>(self_, "setup", bottom, top); 23 | } catch (bp::error_already_set) { 24 | PyErr_Print(); 25 | throw; 26 | } 27 | } 28 | 29 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 30 | const vector<Blob<Dtype>*>& top) { 31 | try { 32 | bp::call_method<bp::object>(self_, "reshape", bottom, top); 33 | } catch (bp::error_already_set) { 34 | PyErr_Print(); 35 | throw; 36 | } 37 | } 38 | 39 | virtual inline const char* type() const { return "Python"; } 40 | 41 | protected: 42 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 43 | const vector<Blob<Dtype>*>& top) { 44 | try { 45 | bp::call_method<bp::object>(self_, "forward", bottom, top); 46 | } catch (bp::error_already_set) { 47 | PyErr_Print(); 48 | throw; 49 | } 50 | } 51 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 52 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 53 | try { 54 | bp::call_method<bp::object>(self_, "backward", top, propagate_down, 55 | bottom); 56 | } catch (bp::error_already_set) { 57 | PyErr_Print(); 58 | throw; 59 | } 60 | } 61 | 62 | private: 63 | PyObject* self_; 64 | }; 65 | 66 | }  // namespace caffe 67 | 68 | #endif 69 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/syncedmem.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_SYNCEDMEM_HPP_ 2 | #define CAFFE_SYNCEDMEM_HPP_ 3 | 4 | #include <cstdlib> 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | 9 | namespace caffe { 10 | 11 | // Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the 12 | // cudaMallocHost and cudaFree functions in order to create pinned memory. 13 | // However, that code relies on the existence of a CUDA GPU (I don't know 14 | // why that is a must, since allocating memory should not require accessing the 15 | // GPU resource, but it just creates an error as of CUDA 5.0) and will cause 16 | // problems when running on a machine without a GPU. Thus, we simply define 17 | // these two functions for safety and possible future change if the problem 18 | // of calling CUDA functions disappears in a future version.
19 | // 20 | // In practice, although we are creating unpinned memory here, as long as we 21 | // are constantly accessing it, the memory pages almost always stay in 22 | // physical memory (assuming we have enough memory installed), and 23 | // do not seem to create a memory bottleneck here. 24 | 25 | inline void CaffeMallocHost(void** ptr, size_t size) { 26 | *ptr = malloc(size); 27 | CHECK(*ptr) << "host allocation of size " << size << " failed"; 28 | } 29 | 30 | inline void CaffeFreeHost(void* ptr) { 31 | free(ptr); 32 | } 33 | 34 | 35 | /** 36 | * @brief Manages memory allocation and synchronization between the host (CPU) 37 | * and device (GPU). 38 | * 39 | * TODO(dox): more thorough description. 40 | */ 41 | class SyncedMemory { 42 | public: 43 | SyncedMemory() 44 | : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), 45 | own_cpu_data_(false) {} 46 | explicit SyncedMemory(size_t size) 47 | : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), 48 | own_cpu_data_(false) {} 49 | ~SyncedMemory(); 50 | const void* cpu_data(); 51 | void set_cpu_data(void* data); 52 | const void* gpu_data(); 53 | void* mutable_cpu_data(); 54 | void* mutable_gpu_data(); 55 | enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; 56 | SyncedHead head() { return head_; } 57 | size_t size() { return size_; } 58 | 59 | private: 60 | void to_cpu(); 61 | void to_gpu(); 62 | void* cpu_ptr_; 63 | void* gpu_ptr_; 64 | size_t size_; 65 | SyncedHead head_; 66 | bool own_cpu_data_; 67 | 68 | DISABLE_COPY_AND_ASSIGN(SyncedMemory); 69 | };  // class SyncedMemory 70 | 71 | }  // namespace caffe 72 | 73 | #endif  // CAFFE_SYNCEDMEM_HPP_ 74 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/test/test_caffe_main.hpp: -------------------------------------------------------------------------------- 1 | // The main caffe test code. Your test cpp code should include this hpp 2 | // to allow a main function to be compiled into the binary.
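// A typical test file built on this header looks roughly like the sketch in
// the comment below (MyLayerTest is an illustrative name; TYPED_TEST_CASE and
// TYPED_TEST are gtest macros):
//
//   template <typename TypeParam>
//   class MyLayerTest : public MultiDeviceTest<TypeParam> {
//     typedef typename TypeParam::Dtype Dtype;
//   };
//   TYPED_TEST_CASE(MyLayerTest, TestDtypesAndDevices);
//   TYPED_TEST(MyLayerTest, TestForward) {
//     // runs once per (Dtype, device) combination
//   }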
3 | #ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 4 | #define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | 12 | #include "caffe/common.hpp" 13 | 14 | using std::cout; 15 | using std::endl; 16 | 17 | #ifdef CMAKE_BUILD 18 | #include "caffe_config.h" 19 | #else 20 | #define CUDA_TEST_DEVICE -1 21 | #define CMAKE_SOURCE_DIR "src/" 22 | #define EXAMPLES_SOURCE_DIR "examples/" 23 | #define CMAKE_EXT "" 24 | #endif 25 | 26 | int main(int argc, char** argv); 27 | 28 | namespace caffe { 29 | 30 | template 31 | class MultiDeviceTest : public ::testing::Test { 32 | public: 33 | typedef typename TypeParam::Dtype Dtype; 34 | protected: 35 | MultiDeviceTest() { 36 | Caffe::set_mode(TypeParam::device); 37 | } 38 | virtual ~MultiDeviceTest() {} 39 | }; 40 | 41 | typedef ::testing::Types TestDtypes; 42 | 43 | struct FloatCPU { 44 | typedef float Dtype; 45 | static const Caffe::Brew device = Caffe::CPU; 46 | }; 47 | 48 | struct DoubleCPU { 49 | typedef double Dtype; 50 | static const Caffe::Brew device = Caffe::CPU; 51 | }; 52 | 53 | #ifdef CPU_ONLY 54 | 55 | typedef ::testing::Types TestDtypesAndDevices; 56 | 57 | #else 58 | 59 | struct FloatGPU { 60 | typedef float Dtype; 61 | static const Caffe::Brew device = Caffe::GPU; 62 | }; 63 | 64 | struct DoubleGPU { 65 | typedef double Dtype; 66 | static const Caffe::Brew device = Caffe::GPU; 67 | }; 68 | 69 | typedef ::testing::Types 70 | TestDtypesAndDevices; 71 | 72 | #endif 73 | 74 | } // namespace caffe 75 | 76 | #endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ 77 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/util/benchmark.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UTIL_BENCHMARK_H_ 2 | #define CAFFE_UTIL_BENCHMARK_H_ 3 | 4 | #include 5 | 6 | #include "caffe/util/device_alternate.hpp" 7 | 8 | namespace caffe { 9 | 10 | class Timer { 11 | public: 12 | Timer(); 13 | virtual ~Timer(); 14 | virtual void Start(); 15 | virtual void Stop(); 16 | virtual float MilliSeconds(); 17 | virtual float MicroSeconds(); 18 | virtual float Seconds(); 19 | 20 | inline bool initted() { return initted_; } 21 | inline bool running() { return running_; } 22 | inline bool has_run_at_least_once() { return has_run_at_least_once_; } 23 | 24 | protected: 25 | void Init(); 26 | 27 | bool initted_; 28 | bool running_; 29 | bool has_run_at_least_once_; 30 | #ifndef CPU_ONLY 31 | cudaEvent_t start_gpu_; 32 | cudaEvent_t stop_gpu_; 33 | #endif 34 | boost::posix_time::ptime start_cpu_; 35 | boost::posix_time::ptime stop_cpu_; 36 | float elapsed_milliseconds_; 37 | float elapsed_microseconds_; 38 | }; 39 | 40 | class CPUTimer : public Timer { 41 | public: 42 | explicit CPUTimer(); 43 | virtual ~CPUTimer() {} 44 | virtual void Start(); 45 | virtual void Stop(); 46 | virtual float MilliSeconds(); 47 | virtual float MicroSeconds(); 48 | }; 49 | 50 | } // namespace caffe 51 | 52 | #endif // CAFFE_UTIL_BENCHMARK_H_ 53 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/util/im2col.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_IM2COL_HPP_ 2 | #define _CAFFE_UTIL_IM2COL_HPP_ 3 | 4 | namespace caffe { 5 | 6 | template 7 | void im2col_cpu(const Dtype* data_im, const int channels, 8 | const int height, const int width, const int kernel_h, const int kernel_w, 9 | const int pad_h, const int pad_w, const int 
stride_h, 10 | const int stride_w, Dtype* data_col); 11 | 12 | template 13 | void col2im_cpu(const Dtype* data_col, const int channels, 14 | const int height, const int width, const int patch_h, const int patch_w, 15 | const int pad_h, const int pad_w, const int stride_h, 16 | const int stride_w, Dtype* data_im); 17 | 18 | template 19 | void im2col_gpu(const Dtype* data_im, const int channels, 20 | const int height, const int width, const int kernel_h, const int kernel_w, 21 | const int pad_h, const int pad_w, const int stride_h, 22 | const int stride_w, Dtype* data_col); 23 | 24 | template 25 | void col2im_gpu(const Dtype* data_col, const int channels, 26 | const int height, const int width, const int patch_h, const int patch_w, 27 | const int pad_h, const int pad_w, const int stride_h, 28 | const int stride_w, Dtype* data_im); 29 | 30 | } // namespace caffe 31 | 32 | #endif // CAFFE_UTIL_IM2COL_HPP_ 33 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/util/insert_splits.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ 2 | #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/proto/caffe.pb.h" 7 | 8 | namespace caffe { 9 | 10 | // Copy NetParameters with SplitLayers added to replace any shared bottom 11 | // blobs with unique bottom blobs provided by the SplitLayer. 12 | void InsertSplits(const NetParameter& param, NetParameter* param_split); 13 | 14 | void ConfigureSplitLayer(const string& layer_name, const string& blob_name, 15 | const int blob_idx, const int split_count, const float loss_weight, 16 | LayerParameter* split_layer_param); 17 | 18 | string SplitLayerName(const string& layer_name, const string& blob_name, 19 | const int blob_idx); 20 | 21 | string SplitBlobName(const string& layer_name, const string& blob_name, 22 | const int blob_idx, const int split_idx); 23 | 24 | } // namespace caffe 25 | 26 | #endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ 27 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/include/caffe/util/rng.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_RNG_CPP_HPP_ 2 | #define CAFFE_RNG_CPP_HPP_ 3 | 4 | #include 5 | #include 6 | 7 | #include "boost/random/mersenne_twister.hpp" 8 | #include "boost/random/uniform_int.hpp" 9 | 10 | #include "caffe/common.hpp" 11 | 12 | namespace caffe { 13 | 14 | typedef boost::mt19937 rng_t; 15 | 16 | inline rng_t* caffe_rng() { 17 | return static_cast(Caffe::rng_stream().generator()); 18 | } 19 | 20 | // Fisher–Yates algorithm 21 | template 22 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end, 23 | RandomGenerator* gen) { 24 | typedef typename std::iterator_traits::difference_type 25 | difference_type; 26 | typedef typename boost::uniform_int dist_type; 27 | 28 | difference_type length = std::distance(begin, end); 29 | if (length <= 0) return; 30 | 31 | for (difference_type i = length - 1; i > 0; --i) { 32 | dist_type dist(0, i); 33 | std::iter_swap(begin + i, begin + dist(*gen)); 34 | } 35 | } 36 | 37 | template 38 | inline void shuffle(RandomAccessIterator begin, RandomAccessIterator end) { 39 | shuffle(begin, end, caffe_rng()); 40 | } 41 | } // namespace caffe 42 | 43 | #endif // CAFFE_RNG_HPP_ 44 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | if(NOT HAVE_PYTHON) 2 | message(STATUS "Python interface is disabled or not all required dependecies found. Building without it...") 3 | return() 4 | endif() 5 | 6 | include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) 7 | file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) 8 | 9 | add_library(pycaffe SHARED ${python_srcs}) 10 | target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) 11 | set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") 12 | caffe_default_properties(pycaffe) 13 | 14 | if(UNIX OR APPLE) 15 | set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so") 16 | add_custom_command(TARGET pycaffe POST_BUILD 17 | COMMAND ln -sf $ "${__linkname}" 18 | COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/python/caffe/proto 19 | COMMAND touch ${PROJECT_SOURCE_DIR}/python/caffe/proto/__init__.py 20 | COMMAND cp ${proto_gen_folder}/*.py ${PROJECT_SOURCE_DIR}/python/caffe/proto/ 21 | COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/_caffe${CAffe_POSTFIX}.so") 22 | endif() 23 | 24 | # ---[ Install 25 | file(GLOB files1 *.py requirements.txt) 26 | install(FILES ${files1} DESTINATION python) 27 | 28 | file(GLOB files2 caffe/*.py) 29 | install(FILES ${files2} DESTINATION python/caffe) 30 | install(TARGETS pycaffe DESTINATION python/caffe) 31 | install(DIRECTORY caffe/imagenet caffe/proto caffe/test DESTINATION python/caffe) 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | from .pycaffe import Net, SGDSolver 2 | from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver 3 | from .proto.caffe_pb2 import TRAIN, TEST 4 | from .classifier import Classifier 5 | from .detector import Detector 6 | import io 7 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/caffe/imagenet/ilsvrc_2012_mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/python/caffe/imagenet/ilsvrc_2012_mean.npy -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/caffe/test/test_python_layer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tempfile 3 | import os 4 | 5 | import caffe 6 | 7 | class SimpleLayer(caffe.Layer): 8 | """A layer that just multiplies by ten""" 9 | 10 | def setup(self, bottom, top): 11 | pass 12 | 13 | def reshape(self, bottom, top): 14 | top[0].reshape(*bottom[0].data.shape) 15 | 16 | def forward(self, bottom, top): 17 | top[0].data[...] = 10 * bottom[0].data 18 | 19 | def backward(self, top, propagate_down, bottom): 20 | bottom[0].diff[...] 
= 10 * top[0].diff 21 | 22 | def python_net_file(): 23 | with tempfile.NamedTemporaryFile(delete=False) as f: 24 | f.write("""name: 'pythonnet' force_backward: true 25 | input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } 26 | layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' 27 | python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } 28 | layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' 29 | python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } 30 | layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' 31 | python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") 32 | return f.name 33 | 34 | class TestPythonLayer(unittest.TestCase): 35 | def setUp(self): 36 | net_file = python_net_file() 37 | self.net = caffe.Net(net_file, caffe.TRAIN) 38 | os.remove(net_file) 39 | 40 | def test_forward(self): 41 | x = 8 42 | self.net.blobs['data'].data[...] = x 43 | self.net.forward() 44 | for y in self.net.blobs['three'].data.flat: 45 | self.assertEqual(y, 10**3 * x) 46 | 47 | def test_backward(self): 48 | x = 7 49 | self.net.blobs['three'].diff[...] = x 50 | self.net.backward() 51 | for y in self.net.blobs['data'].diff.flat: 52 | self.assertEqual(y, 10**3 * x) 53 | 54 | def test_reshape(self): 55 | s = 4 56 | self.net.blobs['data'].reshape(s, s, s, s) 57 | self.net.forward() 58 | for blob in self.net.blobs.itervalues(): 59 | for d in blob.data.shape: 60 | self.assertEqual(s, d) 61 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/caffe/test/test_solver.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tempfile 3 | import os 4 | import numpy as np 5 | 6 | import caffe 7 | from test_net import simple_net_file 8 | 9 | class TestSolver(unittest.TestCase): 10 | def setUp(self): 11 | self.num_output = 13 12 | net_f = simple_net_file(self.num_output) 13 | f = tempfile.NamedTemporaryFile(delete=False) 14 | f.write("""net: '""" + net_f + """' 15 | test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9 16 | weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75 17 | display: 100 max_iter: 100 snapshot_after_train: false""") 18 | f.close() 19 | self.solver = caffe.SGDSolver(f.name) 20 | # also make sure get_solver runs 21 | caffe.get_solver(f.name) 22 | caffe.set_mode_cpu() 23 | # fill in valid labels 24 | self.solver.net.blobs['label'].data[...] = \ 25 | np.random.randint(self.num_output, 26 | size=self.solver.net.blobs['label'].data.shape) 27 | self.solver.test_nets[0].blobs['label'].data[...] 
= \ 28 | np.random.randint(self.num_output, 29 | size=self.solver.test_nets[0].blobs['label'].data.shape) 30 | os.remove(f.name) 31 | os.remove(net_f) 32 | 33 | def test_solve(self): 34 | self.assertEqual(self.solver.iter, 0) 35 | self.solver.solve() 36 | self.assertEqual(self.solver.iter, 100) 37 | 38 | def test_net_memory(self): 39 | """Check that nets survive after the solver is destroyed.""" 40 | 41 | nets = [self.solver.net] + list(self.solver.test_nets) 42 | self.assertEqual(len(nets), 2) 43 | del self.solver 44 | 45 | total = 0 46 | for net in nets: 47 | for ps in net.params.itervalues(): 48 | for p in ps: 49 | total += p.data.sum() + p.diff.sum() 50 | for bl in net.blobs.itervalues(): 51 | total += bl.data.sum() + bl.diff.sum() 52 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/draw_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Draw a graph of the net architecture. 4 | """ 5 | import argparse 6 | from google.protobuf import text_format 7 | 8 | import caffe 9 | import caffe.draw 10 | from caffe.proto import caffe_pb2 11 | 12 | 13 | def parse_args(): 14 | """Parse input arguments 15 | """ 16 | 17 | parser = argparse.ArgumentParser(description='Draw a network graph') 18 | 19 | parser.add_argument('input_net_proto_file', 20 | help='Input network prototxt file') 21 | parser.add_argument('output_image_file', 22 | help='Output image file') 23 | parser.add_argument('--rankdir', 24 | help=('One of TB (top-bottom, i.e., vertical), ' 25 | 'RL (right-left, i.e., horizontal), or another ' 26 | 'valid dot option; see ' 27 | 'http://www.graphviz.org/doc/info/attrs.html#k:rankdir ' 28 | '(default: LR)'), 29 | default='LR') 30 | 31 | args = parser.parse_args() 32 | return args 33 | 34 | 35 | def main(): 36 | args = parse_args() 37 | net = caffe_pb2.NetParameter() 38 | text_format.Merge(open(args.input_net_proto_file).read(), net) 39 | print('Drawing net to %s' % args.output_image_file) 40 | caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir) 41 | 42 | 43 | if __name__ == '__main__': 44 | main() 45 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/python/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython>=0.19.2 2 | numpy>=1.7.1 3 | scipy>=0.13.2 4 | scikit-image>=0.9.3 5 | scikit-learn>=0.14.1 6 | matplotlib>=1.3.1 7 | ipython>=1.1.0 8 | h5py>=2.2.0 9 | leveldb>=0.191 10 | networkx>=1.8.1 11 | nose>=1.3.0 12 | pandas>=0.12.0 13 | python-dateutil>=1.4,<2 14 | protobuf>=2.5.0 15 | python-gflags>=2.0 16 | pyyaml>=3.10 17 | Pillow>=2.7.0 18 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build documentation for display in web browser. 3 | 4 | PORT=${1:-4000} 5 | 6 | echo "usage: build_docs.sh [port]" 7 | 8 | # Find the docs dir, no matter where the script is called 9 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 10 | cd $ROOT_DIR 11 | 12 | # Gather docs. 13 | scripts/gather_examples.sh 14 | 15 | # Generate developer docs. 16 | make docs 17 | 18 | # Display docs using web server. 19 | cd docs 20 | jekyll serve -w -s .
-d _site --port=$PORT 21 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/copy_notebook.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Takes as arguments: 4 | 1. the path to a JSON file (such as an IPython notebook). 5 | 2. the path to output file 6 | 7 | If 'metadata' dict in the JSON file contains 'include_in_docs': true, 8 | then copies the file to output file, appending the 'metadata' property 9 | as YAML front-matter, adding the field 'category' with value 'notebook'. 10 | """ 11 | import os 12 | import sys 13 | import json 14 | 15 | filename = sys.argv[1] 16 | output_filename = sys.argv[2] 17 | content = json.load(open(filename)) 18 | 19 | if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']: 20 | yaml_frontmatter = ['---'] 21 | for key, val in content['metadata'].iteritems(): 22 | if key == 'example_name': 23 | key = 'title' 24 | if val == '': 25 | val = os.path.basename(filename) 26 | yaml_frontmatter.append('{}: {}'.format(key, val)) 27 | yaml_frontmatter += ['category: notebook'] 28 | yaml_frontmatter += ['original_path: ' + filename] 29 | 30 | with open(output_filename, 'w') as fo: 31 | fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n') 32 | fo.write(open(filename).read()) 33 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/deploy_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Publish documentation to the gh-pages site. 3 | 4 | # The remote for pushing the docs (defaults to origin). 5 | # This is where you will submit the PR to BVLC:gh-pages from. 6 | REMOTE=${1:-origin} 7 | 8 | echo "Generating docs and pushing to $REMOTE:gh-pages..." 9 | echo "To build and view docs when not on master, simply do 'jekyll serve -s docs'." 10 | echo 11 | 12 | REMOTE_URL=`git config --get remote.${REMOTE}.url` 13 | BRANCH=`git rev-parse --abbrev-ref HEAD` 14 | MSG=`git log --oneline -1` 15 | 16 | if [[ $BRANCH = 'master' ]]; then 17 | # Find the docs dir, no matter where the script is called 18 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 19 | DOCS_SITE_DIR=$DIR/../docs/_site 20 | 21 | # Make sure that docs/_site tracks remote:gh-pages. 22 | # If not, then we make a new repo and check out just that branch. 23 | mkdir -p $DOCS_SITE_DIR 24 | cd $DOCS_SITE_DIR 25 | SITE_REMOTE_URL=`git config --get remote.${REMOTE}.url` 26 | SITE_BRANCH=`git rev-parse --abbrev-ref HEAD` 27 | 28 | echo $SITE_REMOTE_URL 29 | echo $SITE_BRANCH 30 | echo `pwd` 31 | 32 | if [[ ( $SITE_REMOTE_URL = $REMOTE_URL ) && ( $SITE_BRANCH = 'gh-pages' ) ]]; then 33 | echo "Confirmed that docs/_site has same remote as main repo, and is on gh-pages." 34 | else 35 | echo "Checking out $REMOTE:gh-pages into docs/_site (will take a little time)." 36 | git init . 37 | git remote add -t gh-pages -f $REMOTE $REMOTE_URL 38 | git checkout gh-pages 39 | fi 40 | 41 | echo "Building the site into docs/_site, and committing the changes." 42 | jekyll build -s .. -d . 43 | git add --all . 44 | git commit -m "$MSG" 45 | git push $REMOTE gh-pages 46 | 47 | echo "All done!" 48 | cd ../.. 49 | else echo "You must run this deployment script from the 'master' branch." 
50 | fi 51 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/download_model_from_gist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | GIST=$1 4 | DIRNAME=${2:-./models} 5 | 6 | if [ -z $GIST ]; then 7 | echo "usage: download_model_from_gist.sh " 8 | exit 9 | fi 10 | 11 | GIST_DIR=$(echo $GIST | tr '/' '-') 12 | MODEL_DIR="$DIRNAME/$GIST_DIR" 13 | 14 | if [ -d $MODEL_DIR ]; then 15 | echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!" 16 | exit 17 | fi 18 | 19 | echo "Downloading Caffe model info to $MODEL_DIR ..." 20 | mkdir -p $MODEL_DIR 21 | wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.tar.gz 22 | tar xzf $MODEL_DIR/gist.tar.gz --directory=$MODEL_DIR --strip-components=1 23 | rm $MODEL_DIR/gist.tar.gz 24 | echo "Done" 25 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/gather_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assemble documentation for the project into one directory via symbolic links. 3 | 4 | # Find the docs dir, no matter where the script is called 5 | ROOT_DIR="$( cd "$(dirname "$0")"/.. ; pwd -P )" 6 | cd $ROOT_DIR 7 | 8 | # Gather docs from examples/**/readme.md 9 | GATHERED_DIR=docs/gathered 10 | rm -r $GATHERED_DIR 11 | mkdir $GATHERED_DIR 12 | for README_FILENAME in $(find examples -iname "readme.md"); do 13 | # Only use file if it is to be included in docs. 14 | if grep -Fxq "include_in_docs: true" $README_FILENAME; then 15 | # Make link to readme.md in docs/gathered/. 16 | # Since everything is called readme.md, rename it by its dirname. 17 | README_DIRNAME=`dirname $README_FILENAME` 18 | DOCS_FILENAME=$GATHERED_DIR/$README_DIRNAME.md 19 | mkdir -p `dirname $DOCS_FILENAME` 20 | ln -s $ROOT_DIR/$README_FILENAME $DOCS_FILENAME 21 | fi 22 | done 23 | 24 | # Gather docs from examples/*.ipynb and add YAML front-matter. 25 | for NOTEBOOK_FILENAME in $(find examples -depth -iname "*.ipynb"); do 26 | DOCS_FILENAME=$GATHERED_DIR/$NOTEBOOK_FILENAME 27 | mkdir -p `dirname $DOCS_FILENAME` 28 | python scripts/copy_notebook.py $NOTEBOOK_FILENAME $DOCS_FILENAME 29 | done 30 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/travis/travis_build_and_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script called by Travis to do a CPU-only build of and test Caffe. 3 | 4 | set -e 5 | MAKE="make --jobs=$NUM_THREADS --keep-going" 6 | 7 | if $WITH_CMAKE; then 8 | mkdir build 9 | cd build 10 | cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON .. 11 | $MAKE 12 | if ! $WITH_CUDA; then 13 | $MAKE runtest 14 | $MAKE lint 15 | fi 16 | $MAKE clean 17 | cd - 18 | else 19 | if ! $WITH_CUDA; then 20 | export CPU_ONLY=1 21 | fi 22 | $MAKE all test pycaffe warn lint || true 23 | if ! $WITH_CUDA; then 24 | $MAKE runtest 25 | fi 26 | $MAKE all 27 | $MAKE test 28 | $MAKE pycaffe 29 | $MAKE pytest 30 | $MAKE warn 31 | if ! 
$WITH_CUDA; then 32 | $MAKE lint 33 | fi 34 | fi 35 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/travis/travis_setup_makefile_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mv Makefile.config.example Makefile.config 6 | 7 | if $WITH_CUDA; then 8 | # Only generate compute_50. 9 | GENCODE="-gencode arch=compute_50,code=sm_50" 10 | GENCODE="$GENCODE -gencode arch=compute_50,code=compute_50" 11 | echo "CUDA_ARCH := $GENCODE" >> Makefile.config 12 | fi 13 | 14 | cat << 'EOF' >> Makefile.config 15 | ANACONDA_HOME := $(HOME)/miniconda 16 | PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ 17 | $(ANACONDA_HOME)/include/python2.7 \ 18 | $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include 19 | PYTHON_LIB := $(ANACONDA_HOME)/lib 20 | INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include 21 | LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib 22 | WITH_PYTHON_LAYER := 1 23 | EOF 24 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/scripts/upload_model_to_gist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for valid directory 4 | DIRNAME=$1 5 | if [ ! -f $DIRNAME/readme.md ]; then 6 | echo "usage: upload_model_to_gist.sh " 7 | echo " /readme.md must exist" 8 | fi 9 | cd $DIRNAME 10 | FILES=`find . -maxdepth 1 -type f ! -name "*.caffemodel*" | xargs echo` 11 | 12 | # Check for gist tool. 13 | gist -v >/dev/null 2>&1 || { echo >&2 "I require 'gist' but it's not installed. Do 'gem install gist'."; exit 1; } 14 | 15 | NAME=`sed -n 's/^name:[[:space:]]*//p' readme.md` 16 | if [ -z "$NAME" ]; then 17 | echo " /readme.md must contain name field in the front-matter." 18 | fi 19 | 20 | GIST=`sed -n 's/^gist_id:[[:space:]]*//p' readme.md` 21 | if [ -z "$GIST" ]; then 22 | echo "Uploading new Gist" 23 | gist -p -d "$NAME" $FILES 24 | else 25 | echo "Updating existing Gist, id $GIST" 26 | gist -u $GIST -d "$NAME" $FILES 27 | fi 28 | 29 | RESULT=$? 30 | if [ $RESULT -eq 0 ]; then 31 | echo "You've uploaded your model!" 32 | echo "Don't forget to add the gist_id field to your /readme.md now!" 33 | echo "Run the command again after you do that, to make sure the Gist id propagates." 34 | echo "" 35 | echo "And do share your model over at https://github.com/BVLC/caffe/wiki/Model-Zoo" 36 | else 37 | echo "Something went wrong!" 38 | fi 39 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # generate protobuf sources 2 | file(GLOB proto_files proto/*.proto) 3 | caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) 4 | 5 | # include python files either to force generation 6 | add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) 7 | set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 
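# (Why prepending matters, as a note: with static archives a traditional Unix
# linker resolves symbols left to right, so `proto` must appear before the
# libraries it depends on, such as protobuf itself, that sit later in the list.)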
8 | caffe_default_properties(proto) 9 | 10 | # --[ Caffe library 11 | 12 | # creates 'test_srcs', 'srcs', 'test_cuda', 'cuda' lists 13 | caffe_pickup_caffe_sources(${PROJECT_SOURCE_DIR}) 14 | 15 | if(HAVE_CUDA) 16 | caffe_cuda_compile(cuda_objs ${cuda}) 17 | list(APPEND srcs ${cuda_objs} ${cuda}) 18 | endif() 19 | 20 | add_library(caffe ${srcs}) 21 | target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) 22 | caffe_default_properties(caffe) 23 | 24 | # ---[ Tests 25 | add_subdirectory(test) 26 | 27 | # ---[ Install 28 | install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) 29 | install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) 30 | install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) 31 | 32 | file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) 33 | list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) 34 | install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) 35 | 36 | 37 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "caffe/internal_thread.hpp" 3 | 4 | namespace caffe { 5 | 6 | InternalThread::~InternalThread() { 7 | WaitForInternalThreadToExit(); 8 | } 9 | 10 | bool InternalThread::is_started() const { 11 | return thread_.get() != NULL && thread_->joinable(); 12 | } 13 | 14 | 15 | bool InternalThread::StartInternalThread() { 16 | if (!WaitForInternalThreadToExit()) { 17 | return false; 18 | } 19 | try { 20 | thread_.reset( 21 | new boost::thread(&InternalThread::InternalThreadEntry, this)); 22 | } catch (...) { 23 | return false; 24 | } 25 | return true; 26 | } 27 | 28 | /** Will not return until the internal thread has exited. */ 29 | bool InternalThread::WaitForInternalThreadToExit() { 30 | if (is_started()) { 31 | try { 32 | thread_->join(); 33 | } catch (...) 
{ 34 | return false; 35 | } 36 | } 37 | return true; 38 | } 39 | 40 | } // namespace caffe 41 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/absval_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/neuron_layers.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void AbsValLayer::LayerSetUp(const vector*>& bottom, 11 | const vector*>& top) { 12 | NeuronLayer::LayerSetUp(bottom, top); 13 | CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not " 14 | "allow in-place computation."; 15 | } 16 | 17 | template 18 | void AbsValLayer::Forward_cpu( 19 | const vector*>& bottom, const vector*>& top) { 20 | const int count = top[0]->count(); 21 | Dtype* top_data = top[0]->mutable_cpu_data(); 22 | caffe_abs(count, bottom[0]->cpu_data(), top_data); 23 | } 24 | 25 | template 26 | void AbsValLayer::Backward_cpu(const vector*>& top, 27 | const vector& propagate_down, const vector*>& bottom) { 28 | const int count = top[0]->count(); 29 | const Dtype* top_diff = top[0]->cpu_diff(); 30 | if (propagate_down[0]) { 31 | const Dtype* bottom_data = bottom[0]->cpu_data(); 32 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 33 | caffe_cpu_sign(count, bottom_data, bottom_diff); 34 | caffe_mul(count, bottom_diff, top_diff, bottom_diff); 35 | } 36 | } 37 | 38 | #ifdef CPU_ONLY 39 | STUB_GPU(AbsValLayer); 40 | #endif 41 | 42 | INSTANTIATE_CLASS(AbsValLayer); 43 | REGISTER_LAYER_CLASS(AbsVal); 44 | 45 | } // namespace caffe 46 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/absval_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void AbsValLayer::Forward_gpu( 11 | const vector*>& bottom, const vector*>& top) { 12 | const int count = top[0]->count(); 13 | Dtype* top_data = top[0]->mutable_gpu_data(); 14 | caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); 15 | } 16 | 17 | template 18 | void AbsValLayer::Backward_gpu(const vector*>& top, 19 | const vector& propagate_down, const vector*>& bottom) { 20 | const int count = top[0]->count(); 21 | const Dtype* top_data = top[0]->gpu_data(); 22 | const Dtype* top_diff = top[0]->gpu_diff(); 23 | if (propagate_down[0]) { 24 | const Dtype* bottom_data = bottom[0]->gpu_data(); 25 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 26 | caffe_gpu_sign(count, bottom_data, bottom_diff); 27 | caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); 28 | } 29 | } 30 | 31 | INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); 32 | 33 | 34 | } // namespace caffe 35 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/argmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "caffe/layer.hpp" 7 | #include "caffe/vision_layers.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void ArgMaxLayer::LayerSetUp(const vector*>& bottom, 13 | const vector*>& top) { 14 | out_max_val_ = this->layer_param_.argmax_param().out_max_val(); 15 | top_k_ = this->layer_param_.argmax_param().top_k(); 16 | 
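// out_max_val_ toggles the output between argmax indices only and
// (index, value) pairs; top_k_ is how many top-scoring classes to report
// per instance (see Reshape below for the resulting blob shapes).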
CHECK_GE(top_k_, 1) << " top k must not be less than 1."; 17 | CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) 18 | << "top_k must be less than or equal to the number of classes."; 19 | } 20 | 21 | template 22 | void ArgMaxLayer::Reshape(const vector*>& bottom, 23 | const vector*>& top) { 24 | if (out_max_val_) { 25 | // Produces max_ind and max_val 26 | top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); 27 | } else { 28 | // Produces only max_ind 29 | top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); 30 | } 31 | } 32 | 33 | template 34 | void ArgMaxLayer::Forward_cpu(const vector*>& bottom, 35 | const vector*>& top) { 36 | const Dtype* bottom_data = bottom[0]->cpu_data(); 37 | Dtype* top_data = top[0]->mutable_cpu_data(); 38 | int num = bottom[0]->num(); 39 | int dim = bottom[0]->count() / bottom[0]->num(); 40 | for (int i = 0; i < num; ++i) { 41 | std::vector > bottom_data_vector; 42 | for (int j = 0; j < dim; ++j) { 43 | bottom_data_vector.push_back( 44 | std::make_pair(bottom_data[i * dim + j], j)); 45 | } 46 | std::partial_sort( 47 | bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, 48 | bottom_data_vector.end(), std::greater >()); 49 | for (int j = 0; j < top_k_; ++j) { 50 | top_data[top[0]->offset(i, 0, j)] = bottom_data_vector[j].second; 51 | } 52 | if (out_max_val_) { 53 | for (int j = 0; j < top_k_; ++j) { 54 | top_data[top[0]->offset(i, 1, j)] = bottom_data_vector[j].first; 55 | } 56 | } 57 | } 58 | } 59 | 60 | INSTANTIATE_CLASS(ArgMaxLayer); 61 | REGISTER_LAYER_CLASS(ArgMax); 62 | 63 | } // namespace caffe 64 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/base_data_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/data_layers.hpp" 4 | 5 | namespace caffe { 6 | 7 | template 8 | void BasePrefetchingDataLayer::Forward_gpu( 9 | const vector*>& bottom, const vector*>& top) { 10 | // First, join the thread 11 | JoinPrefetchThread(); 12 | // Reshape to loaded data. 13 | top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), 14 | this->prefetch_data_.height(), this->prefetch_data_.width()); 15 | // Copy the data 16 | caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), 17 | top[0]->mutable_gpu_data()); 18 | if (this->output_labels_) { 19 | caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), 20 | top[1]->mutable_gpu_data()); 21 | } 22 | // Start a new prefetch thread 23 | CreatePrefetchThread(); 24 | } 25 | 26 | INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); 27 | 28 | } // namespace caffe 29 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/bnll_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | const float kBNLL_THRESHOLD = 50.; 10 | 11 | template 12 | void BNLLLayer::Forward_cpu(const vector*>& bottom, 13 | const vector*>& top) { 14 | const Dtype* bottom_data = bottom[0]->cpu_data(); 15 | Dtype* top_data = top[0]->mutable_cpu_data(); 16 | const int count = bottom[0]->count(); 17 | for (int i = 0; i < count; ++i) { 18 | top_data[i] = bottom_data[i] > 0 ? 19 | bottom_data[i] + log(1. + exp(-bottom_data[i])) : 20 | log(1. 
+ exp(bottom_data[i])); 21 | } 22 | } 23 | 24 | template 25 | void BNLLLayer::Backward_cpu(const vector*>& top, 26 | const vector& propagate_down, 27 | const vector*>& bottom) { 28 | if (propagate_down[0]) { 29 | const Dtype* bottom_data = bottom[0]->cpu_data(); 30 | const Dtype* top_diff = top[0]->cpu_diff(); 31 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 32 | const int count = bottom[0]->count(); 33 | Dtype expval; 34 | for (int i = 0; i < count; ++i) { 35 | expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); 36 | bottom_diff[i] = top_diff[i] * expval / (expval + 1.); 37 | } 38 | } 39 | } 40 | 41 | #ifdef CPU_ONLY 42 | STUB_GPU(BNLLLayer); 43 | #endif 44 | 45 | INSTANTIATE_CLASS(BNLLLayer); 46 | REGISTER_LAYER_CLASS(BNLL); 47 | 48 | } // namespace caffe 49 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/bnll_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | const float kBNLL_THRESHOLD = 50.; 10 | 11 | template 12 | __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { 13 | CUDA_KERNEL_LOOP(index, n) { 14 | out[index] = in[index] > 0 ? 15 | in[index] + log(1. + exp(-in[index])) : 16 | log(1. + exp(in[index])); 17 | } 18 | } 19 | 20 | template 21 | void BNLLLayer::Forward_gpu(const vector*>& bottom, 22 | const vector*>& top) { 23 | const Dtype* bottom_data = bottom[0]->gpu_data(); 24 | Dtype* top_data = top[0]->mutable_gpu_data(); 25 | const int count = bottom[0]->count(); 26 | // NOLINT_NEXT_LINE(whitespace/operators) 27 | BNLLForward<<>>( 28 | count, bottom_data, top_data); 29 | CUDA_POST_KERNEL_CHECK; 30 | } 31 | 32 | template 33 | __global__ void BNLLBackward(const int n, const Dtype* in_diff, 34 | const Dtype* in_data, Dtype* out_diff) { 35 | CUDA_KERNEL_LOOP(index, n) { 36 | Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); 37 | out_diff[index] = in_diff[index] * expval / (expval + 1.); 38 | } 39 | } 40 | 41 | template 42 | void BNLLLayer::Backward_gpu(const vector*>& top, 43 | const vector& propagate_down, 44 | const vector*>& bottom) { 45 | if (propagate_down[0]) { 46 | const Dtype* bottom_data = bottom[0]->gpu_data(); 47 | const Dtype* top_diff = top[0]->gpu_diff(); 48 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 49 | const int count = bottom[0]->count(); 50 | // NOLINT_NEXT_LINE(whitespace/operators) 51 | BNLLBackward<<>>( 52 | count, top_diff, bottom_data, bottom_diff); 53 | CUDA_POST_KERNEL_CHECK; 54 | } 55 | } 56 | 57 | INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); 58 | 59 | 60 | } // namespace caffe 61 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/concat_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "caffe/layer.hpp" 4 | #include "caffe/util/math_functions.hpp" 5 | #include "caffe/vision_layers.hpp" 6 | 7 | namespace caffe { 8 | 9 | template 10 | void ConcatLayer::Forward_gpu(const vector*>& bottom, 11 | const vector*>& top) { 12 | Dtype* top_data = top[0]->mutable_gpu_data(); 13 | int offset_concat_axis = 0; 14 | const int top_concat_axis = top[0]->shape(concat_axis_); 15 | for (int i = 0; i < bottom.size(); ++i) { 16 | const Dtype* bottom_data = bottom[i]->gpu_data(); 17 | const int bottom_concat_axis = bottom[i]->shape(concat_axis_); 
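// Each of the num_concats_ outer slices of this bottom contributes a
// contiguous run of bottom_concat_axis * concat_input_size_ values; within
// slice n of the top it lands offset_concat_axis units along the concat axis.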
18 | for (int n = 0; n < num_concats_; ++n) { 19 | caffe_copy(bottom_concat_axis * concat_input_size_, 20 | bottom_data + n * bottom_concat_axis * concat_input_size_, 21 | top_data + (n * top_concat_axis + offset_concat_axis) 22 | * concat_input_size_); 23 | } 24 | offset_concat_axis += bottom_concat_axis; 25 | } 26 | } 27 | 28 | template 29 | void ConcatLayer::Backward_gpu(const vector*>& top, 30 | const vector& propagate_down, const vector*>& bottom) { 31 | const Dtype* top_diff = top[0]->gpu_diff(); 32 | int offset_concat_axis = 0; 33 | const int top_concat_axis = top[0]->shape(concat_axis_); 34 | for (int i = 0; i < bottom.size(); ++i) { 35 | if (!propagate_down[i]) { continue; } 36 | Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); 37 | const int bottom_concat_axis = bottom[i]->shape(concat_axis_); 38 | for (int n = 0; n < num_concats_; ++n) { 39 | caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + 40 | (n * top_concat_axis + offset_concat_axis) * concat_input_size_, 41 | bottom_diff + n * bottom_concat_axis * concat_input_size_); 42 | } 43 | offset_concat_axis += bottom_concat_axis; 44 | } 45 | } 46 | 47 | INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); 48 | 49 | } // namespace caffe 50 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_pooling_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/filler.hpp" 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/im2col.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | #include "caffe/vision_layers.hpp" 9 | 10 | namespace caffe { 11 | 12 | template 13 | void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, 14 | const vector*>& top) { 15 | PoolingLayer::LayerSetUp(bottom, top); 16 | // Sanity check: CUDNN currently only supports pad == 0. 17 | CHECK_EQ(this->pad_h_, 0); 18 | CHECK_EQ(this->pad_w_, 0); 19 | CUDNN_CHECK(cudnnCreate(&handle_)); 20 | cudnn::createTensor4dDesc(&bottom_desc_); 21 | cudnn::createTensor4dDesc(&top_desc_); 22 | cudnn::createPoolingDesc(&pooling_desc_, 23 | this->layer_param_.pooling_param().pool(), &mode_, 24 | this->kernel_h_, this->kernel_w_, this->stride_h_, this->stride_w_); 25 | handles_setup_ = true; 26 | } 27 | 28 | template 29 | void CuDNNPoolingLayer::Reshape(const vector*>& bottom, 30 | const vector*>& top) { 31 | PoolingLayer::Reshape(bottom, top); 32 | cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), 33 | this->channels_, this->height_, this->width_); 34 | cudnn::setTensor4dDesc(&top_desc_, bottom[0]->num(), 35 | this->channels_, this->pooled_height_, this->pooled_width_); 36 | } 37 | 38 | template 39 | CuDNNPoolingLayer::~CuDNNPoolingLayer() { 40 | // Check that handles have been setup before destroying. 
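// (If LayerSetUp never ran, for instance because setup failed earlier, the
// descriptors below were never created and destroying them would be invalid.)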
41 | if (!handles_setup_) { return; } 42 | 43 | cudnnDestroyTensor4dDescriptor(bottom_desc_); 44 | cudnnDestroyTensor4dDescriptor(top_desc_); 45 | cudnnDestroyPoolingDescriptor(pooling_desc_); 46 | cudnnDestroy(handle_); 47 | } 48 | 49 | INSTANTIATE_CLASS(CuDNNPoolingLayer); 50 | 51 | } // namespace caffe 52 | #endif 53 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_pooling_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | 4 | #include "caffe/filler.hpp" 5 | #include "caffe/layer.hpp" 6 | #include "caffe/util/im2col.hpp" 7 | #include "caffe/util/math_functions.hpp" 8 | #include "caffe/vision_layers.hpp" 9 | 10 | namespace caffe { 11 | 12 | template 13 | void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, 14 | const vector*>& top) { 15 | const Dtype* bottom_data = bottom[0]->gpu_data(); 16 | Dtype* top_data = top[0]->mutable_gpu_data(); 17 | CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, 18 | bottom_desc_, bottom_data, top_desc_, top_data)); 19 | } 20 | 21 | template 22 | void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, 23 | const vector& propagate_down, const vector*>& bottom) { 24 | if (!propagate_down[0]) { 25 | return; 26 | } 27 | const Dtype* top_diff = top[0]->gpu_diff(); 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* bottom_data = bottom[0]->gpu_data(); 30 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 31 | CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, 32 | top_desc_, top_data, top_desc_, top_diff, 33 | bottom_desc_, bottom_data, bottom_desc_, bottom_diff)); 34 | } 35 | 36 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNPoolingLayer); 37 | 38 | } // namespace caffe 39 | #endif 40 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_relu_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, 12 | const vector*>& top) { 13 | ReLULayer::LayerSetUp(bottom, top); 14 | // initialize cuDNN 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | handles_setup_ = true; 19 | } 20 | 21 | template 22 | void CuDNNReLULayer::Reshape(const vector*>& bottom, 23 | const vector*>& top) { 24 | ReLULayer::Reshape(bottom, top); 25 | const int N = bottom[0]->num(); 26 | const int K = bottom[0]->channels(); 27 | const int H = bottom[0]->height(); 28 | const int W = bottom[0]->width(); 29 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 30 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 31 | } 32 | 33 | template 34 | CuDNNReLULayer::~CuDNNReLULayer() { 35 | // Check that handles have been setup before destroying. 
36 | if (!handles_setup_) { return; } 37 | 38 | cudnnDestroyTensor4dDescriptor(this->bottom_desc_); 39 | cudnnDestroyTensor4dDescriptor(this->top_desc_); 40 | cudnnDestroy(this->handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNReLULayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_relu_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | // Fallback to standard Caffe for leaky ReLU. 14 | if (ReLULayer::layer_param_.relu_param().negative_slope() != 0) { 15 | return ReLULayer::Forward_gpu(bottom, top); 16 | } 17 | 18 | const Dtype* bottom_data = bottom[0]->gpu_data(); 19 | Dtype* top_data = top[0]->mutable_gpu_data(); 20 | CUDNN_CHECK(cudnnActivationForward(this->handle_, 21 | CUDNN_ACTIVATION_RELU, 22 | this->bottom_desc_, bottom_data, this->top_desc_, top_data)); 23 | } 24 | 25 | template 26 | void CuDNNReLULayer::Backward_gpu(const vector*>& top, 27 | const vector& propagate_down, 28 | const vector*>& bottom) { 29 | if (!propagate_down[0]) { 30 | return; 31 | } 32 | 33 | // Fallback to standard Caffe for leaky ReLU. 34 | if (ReLULayer::layer_param_.relu_param().negative_slope() != 0) { 35 | return ReLULayer::Backward_gpu(top, propagate_down, bottom); 36 | } 37 | 38 | const Dtype* top_data = top[0]->gpu_data(); 39 | const Dtype* top_diff = top[0]->gpu_diff(); 40 | const Dtype* bottom_data = bottom[0]->gpu_data(); 41 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 42 | CUDNN_CHECK(cudnnActivationBackward(this->handle_, 43 | CUDNN_ACTIVATION_RELU, 44 | this->top_desc_, top_data, this->top_desc_, top_diff, 45 | this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); 46 | } 47 | 48 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNReLULayer); 49 | 50 | } // namespace caffe 51 | #endif 52 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_sigmoid_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, 12 | const vector*>& top) { 13 | SigmoidLayer::LayerSetUp(bottom, top); 14 | // initialize cuDNN 15 | CUDNN_CHECK(cudnnCreate(&handle_)); 16 | cudnn::createTensor4dDesc(&bottom_desc_); 17 | cudnn::createTensor4dDesc(&top_desc_); 18 | handles_setup_ = true; 19 | } 20 | 21 | template 22 | void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, 23 | const vector*>& top) { 24 | SigmoidLayer::Reshape(bottom, top); 25 | const int N = bottom[0]->num(); 26 | const int K = bottom[0]->channels(); 27 | const int H = bottom[0]->height(); 28 | const int W = bottom[0]->width(); 29 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 30 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 31 | } 32 | 33 | template 34 | CuDNNSigmoidLayer::~CuDNNSigmoidLayer() { 35 | // Check that handles have been setup before destroying. 
36 | if (!handles_setup_) { return; } 37 | 38 | cudnnDestroyTensor4dDescriptor(this->bottom_desc_); 39 | cudnnDestroyTensor4dDescriptor(this->top_desc_); 40 | cudnnDestroy(this->handle_); 41 | } 42 | 43 | INSTANTIATE_CLASS(CuDNNSigmoidLayer); 44 | 45 | } // namespace caffe 46 | #endif 47 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_sigmoid_layer.cu: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | 5 | #include "caffe/layer.hpp" 6 | #include "caffe/vision_layers.hpp" 7 | 8 | namespace caffe { 9 | 10 | template 11 | void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, 12 | const vector*>& top) { 13 | const Dtype* bottom_data = bottom[0]->gpu_data(); 14 | Dtype* top_data = top[0]->mutable_gpu_data(); 15 | CUDNN_CHECK(cudnnActivationForward(this->handle_, 16 | CUDNN_ACTIVATION_SIGMOID, 17 | this->bottom_desc_, bottom_data, this->top_desc_, top_data)); 18 | } 19 | 20 | template 21 | void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, 22 | const vector& propagate_down, 23 | const vector*>& bottom) { 24 | if (!propagate_down[0]) { 25 | return; 26 | } 27 | 28 | const Dtype* top_data = top[0]->gpu_data(); 29 | const Dtype* top_diff = top[0]->gpu_diff(); 30 | const Dtype* bottom_data = bottom[0]->gpu_data(); 31 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 32 | CUDNN_CHECK(cudnnActivationBackward(this->handle_, 33 | CUDNN_ACTIVATION_SIGMOID, 34 | this->top_desc_, top_data, this->top_desc_, top_diff, 35 | this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff)); 36 | } 37 | 38 | INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSigmoidLayer); 39 | 40 | } // namespace caffe 41 | #endif 42 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_softmax_layer.cpp: -------------------------------------------------------------------------------- 1 | #ifdef USE_CUDNN 2 | #include 3 | #include 4 | #include 5 | 6 | #include "thrust/device_vector.h" 7 | 8 | #include "caffe/layer.hpp" 9 | #include "caffe/util/math_functions.hpp" 10 | #include "caffe/vision_layers.hpp" 11 | 12 | namespace caffe { 13 | 14 | template 15 | void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, 16 | const vector*>& top) { 17 | SoftmaxLayer::LayerSetUp(bottom, top); 18 | // Initialize CUDNN. 19 | CUDNN_CHECK(cudnnCreate(&handle_)); 20 | cudnn::createTensor4dDesc(&bottom_desc_); 21 | cudnn::createTensor4dDesc(&top_desc_); 22 | handles_setup_ = true; 23 | } 24 | 25 | template 26 | void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, 27 | const vector*>& top) { 28 | SoftmaxLayer::Reshape(bottom, top); 29 | int N = this->outer_num_; 30 | int K = bottom[0]->shape(this->softmax_axis_); 31 | int H = this->inner_num_; 32 | int W = 1; 33 | cudnn::setTensor4dDesc(&bottom_desc_, N, K, H, W); 34 | cudnn::setTensor4dDesc(&top_desc_, N, K, H, W); 35 | } 36 | 37 | template 38 | CuDNNSoftmaxLayer::~CuDNNSoftmaxLayer() { 39 | // Check that handles have been setup before destroying. 
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_softmax_layer.cpp: --------------------------------------------------------------------------------

#ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::LayerSetUp(bottom, top);
  // Initialize CUDNN.
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SoftmaxLayer<Dtype>::Reshape(bottom, top);
  int N = this->outer_num_;
  int K = bottom[0]->shape(this->softmax_axis_);
  int H = this->inner_num_;
  int W = 1;
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNSoftmaxLayer<Dtype>::~CuDNNSoftmaxLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensor4dDescriptor(bottom_desc_);
  cudnnDestroyTensor4dDescriptor(top_desc_);
  cudnnDestroy(handle_);
}

INSTANTIATE_CLASS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_softmax_layer.cu: --------------------------------------------------------------------------------

#ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE,
      CUDNN_SOFTMAX_MODE_CHANNEL,
      bottom_desc_, bottom_data, top_desc_, top_data));
}

template <typename Dtype>
void CuDNNSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE,
        CUDNN_SOFTMAX_MODE_CHANNEL,
        top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff));
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNSoftmaxLayer);

}  // namespace caffe
#endif
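Note how Reshape above folds an N-D blob into the 4-D tensor cuDNN expects: everything before the softmax axis becomes N, the softmax axis itself becomes the channel count K, everything after it becomes H, and W is 1, so CUDNN_SOFTMAX_MODE_CHANNEL normalizes exactly over the chosen axis. A small standalone sketch of that mapping (assumed shape, for exposition only):

#include <functional>
#include <numeric>
#include <vector>

int main() {
  const std::vector<int> shape = {2, 10, 3, 3};  // assumed blob shape
  const int axis = 1;                            // softmax_axis_
  const int N = std::accumulate(shape.begin(), shape.begin() + axis, 1,
                                std::multiplies<int>());  // outer_num_
  const int K = shape[axis];                              // classes
  const int H = std::accumulate(shape.begin() + axis + 1, shape.end(), 1,
                                std::multiplies<int>());  // inner_num_
  const int W = 1;
  // N=2, K=10, H=9, W=1: cuDNN softmaxes over K for every (n, h, w) triple.
  return N + K + H + W > 0 ? 0 : 1;
}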
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_tanh_layer.cpp: --------------------------------------------------------------------------------

#ifdef USE_CUDNN
#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::LayerSetUp(bottom, top);
  // initialize cuDNN
  CUDNN_CHECK(cudnnCreate(&handle_));
  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
  handles_setup_ = true;
}

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  TanHLayer<Dtype>::Reshape(bottom, top);
  const int N = bottom[0]->num();
  const int K = bottom[0]->channels();
  const int H = bottom[0]->height();
  const int W = bottom[0]->width();
  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
}

template <typename Dtype>
CuDNNTanHLayer<Dtype>::~CuDNNTanHLayer() {
  // Check that handles have been setup before destroying.
  if (!handles_setup_) { return; }

  cudnnDestroyTensor4dDescriptor(this->bottom_desc_);
  cudnnDestroyTensor4dDescriptor(this->top_desc_);
  cudnnDestroy(this->handle_);
}

INSTANTIATE_CLASS(CuDNNTanHLayer);

}  // namespace caffe
#endif

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/cudnn_tanh_layer.cu: --------------------------------------------------------------------------------

#ifdef USE_CUDNN
#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  CUDNN_CHECK(cudnnActivationForward(this->handle_,
      CUDNN_ACTIVATION_TANH,
      this->bottom_desc_, bottom_data, this->top_desc_, top_data));
}

template <typename Dtype>
void CuDNNTanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }

  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  CUDNN_CHECK(cudnnActivationBackward(this->handle_,
      CUDNN_ACTIVATION_TANH,
      this->top_desc_, top_data, this->top_desc_, top_diff,
      this->bottom_desc_, bottom_data, this->bottom_desc_, bottom_diff));
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNTanHLayer);

}  // namespace caffe
#endif
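As in the sigmoid and ReLU variants, cudnnActivationBackward is fed the layer's own output (top_data): for saturating activations the derivative is cheapest in terms of the output y rather than the input x, e.g. tanh'(x) = 1 - y^2 with y = tanh(x). A minimal check of that identity:

#include <cassert>
#include <cmath>

int main() {
  const double x = 0.3;
  const double y = std::tanh(x);
  // d/dx tanh(x) = 1 - tanh(x)^2, computable from the cached output alone.
  const double grad_from_output = 1.0 - y * y;
  const double grad_numeric =
      (std::tanh(x + 1e-6) - std::tanh(x - 1e-6)) / 2e-6;
  assert(std::abs(grad_from_output - grad_numeric) < 1e-6);
  return 0;
}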
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/euclidean_loss_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.cpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_cpu_diff());  // b
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(EuclideanLossLayer);
#endif

INSTANTIATE_CLASS(EuclideanLossLayer);
REGISTER_LAYER_CLASS(EuclideanLoss);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/euclidean_loss_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),
      bottom[1]->gpu_data(),
      diff_.mutable_gpu_data());
  Dtype dot;
  caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_gpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.gpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // b
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);

}  // namespace caffe
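The loss computed above is E = (1/2N) * sum ||a_i - b_i||^2, and the backward pass just rescales the cached difference by +/- top_diff / N, which is why diff_ is kept around from the forward pass. A small numeric check (hypothetical values):

#include <cassert>
#include <cmath>

int main() {
  const int num = 2;                        // N: batch size
  const double a[] = {1.0, 2.0, 3.0, 4.0};  // bottom[0]: two 2-D samples
  const double b[] = {1.5, 1.0, 2.0, 6.0};  // bottom[1]
  double dot = 0;
  for (int i = 0; i < 4; ++i) { const double d = a[i] - b[i]; dot += d * d; }
  const double loss = dot / num / 2.0;      // (0.25 + 1 + 1 + 4) / 2 / 2
  assert(std::abs(loss - 1.5625) < 1e-12);
  // Gradient w.r.t. a is +(a - b)/num; w.r.t. b it is -(a - b)/num.
  return 0;
}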
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/exp_layer.cpp: --------------------------------------------------------------------------------

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void ExpLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  const Dtype base = this->layer_param_.exp_param().base();
  if (base != Dtype(-1)) {
    CHECK_GT(base, 0) << "base must be strictly positive.";
  }
  // If base == -1, interpret the base as e and set log_base = 1 exactly.
  // Otherwise, calculate its log explicitly.
  const Dtype log_base = (base == Dtype(-1)) ? Dtype(1) : log(base);
  CHECK(!isnan(log_base))
      << "NaN result: log(base) = log(" << base << ") = " << log_base;
  CHECK(!isinf(log_base))
      << "Inf result: log(base) = log(" << base << ") = " << log_base;
  const Dtype input_scale = this->layer_param_.exp_param().scale();
  const Dtype input_shift = this->layer_param_.exp_param().shift();
  inner_scale_ = log_base * input_scale;
  outer_scale_ = (input_shift == Dtype(0)) ? Dtype(1) : pow(base, input_shift);
}

template <typename Dtype>
void ExpLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  if (inner_scale_ == Dtype(1)) {
    caffe_exp(count, bottom_data, top_data);
  } else {
    caffe_cpu_scale(count, inner_scale_, bottom_data, top_data);
    caffe_exp(count, top_data, top_data);
  }
  if (outer_scale_ != Dtype(1)) {
    caffe_scal(count, outer_scale_, top_data);
  }
}

template <typename Dtype>
void ExpLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  caffe_mul(count, top_data, top_diff, bottom_diff);
  if (inner_scale_ != Dtype(1)) {
    caffe_scal(count, inner_scale_, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(ExpLayer);
#endif

INSTANTIATE_CLASS(ExpLayer);
REGISTER_LAYER_CLASS(Exp);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/exp_layer.cu: --------------------------------------------------------------------------------

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void ExpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (inner_scale_ == Dtype(1)) {
    caffe_gpu_exp(count, bottom_data, top_data);
  } else {
    caffe_gpu_scale(count, inner_scale_, bottom_data, top_data);
    caffe_gpu_exp(count, top_data, top_data);
  }
  if (outer_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, outer_scale_, top_data);
  }
}

template <typename Dtype>
void ExpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  caffe_gpu_mul(count, top_data, top_diff, bottom_diff);
  if (inner_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, inner_scale_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer);

}  // namespace caffe
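LayerSetUp above folds y = base^(scale*x + shift) into y = outer_scale_ * exp(inner_scale_ * x), with inner_scale_ = log(base)*scale and outer_scale_ = base^shift, so the forward pass needs only one exp and at most two scalings. A quick check of the identity (hypothetical parameter values):

#include <cassert>
#include <cmath>

int main() {
  const double base = 2.0, scale = 3.0, shift = 0.5, x = 0.7;  // assumed
  const double direct = std::pow(base, scale * x + shift);
  const double inner_scale = std::log(base) * scale;
  const double outer_scale = std::pow(base, shift);
  const double folded = outer_scale * std::exp(inner_scale * x);
  assert(std::abs(direct - folded) < 1e-12);
  return 0;
}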
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/flatten_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void FlattenLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  vector<int> top_shape(2);
  top_shape[0] = bottom[0]->num();
  top_shape[1] = bottom[0]->count() / bottom[0]->num();
  top[0]->Reshape(top_shape);
  CHECK_EQ(top[0]->count(), bottom[0]->count());
}

template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ShareData(*bottom[0]);
}

template <typename Dtype>
void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  bottom[0]->ShareDiff(*top[0]);
}

INSTANTIATE_CLASS(FlattenLayer);
REGISTER_LAYER_CLASS(Flatten);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/hdf5_data_layer.cu: --------------------------------------------------------------------------------

/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/

#include <stdint.h>
#include <string>
#include <vector>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/data_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"

namespace caffe {

template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int batch_size = this->layer_param_.hdf5_data_param().batch_size();
  for (int i = 0; i < batch_size; ++i, ++current_row_) {
    if (current_row_ == hdf_blobs_[0]->shape(0)) {
      if (num_files_ > 1) {
        current_file_ += 1;
        if (current_file_ == num_files_) {
          current_file_ = 0;
          if (this->layer_param_.hdf5_data_param().shuffle()) {
            std::random_shuffle(file_permutation_.begin(),
                                file_permutation_.end());
          }
          DLOG(INFO) << "Looping around to first file.";
        }
        LoadHDF5FileData(
            hdf_filenames_[file_permutation_[current_file_]].c_str());
      }
      current_row_ = 0;
      if (this->layer_param_.hdf5_data_param().shuffle())
        std::random_shuffle(data_permutation_.begin(),
                            data_permutation_.end());
    }
    for (int j = 0; j < this->layer_param_.top_size(); ++j) {
      int data_dim = top[j]->count() / top[j]->shape(0);
      caffe_copy(data_dim,
          &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_]
            * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/hdf5_output_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "hdf5.h"
#include "hdf5_hl.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void HDF5OutputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_GE(bottom.size(), 2);
  CHECK_EQ(bottom[0]->num(), bottom[1]->num());
  data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                     bottom[0]->height(), bottom[0]->width());
  label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
                      bottom[1]->height(), bottom[1]->width());
  const int data_datum_dim = bottom[0]->count() / bottom[0]->num();
  const int label_datum_dim = bottom[1]->count() / bottom[1]->num();

  for (int i = 0; i < bottom[0]->num(); ++i) {
    caffe_copy(data_datum_dim, &bottom[0]->gpu_data()[i * data_datum_dim],
        &data_blob_.mutable_cpu_data()[i * data_datum_dim]);
    caffe_copy(label_datum_dim, &bottom[1]->gpu_data()[i * label_datum_dim],
        &label_blob_.mutable_cpu_data()[i * label_datum_dim]);
  }
  SaveBlobs();
}

template <typename Dtype>
void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  return;
}

INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/im2col_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void Im2colLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  for (int n = 0; n < bottom[0]->num(); ++n) {
    im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
        width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
        stride_h_, stride_w_, top_data + top[0]->offset(n));
  }
}

template <typename Dtype>
void Im2colLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  for (int n = 0; n < top[0]->num(); ++n) {
    col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_,
        kernel_h_, kernel_w_, pad_h_, pad_w_,
        stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n));
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(Im2colLayer);

}  // namespace caffe
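im2col unrolls every kernel-sized window of the input into a column so that convolution elsewhere in Caffe reduces to a single GEMM; col2im in the backward pass scatter-adds the columns back. A minimal CPU sketch for one channel (illustrative only; no padding or stride):

#include <vector>

// Unroll all k x k windows of an h x w single-channel image into a
// (k*k) x (out_h*out_w) matrix -- the core of the im2col trick.
std::vector<float> im2col_simple(const std::vector<float>& im,
                                 int h, int w, int k) {
  const int out_h = h - k + 1, out_w = w - k + 1;
  std::vector<float> col(k * k * out_h * out_w);
  for (int ki = 0; ki < k; ++ki)
    for (int kj = 0; kj < k; ++kj)
      for (int i = 0; i < out_h; ++i)
        for (int j = 0; j < out_w; ++j)
          col[((ki * k + kj) * out_h + i) * out_w + j] =
              im[(i + ki) * w + (j + kj)];
  return col;
}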
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/inner_product_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
      bottom_data, weight, (Dtype)0., top_data);
  if (bias_term_) {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
        bias_multiplier_.gpu_data(),
        this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
  }
}

template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (this->param_propagate_down_[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    // Gradient with respect to weight
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
        top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff());
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bias
    caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
        bias_multiplier_.gpu_data(), (Dtype)0.,
        this->blobs_[1]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    // Gradient with respect to bottom data
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
        top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
        bottom[0]->mutable_gpu_diff());
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);

}  // namespace caffe
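The three BLAS calls above are the standard fully-connected gradients: with top_diff of shape M x N, bottom of shape M x K and weights of shape N x K, the weight gradient is top_diff^T * bottom (N x K), the bias gradient is top_diff^T * 1 (length N), and the bottom gradient is top_diff * W (M x K). A plain-loop sketch of the weight gradient alone (illustrative, equivalent to the CblasTrans GEMM):

#include <vector>

// dW[n][k] = sum_m top_diff[m][n] * bottom[m][k]
void ip_weight_grad(const std::vector<float>& top_diff,  // M x N, row-major
                    const std::vector<float>& bottom,    // M x K, row-major
                    std::vector<float>* dW,              // N x K, row-major
                    int M, int N, int K) {
  dW->assign(N * K, 0.f);
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n)
      for (int k = 0; k < K; ++k)
        (*dW)[n * K + k] += top_diff[m * N + n] * bottom[m * K + k];
}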
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/loss_layer.cpp: --------------------------------------------------------------------------------

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // LossLayers have a non-zero (1) loss by default.
  if (this->layer_param_.loss_weight_size() == 0) {
    this->layer_param_.add_loss_weight(Dtype(1));
  }
}

template <typename Dtype>
void LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->num(), bottom[1]->num())
      << "The data and label should have the same number.";
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
}

INSTANTIATE_CLASS(LossLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/multinomial_logistic_loss_layer.cpp: --------------------------------------------------------------------------------

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[1]->channels(), 1);
  CHECK_EQ(bottom[1]->height(), 1);
  CHECK_EQ(bottom[1]->width(), 1);
}

template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / bottom[0]->num();
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    int label = static_cast<int>(bottom_label[i]);
    Dtype prob = std::max(
        bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
    loss -= log(prob);
  }
  top[0]->mutable_cpu_data()[0] = loss / num;
}

template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* bottom_label = bottom[1]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    int num = bottom[0]->num();
    int dim = bottom[0]->count() / bottom[0]->num();
    caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
    const Dtype scale = - top[0]->cpu_diff()[0] / num;
    for (int i = 0; i < num; ++i) {
      int label = static_cast<int>(bottom_label[i]);
      Dtype prob = std::max(
          bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD));
      bottom_diff[i * dim + label] = scale / prob;
    }
  }
}

INSTANTIATE_CLASS(MultinomialLogisticLossLayer);
REGISTER_LAYER_CLASS(MultinomialLogisticLoss);

}  // namespace caffe
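Backward_cpu above only touches the entry at the true label: for L = -(1/N) * sum_i log p_i[y_i], the gradient with respect to the probability at the label is -1/(N * p) (scaled by the incoming top diff) and zero elsewhere, which is exactly the single scale/prob write in the loop. A quick numeric check against a finite difference (hypothetical values):

#include <cassert>
#include <cmath>

int main() {
  const double p = 0.2;   // predicted probability at the true label
  const int num = 4;      // batch size
  const double eps = 1e-7;
  const double scale = -1.0 / num;             // top diff assumed to be 1
  const double analytic = scale / p;           // the layer's formula: -1/(N*p)
  const double numeric =
      ((-std::log(p + eps) / num) - (-std::log(p - eps) / num)) / (2 * eps);
  assert(std::abs(analytic - numeric) < 1e-5);
  return 0;
}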
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/neuron_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
}

INSTANTIATE_CLASS(NeuronLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/relu_layer.cpp: --------------------------------------------------------------------------------

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + negative_slope * std::min(bottom_data[i], Dtype(0));
  }
}

template <typename Dtype>
void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    for (int i = 0; i < count; ++i) {
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + negative_slope * (bottom_data[i] <= 0));
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(ReLULayer);
#endif

INSTANTIATE_CLASS(ReLULayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/relu_layer.cu: --------------------------------------------------------------------------------

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
  }
}

template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data, negative_slope);
  CUDA_POST_KERNEL_CHECK;
  // << " count: " << count << " bottom_data: "
  //     << (unsigned long)bottom_data
  //     << " top_data: " << (unsigned long)top_data
  //     << " blocks: " << CAFFE_GET_BLOCKS(count)
  //     << " threads: " << CAFFE_CUDA_NUM_THREADS;
}

template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * negative_slope);
  }
}

template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu: --------------------------------------------------------------------------------

#include <algorithm>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  const int num = bottom[0]->num();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->cpu_data();
  const Dtype* target = bottom[1]->cpu_data();
  Dtype loss = 0;
  for (int i = 0; i < count; ++i) {
    loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) -
        log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0)));
  }
  top[0]->mutable_cpu_data()[0] = loss / num;
}

template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
    const Dtype* target = bottom[1]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_copy(count, sigmoid_output_data, bottom_diff);
    caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
    // Scale down gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_gpu_scal(count, loss_weight / num, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);

}  // namespace caffe
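The forward loop above uses the usual overflow-safe rewrite of the cross-entropy: for logits x and targets t, -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))] equals -x*(t - s) + log(1 + exp(x - 2*x*s)) with s = [x >= 0], which keeps the exp argument non-positive. A quick numeric check:

#include <cassert>
#include <cmath>

// Stable per-element sigmoid cross-entropy, mirroring the loop above.
double stable_xent(double x, double t) {
  const double s = (x >= 0) ? 1.0 : 0.0;
  return -(x * (t - s) - std::log(1 + std::exp(x - 2 * x * s)));
}

int main() {
  // Agrees with the naive formula wherever that is still representable.
  const double x = 3.0, t = 1.0;  // hypothetical logit and target
  const double p = 1.0 / (1.0 + std::exp(-x));
  const double naive = -(t * std::log(p) + (1 - t) * std::log(1 - p));
  assert(std::abs(stable_xent(x, t) - naive) < 1e-12);
  // The stable form also survives large logits where exp(x) would overflow:
  assert(std::isfinite(stable_xent(1000.0, 0.0)));
  return 0;
}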
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/sigmoid_layer.cpp: --------------------------------------------------------------------------------

#include <algorithm>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
  return 1. / (1. + exp(-x));
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = sigmoid(bottom_data[i]);
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    for (int i = 0; i < count; ++i) {
      const Dtype sigmoid_x = top_data[i];
      bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SigmoidLayer);
#endif

INSTANTIATE_CLASS(SigmoidLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/sigmoid_layer.cu: --------------------------------------------------------------------------------

#include <algorithm>
#include <cmath>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = 1. / (1. + exp(-in[index]));
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
  // << " count: " << count << " bottom_data: "
  //     << (unsigned long)bottom_data
  //     << " top_data: " << (unsigned long)top_data
  //     << " blocks: " << CAFFE_GET_BLOCKS(count)
  //     << " threads: " << CAFFE_CUDA_NUM_THREADS;
}

template <typename Dtype>
__global__ void SigmoidBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype sigmoid_x = out_data[index];
    out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
  }
}

template <typename Dtype>
void SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/silence_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_set(bottom[i]->count(), Dtype(0),
                bottom[i]->mutable_cpu_data());
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SilenceLayer);
#endif

INSTANTIATE_CLASS(SilenceLayer);
REGISTER_LAYER_CLASS(Silence);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/silence_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Do nothing.
}

template <typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < bottom.size(); ++i) {
    if (propagate_down[i]) {
      caffe_gpu_set(bottom[i]->count(), Dtype(0),
                    bottom[i]->mutable_gpu_data());
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/slice_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int offset_slice_axis = 0;
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
  for (int i = 0; i < top.size(); ++i) {
    Dtype* top_data = top[i]->mutable_gpu_data();
    const int top_slice_axis = top[i]->shape(slice_axis_);
    for (int n = 0; n < num_slices_; ++n) {
      const int top_offset = n * top_slice_axis * slice_size_;
      const int bottom_offset =
          (n * bottom_slice_axis + offset_slice_axis) * slice_size_;
      caffe_copy(top_slice_axis * slice_size_,
          bottom_data + bottom_offset, top_data + top_offset);
    }
    offset_slice_axis += top_slice_axis;
  }
}

template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  int offset_slice_axis = 0;
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    const int top_slice_axis = top[i]->shape(slice_axis_);
    for (int n = 0; n < num_slices_; ++n) {
      const int top_offset = n * top_slice_axis * slice_size_;
      const int bottom_offset =
          (n * bottom_slice_axis + offset_slice_axis) * slice_size_;
      caffe_copy(top_slice_axis * slice_size_,
          top_diff + top_offset, bottom_diff + bottom_offset);
    }
    offset_slice_axis += top_slice_axis;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/smooth_L1_loss_layer.cpp: --------------------------------------------------------------------------------

// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------

#include "caffe/fast_rcnn_layers.hpp"

namespace caffe {

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  has_weights_ = (bottom.size() == 3);
}

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
  CHECK_EQ(bottom[0]->height(), bottom[1]->height());
  CHECK_EQ(bottom[0]->width(), bottom[1]->width());
  if (has_weights_) {
    CHECK_EQ(bottom[0]->channels(), bottom[2]->channels());
    CHECK_EQ(bottom[0]->height(), bottom[2]->height());
    CHECK_EQ(bottom[0]->width(), bottom[2]->width());
  }
  diff_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  errors_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
}

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NOT_IMPLEMENTED;
}

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  NOT_IMPLEMENTED;
}

#ifdef CPU_ONLY
STUB_GPU(SmoothL1LossLayer);
#endif

INSTANTIATE_CLASS(SmoothL1LossLayer);
REGISTER_LAYER_CLASS(SmoothL1Loss);

}  // namespace caffe
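SmoothL1Loss is GPU-only here (both CPU paths are NOT_IMPLEMENTED). The elementwise function it computes, from the Fast R-CNN paper, is 0.5*x^2 for |x| < 1 and |x| - 0.5 otherwise, with gradient x inside the quadratic zone and sign(x) outside. A reference CPU sketch of the function alone (illustrative, not the repository's kernel):

#include <cmath>

// Smooth L1 (Huber-style) penalty used for bounding-box regression.
inline float smooth_l1(float x) {
  const float ax = std::fabs(x);
  return (ax < 1.f) ? 0.5f * x * x : ax - 0.5f;
}

// Its derivative: linear near zero, clipped to +/-1 further out,
// which is what makes the loss robust to regression outliers.
inline float smooth_l1_grad(float x) {
  return (std::fabs(x) < 1.f) ? x : ((x > 0.f) ? 1.f : -1.f);
}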
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/split_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  count_ = bottom[0]->count();
  for (int i = 0; i < top.size(); ++i) {
    // Do not allow in-place computation in the SplitLayer. Instead, share data
    // by reference in the forward pass, and keep separate diff allocations in
    // the backward pass. (Technically, it should be possible to share the diff
    // blob of the first split output with the input, but this seems to cause
    // some strange effects in practice...)
    CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not "
        "allow in-place computation.";
    top[i]->ReshapeLike(*bottom[0]);
    CHECK_EQ(count_, top[i]->count());
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
    return;
  }
  caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(),
            bottom[0]->mutable_cpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(SplitLayer);
#endif

INSTANTIATE_CLASS(SplitLayer);
REGISTER_LAYER_CLASS(Split);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/split_layer.cu: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
    return;
  }
  caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
                bottom[0]->mutable_gpu_diff());
  // Add remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/tanh_layer.cpp: --------------------------------------------------------------------------------

// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void TanHLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = tanh(bottom_data[i]);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype tanhx;
    for (int i = 0; i < count; ++i) {
      tanhx = top_data[i];
      bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(TanHLayer);
#endif

INSTANTIATE_CLASS(TanHLayer);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/tanh_layer.cu: --------------------------------------------------------------------------------

// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = tanh(in[index]);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype tanhx = out_data[index];
    out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
  }
}

template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/threshold_layer.cpp: --------------------------------------------------------------------------------

#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
void ThresholdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  threshold_ = this->layer_param_.threshold_param().threshold();
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0);
  }
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(ThresholdLayer, Forward);
#endif

INSTANTIATE_CLASS(ThresholdLayer);
REGISTER_LAYER_CLASS(Threshold);

}  // namespace caffe
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/layers/threshold_layer.cu: --------------------------------------------------------------------------------

#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ThresholdForward(const int n, const Dtype threshold,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > threshold ? 1 : 0;
  }
}

template <typename Dtype>
void ThresholdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  ThresholdForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, threshold_, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FORWARD(ThresholdLayer);

}  // namespace caffe

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/syncedmem.cpp: --------------------------------------------------------------------------------

#include <cstring>

#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

SyncedMemory::~SyncedMemory() {
  if (cpu_ptr_ && own_cpu_data_) {
    CaffeFreeHost(cpu_ptr_);
  }

#ifndef CPU_ONLY
  if (gpu_ptr_) {
    CUDA_CHECK(cudaFree(gpu_ptr_));
  }
#endif  // CPU_ONLY
}

inline void SyncedMemory::to_cpu() {
  switch (head_) {
  case UNINITIALIZED:
    CaffeMallocHost(&cpu_ptr_, size_);
    caffe_memset(size_, 0, cpu_ptr_);
    head_ = HEAD_AT_CPU;
    own_cpu_data_ = true;
    break;
  case HEAD_AT_GPU:
#ifndef CPU_ONLY
    if (cpu_ptr_ == NULL) {
      CaffeMallocHost(&cpu_ptr_, size_);
      own_cpu_data_ = true;
    }
    caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_);
    head_ = SYNCED;
#else
    NO_GPU;
#endif
    break;
  case HEAD_AT_CPU:
  case SYNCED:
    break;
  }
}

inline void SyncedMemory::to_gpu() {
#ifndef CPU_ONLY
  switch (head_) {
  case UNINITIALIZED:
    CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
    caffe_gpu_memset(size_, 0, gpu_ptr_);
    head_ = HEAD_AT_GPU;
    break;
  case HEAD_AT_CPU:
    if (gpu_ptr_ == NULL) {
      CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
    }
    caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_);
    head_ = SYNCED;
    break;
  case HEAD_AT_GPU:
  case SYNCED:
    break;
  }
#else
  NO_GPU;
#endif
}

const void* SyncedMemory::cpu_data() {
  to_cpu();
  return (const void*)cpu_ptr_;
}

void SyncedMemory::set_cpu_data(void* data) {
  CHECK(data);
  if (own_cpu_data_) {
    CaffeFreeHost(cpu_ptr_);
  }
  cpu_ptr_ = data;
  head_ = HEAD_AT_CPU;
  own_cpu_data_ = false;
}

const void* SyncedMemory::gpu_data() {
#ifndef CPU_ONLY
  to_gpu();
  return (const void*)gpu_ptr_;
#else
  NO_GPU;
#endif
}

void* SyncedMemory::mutable_cpu_data() {
  to_cpu();
  head_ = HEAD_AT_CPU;
  return cpu_ptr_;
}

void* SyncedMemory::mutable_gpu_data() {
#ifndef CPU_ONLY
  to_gpu();
  head_ = HEAD_AT_GPU;
  return gpu_ptr_;
#else
  NO_GPU;
#endif
}

}  // namespace caffe
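SyncedMemory above is a small state machine: head_ moves between UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, and SYNCED, and each accessor triggers at most one lazy allocation or copy. The mutable_* accessors mark data dirty on that device; the const accessors only synchronize. A usage sketch against the accessors shown above (hypothetical sizes):

#include "caffe/syncedmem.hpp"

void syncedmem_demo() {
  caffe::SyncedMemory mem(16 * sizeof(float));  // UNINITIALIZED, no memory yet
  float* h = static_cast<float*>(mem.mutable_cpu_data());  // allocate; HEAD_AT_CPU
  h[0] = 42.f;
  mem.gpu_data();          // lazy host -> device copy; state becomes SYNCED
  mem.mutable_gpu_data();  // marks HEAD_AT_GPU; the next cpu_data() re-copies
}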
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/CMakeLists.txt: --------------------------------------------------------------------------------

# This option allows building only selected test files, excluding all others
# Usage example:
#   cmake -DBUILD_only_tests="common,net,blob,im2col_kernel"
set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extension")
caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests})
caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests})

# For the 'make runtest' target we don't need to embed test data paths into
# source files, because the test target is executed in the source directory.
# That's why the lines below are commented out. TODO: remove them

# definition needed to include CMake generated files
#add_definitions(-DCMAKE_BUILD)

# generates test_data/sample_data_list.txt.gen.cmake
#caffe_configure_testdatafile(test_data/sample_data_list.txt)

set(the_target test.testbin)
set(test_args --gtest_shuffle)

if(HAVE_CUDA)
  caffe_cuda_compile(test_cuda_objs ${test_cuda})
  list(APPEND test_srcs ${test_cuda_objs} ${test_cuda})
else()
  list(APPEND test_args --gtest_filter="-*GPU*")
endif()

# ---[ Adding test target
add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs})
target_link_libraries(${the_target} gtest ${Caffe_LINK})
caffe_default_properties(${the_target})
caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test")

# ---[ Adding runtest
add_custom_target(runtest COMMAND ${the_target} ${test_args}
                  WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})

-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_caffe_main.cpp: --------------------------------------------------------------------------------

// The main caffe test code. Your test cpp code should include this hpp
// to allow a main function to be compiled into the binary.

#include "caffe/caffe.hpp"
#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
#ifndef CPU_ONLY
cudaDeviceProp CAFFE_TEST_CUDA_PROP;
#endif
}

#ifndef CPU_ONLY
using caffe::CAFFE_TEST_CUDA_PROP;
#endif

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  caffe::GlobalInit(&argc, &argv);
#ifndef CPU_ONLY
  // Before starting testing, let's first print out some CUDA device info.
  int device;
  cudaGetDeviceCount(&device);
  cout << "Cuda number of devices: " << device << endl;
  if (argc > 1) {
    // Use the given device
    device = atoi(argv[1]);
    cudaSetDevice(device);
    cout << "Setting to use device " << device << endl;
  } else if (CUDA_TEST_DEVICE >= 0) {
    // Use the device assigned in the build configuration, with lower priority
    device = CUDA_TEST_DEVICE;
  }
  cudaGetDevice(&device);
  cout << "Current device id: " << device << endl;
  cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device);
#endif
  // invoke the test.
  return RUN_ALL_TESTS();
}
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_common.cpp: --------------------------------------------------------------------------------

#include <cstring>

#include "gtest/gtest.h"

#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {

class CommonTest : public ::testing::Test {};

#ifndef CPU_ONLY  // GPU Caffe singleton test.

TEST_F(CommonTest, TestCublasHandlerGPU) {
  int cuda_device_id;
  CUDA_CHECK(cudaGetDevice(&cuda_device_id));
  EXPECT_TRUE(Caffe::cublas_handle());
}

#endif

TEST_F(CommonTest, TestBrewMode) {
  Caffe::set_mode(Caffe::CPU);
  EXPECT_EQ(Caffe::mode(), Caffe::CPU);
  Caffe::set_mode(Caffe::GPU);
  EXPECT_EQ(Caffe::mode(), Caffe::GPU);
}

TEST_F(CommonTest, TestRandSeedCPU) {
  SyncedMemory data_a(10 * sizeof(int));
  SyncedMemory data_b(10 * sizeof(int));
  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_a.mutable_cpu_data()));

  Caffe::set_random_seed(1701);
  caffe_rng_bernoulli(10, 0.5, static_cast<int*>(data_b.mutable_cpu_data()));

  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(static_cast<const int*>(data_a.cpu_data())[i],
        static_cast<const int*>(data_b.cpu_data())[i]);
  }
}

#ifndef CPU_ONLY  // GPU Caffe singleton test.

TEST_F(CommonTest, TestRandSeedGPU) {
  SyncedMemory data_a(10 * sizeof(unsigned int));
  SyncedMemory data_b(10 * sizeof(unsigned int));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_a.mutable_gpu_data()), 10));
  Caffe::set_random_seed(1701);
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
        static_cast<unsigned int*>(data_b.mutable_gpu_data()), 10));
  for (int i = 0; i < 10; ++i) {
    EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i],
        ((const unsigned int*)(data_b.cpu_data()))[i]);
  }
}

#endif

}  // namespace caffe
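TestRandSeedCPU/GPU above pin down that re-seeding the generator replays the same random stream, which is what makes Caffe's randomized tests reproducible. The same property in a minimal standalone form (std::mt19937 standing in for Caffe's RNG):

#include <cassert>
#include <random>

int main() {
  std::mt19937 a(1701), b(1701);  // same seed the tests use
  std::bernoulli_distribution coin_a(0.5), coin_b(0.5);
  for (int i = 0; i < 10; ++i) {
    assert(coin_a(a) == coin_b(b));  // identical draws, like the EXPECT_EQs
  }
  return 0;
}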
25 | label2 = label + 1 26 | 27 | print data 28 | print label 29 | 30 | with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: 31 | f['data'] = data 32 | f['label'] = label 33 | f['label2'] = label2 34 | 35 | with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: 36 | f.create_dataset( 37 | 'data', data=data + total_size, 38 | compression='gzip', compression_opts=1 39 | ) 40 | f.create_dataset( 41 | 'label', data=label, 42 | compression='gzip', compression_opts=1 43 | ) 44 | f.create_dataset( 45 | 'label2', data=label2, 46 | compression='gzip', compression_opts=1 47 | ) 48 | 49 | with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: 50 | f.write(os.path.dirname(__file__) + '/sample_data.h5\n') 51 | f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') 52 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_data/sample_data.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/src/caffe/test/test_data/sample_data.h5 -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_data/sample_data_2_gzip.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/src/caffe/test/test_data/sample_data_2_gzip.h5 -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_data/sample_data_list.txt: -------------------------------------------------------------------------------- 1 | src/caffe/test/test_data/sample_data.h5 2 | src/caffe/test/test_data/sample_data_2_gzip.h5 3 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_hinge_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <cstdlib> 3 | #include <cstring> 4 | #include <vector> 5 | 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/common.hpp" 10 | #include "caffe/filler.hpp" 11 | #include "caffe/vision_layers.hpp" 12 | 13 | #include "caffe/test/test_caffe_main.hpp" 14 | #include "caffe/test/test_gradient_check_util.hpp" 15 | 16 | namespace caffe { 17 | 18 | template <typename TypeParam> 19 | class HingeLossLayerTest : public MultiDeviceTest<TypeParam> { 20 | typedef typename TypeParam::Dtype Dtype; 21 | 22 | protected: 23 | HingeLossLayerTest() 24 | : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)), 25 | blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)), 26 | blob_top_loss_(new Blob<Dtype>()) { 27 | // fill the values 28 | Caffe::set_random_seed(1701); 29 | FillerParameter filler_param; 30 | filler_param.set_std(10); 31 | GaussianFiller<Dtype> filler(filler_param); 32 | filler.Fill(this->blob_bottom_data_); 33 | blob_bottom_vec_.push_back(blob_bottom_data_); 34 | for (int i = 0; i < blob_bottom_label_->count(); ++i) { 35 | blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; 36 | } 37 | blob_bottom_vec_.push_back(blob_bottom_label_); 38 | blob_top_vec_.push_back(blob_top_loss_); 39 | } 40 | virtual ~HingeLossLayerTest() { 41 | delete blob_bottom_data_; 42 | delete blob_bottom_label_; 43 | delete blob_top_loss_; 44 | } 45 | Blob<Dtype>* const blob_bottom_data_; 46 | Blob<Dtype>* const blob_bottom_label_; 47 | Blob<Dtype>* const blob_top_loss_;
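// Bottom (input) and top (output) blob vectors handed to the layer under test: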
48 | vector<Blob<Dtype>*> blob_bottom_vec_; 49 | vector<Blob<Dtype>*> blob_top_vec_; 50 | }; 51 | 52 | TYPED_TEST_CASE(HingeLossLayerTest, TestDtypesAndDevices); 53 | 54 | 55 | TYPED_TEST(HingeLossLayerTest, TestGradientL1) { 56 | typedef typename TypeParam::Dtype Dtype; 57 | LayerParameter layer_param; 58 | HingeLossLayer<Dtype> layer(layer_param); 59 | GradientChecker<Dtype> checker(1e-2, 2e-3, 1701, 1, 0.01); 60 | checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, 61 | this->blob_top_vec_, 0); 62 | } 63 | 64 | TYPED_TEST(HingeLossLayerTest, TestGradientL2) { 65 | typedef typename TypeParam::Dtype Dtype; 66 | LayerParameter layer_param; 67 | // Set norm to L2 68 | HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param(); 69 | hinge_loss_param->set_norm(HingeLossParameter_Norm_L2); 70 | HingeLossLayer<Dtype> layer(layer_param); 71 | GradientChecker<Dtype> checker(1e-2, 1e-2, 1701); 72 | checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, 73 | this->blob_top_vec_, 0); 74 | } 75 | 76 | } // namespace caffe 77 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_infogain_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <cstdlib> 3 | #include <cstring> 4 | #include <vector> 5 | 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/common.hpp" 10 | #include "caffe/filler.hpp" 11 | #include "caffe/loss_layers.hpp" 12 | 13 | #include "caffe/test/test_caffe_main.hpp" 14 | #include "caffe/test/test_gradient_check_util.hpp" 15 | 16 | namespace caffe { 17 | 18 | template <typename TypeParam> 19 | class InfogainLossLayerTest : public MultiDeviceTest<TypeParam> { 20 | typedef typename TypeParam::Dtype Dtype; 21 | 22 | protected: 23 | InfogainLossLayerTest() 24 | : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)), 25 | blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)), 26 | blob_bottom_infogain_(new Blob<Dtype>(1, 1, 5, 5)), 27 | blob_top_loss_(new Blob<Dtype>()) { 28 | Caffe::set_random_seed(1701); 29 | FillerParameter filler_param; 30 | PositiveUnitballFiller<Dtype> filler(filler_param); 31 | filler.Fill(this->blob_bottom_data_); 32 | blob_bottom_vec_.push_back(blob_bottom_data_); 33 | for (int i = 0; i < blob_bottom_label_->count(); ++i) { 34 | blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; 35 | } 36 | blob_bottom_vec_.push_back(blob_bottom_label_); 37 | filler_param.set_min(0.1); 38 | filler_param.set_max(2.0); 39 | UniformFiller<Dtype> infogain_filler(filler_param); 40 | infogain_filler.Fill(this->blob_bottom_infogain_); 41 | blob_bottom_vec_.push_back(blob_bottom_infogain_); 42 | blob_top_vec_.push_back(blob_top_loss_); 43 | } 44 | virtual ~InfogainLossLayerTest() { 45 | delete blob_bottom_data_; 46 | delete blob_bottom_label_; 47 | delete blob_bottom_infogain_; 48 | delete blob_top_loss_; 49 | } 50 | Blob<Dtype>* const blob_bottom_data_; 51 | Blob<Dtype>* const blob_bottom_label_; 52 | Blob<Dtype>* const blob_bottom_infogain_; 53 | Blob<Dtype>* const blob_top_loss_; 54 | vector<Blob<Dtype>*> blob_bottom_vec_; 55 | vector<Blob<Dtype>*> blob_top_vec_; 56 | }; 57 | 58 | TYPED_TEST_CASE(InfogainLossLayerTest, TestDtypesAndDevices); 59 | 60 | 61 | TYPED_TEST(InfogainLossLayerTest, TestGradient) { 62 | typedef typename TypeParam::Dtype Dtype; 63 | LayerParameter layer_param; 64 | InfogainLossLayer<Dtype> layer(layer_param); 65 | GradientChecker<Dtype> checker(1e-4, 2e-2, 1701, 1, 0.01); 66 | checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, 67 | this->blob_top_vec_, 0); 68 | } 69 | 70 | } // namespace caffe 71 |
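The GradientChecker used by these loss tests perturbs each bottom value and compares the layer's analytic gradient against a centered finite difference. A minimal NumPy sketch of that idea (the function and threshold names here are illustrative, not the Caffe API):

import numpy as np

def numeric_gradient(f, x, eps=1e-2):
    # Centered differences: df/dx_i ~ (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
    grad = np.zeros_like(x)
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + eps
        fp = f(x)
        x.flat[i] = orig - eps
        fm = f(x)
        x.flat[i] = orig
        grad.flat[i] = (fp - fm) / (2.0 * eps)
    return grad

def check_gradient(f, analytic_grad, x, eps=1e-2, threshold=2e-2):
    # Roughly what GradientChecker(stepsize, threshold, seed, ...) enforces:
    # analytic and numeric gradients must agree to within threshold,
    # relative to the larger of the two magnitudes (floored at 1).
    num = numeric_gradient(f, x, eps)
    scale = np.maximum(np.maximum(np.abs(num), np.abs(analytic_grad)), 1.0)
    assert np.all(np.abs(num - analytic_grad) <= threshold * scale)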
-------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_internal_thread.cpp: -------------------------------------------------------------------------------- 1 | #include "glog/logging.h" 2 | #include "gtest/gtest.h" 3 | 4 | #include "caffe/internal_thread.hpp" 5 | 6 | #include "caffe/test/test_caffe_main.hpp" 7 | 8 | namespace caffe { 9 | 10 | 11 | class InternalThreadTest : public ::testing::Test {}; 12 | 13 | TEST_F(InternalThreadTest, TestStartAndExit) { 14 | InternalThread thread; 15 | EXPECT_FALSE(thread.is_started()); 16 | EXPECT_TRUE(thread.StartInternalThread()); 17 | EXPECT_TRUE(thread.is_started()); 18 | EXPECT_TRUE(thread.WaitForInternalThreadToExit()); 19 | EXPECT_FALSE(thread.is_started()); 20 | } 21 | 22 | } // namespace caffe 23 | 24 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_layer_factory.cpp: -------------------------------------------------------------------------------- 1 | #include <map> 2 | #include <string> 3 | 4 | #include "gtest/gtest.h" 5 | 6 | #include "caffe/common.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/layer_factory.hpp" 9 | 10 | #include "caffe/test/test_caffe_main.hpp" 11 | 12 | namespace caffe { 13 | 14 | template <typename TypeParam> 15 | class LayerFactoryTest : public MultiDeviceTest<TypeParam> {}; 16 | 17 | TYPED_TEST_CASE(LayerFactoryTest, TestDtypesAndDevices); 18 | 19 | TYPED_TEST(LayerFactoryTest, TestCreateLayer) { 20 | typedef typename TypeParam::Dtype Dtype; 21 | typename LayerRegistry<Dtype>::CreatorRegistry& registry = 22 | LayerRegistry<Dtype>::Registry(); 23 | shared_ptr<Layer<Dtype> > layer; 24 | LayerParameter layer_param; 25 | for (typename LayerRegistry<Dtype>::CreatorRegistry::iterator iter = 26 | registry.begin(); iter != registry.end(); ++iter) { 27 | // Special case: PythonLayer is checked by pytest 28 | if (iter->first == "Python") { continue; } 29 | layer_param.set_type(iter->first); 30 | layer = LayerRegistry<Dtype>::CreateLayer(layer_param); 31 | EXPECT_EQ(iter->first, layer->type()); 32 | } 33 | } 34 | 35 | } // namespace caffe 36 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_multinomial_logistic_loss_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <cstdlib> 3 | #include <cstring> 4 | #include <vector> 5 | 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/blob.hpp" 9 | #include "caffe/common.hpp" 10 | #include "caffe/filler.hpp" 11 | #include "caffe/vision_layers.hpp" 12 | 13 | #include "caffe/test/test_caffe_main.hpp" 14 | #include "caffe/test/test_gradient_check_util.hpp" 15 | 16 | namespace caffe { 17 | 18 | template <typename Dtype> 19 | class MultinomialLogisticLossLayerTest : public ::testing::Test { 20 | protected: 21 | MultinomialLogisticLossLayerTest() 22 | : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)), 23 | blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)), 24 | blob_top_loss_(new Blob<Dtype>()) { 25 | Caffe::set_random_seed(1701); 26 | // fill the values 27 | FillerParameter filler_param; 28 | PositiveUnitballFiller<Dtype> filler(filler_param); 29 | filler.Fill(this->blob_bottom_data_); 30 | blob_bottom_vec_.push_back(blob_bottom_data_); 31 | for (int i = 0; i < blob_bottom_label_->count(); ++i) { 32 | blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; 33 | } 34 | blob_bottom_vec_.push_back(blob_bottom_label_); 35 | blob_top_vec_.push_back(blob_top_loss_); 36 | } 37 | virtual ~MultinomialLogisticLossLayerTest() { 38 | delete blob_bottom_data_;
39 | delete blob_bottom_label_; 40 | delete blob_top_loss_; 41 | } 42 | Blob<Dtype>* const blob_bottom_data_; 43 | Blob<Dtype>* const blob_bottom_label_; 44 | Blob<Dtype>* const blob_top_loss_; 45 | vector<Blob<Dtype>*> blob_bottom_vec_; 46 | vector<Blob<Dtype>*> blob_top_vec_; 47 | }; 48 | 49 | TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); 50 | 51 | 52 | TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { 53 | LayerParameter layer_param; 54 | Caffe::set_mode(Caffe::CPU); 55 | MultinomialLogisticLossLayer<TypeParam> layer(layer_param); 56 | layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); 57 | GradientChecker<TypeParam> checker(1e-2, 2*1e-2, 1701, 0, 0.05); 58 | checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, 59 | this->blob_top_vec_, 0); 60 | } 61 | 62 | } // namespace caffe 63 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_platform.cpp: -------------------------------------------------------------------------------- 1 | #ifndef CPU_ONLY 2 | 3 | #include <cstdio> 4 | #include <cstring> 5 | 6 | #include "glog/logging.h" 7 | #include "gtest/gtest.h" 8 | 9 | #include "caffe/test/test_caffe_main.hpp" 10 | 11 | namespace caffe { 12 | 13 | extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; 14 | 15 | class PlatformTest : public ::testing::Test {}; 16 | 17 | TEST_F(PlatformTest, TestInitialization) { 18 | printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major); 19 | printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor); 20 | printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name); 21 | printf("Total global memory: %lu\n", 22 | CAFFE_TEST_CUDA_PROP.totalGlobalMem); 23 | printf("Total shared memory per block: %lu\n", 24 | CAFFE_TEST_CUDA_PROP.sharedMemPerBlock); 25 | printf("Total registers per block: %d\n", 26 | CAFFE_TEST_CUDA_PROP.regsPerBlock); 27 | printf("Warp size: %d\n", 28 | CAFFE_TEST_CUDA_PROP.warpSize); 29 | printf("Maximum memory pitch: %lu\n", 30 | CAFFE_TEST_CUDA_PROP.memPitch); 31 | printf("Maximum threads per block: %d\n", 32 | CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock); 33 | for (int i = 0; i < 3; ++i) 34 | printf("Maximum dimension %d of block: %d\n", i, 35 | CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]); 36 | for (int i = 0; i < 3; ++i) 37 | printf("Maximum dimension %d of grid: %d\n", i, 38 | CAFFE_TEST_CUDA_PROP.maxGridSize[i]); 39 | printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate); 40 | printf("Total constant memory: %lu\n", 41 | CAFFE_TEST_CUDA_PROP.totalConstMem); 42 | printf("Texture alignment: %lu\n", 43 | CAFFE_TEST_CUDA_PROP.textureAlignment); 44 | printf("Concurrent copy and execution: %s\n", 45 | (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No")); 46 | printf("Number of multiprocessors: %d\n", 47 | CAFFE_TEST_CUDA_PROP.multiProcessorCount); 48 | printf("Kernel execution timeout: %s\n", 49 | (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No")); 50 | printf("Unified virtual addressing: %s\n", 51 | (CAFFE_TEST_CUDA_PROP.unifiedAddressing ? "Yes" : "No")); 52 | EXPECT_TRUE(true); 53 | } 54 | 55 | } // namespace caffe 56 | 57 | #endif // CPU_ONLY 58 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/caffe/test/test_protobuf.cpp: -------------------------------------------------------------------------------- 1 | // This is simply a script that tries serializing a protocol buffer in text 2 | // format. Nothing special here; no actual code is being tested.
3 | #include <string> 4 | 5 | #include "google/protobuf/text_format.h" 6 | #include "gtest/gtest.h" 7 | 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/test/test_caffe_main.hpp" 11 | 12 | namespace caffe { 13 | 14 | class ProtoTest : public ::testing::Test {}; 15 | 16 | TEST_F(ProtoTest, TestSerialization) { 17 | LayerParameter param; 18 | param.set_name("test"); 19 | param.set_type("Test"); 20 | std::cout << "Printing in binary format." << std::endl; 21 | std::cout << param.SerializeAsString() << std::endl; 22 | std::cout << "Printing in text format." << std::endl; 23 | std::string str; 24 | google::protobuf::TextFormat::PrintToString(param, &str); 25 | std::cout << str << std::endl; 26 | EXPECT_TRUE(true); 27 | } 28 | 29 | } // namespace caffe 30 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/gtest/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) 2 | caffe_default_properties(gtest) 3 | 4 | #add_library(gtest_main gtest_main.cc) 5 | #target_link_libraries(gtest_main gtest) 6 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/src/gtest/gtest_main.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2006, Google Inc. 2 | // All rights reserved. 3 | // 4 | // Redistribution and use in source and binary forms, with or without 5 | // modification, are permitted provided that the following conditions are 6 | // met: 7 | // 8 | // * Redistributions of source code must retain the above copyright 9 | // notice, this list of conditions and the following disclaimer. 10 | // * Redistributions in binary form must reproduce the above 11 | // copyright notice, this list of conditions and the following disclaimer 12 | // in the documentation and/or other materials provided with the 13 | // distribution. 14 | // * Neither the name of Google Inc. nor the names of its 15 | // contributors may be used to endorse or promote products derived from 16 | // this software without specific prior written permission. 17 | // 18 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
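// Note: this stock gtest main() is not linked into test.testbin -- the
// CMakeLists.txt above builds gtest without it, and Caffe supplies its own
// main() in test_caffe_main.cpp.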
29 | 30 | #include <iostream> 31 | 32 | #include "gtest/gtest.h" 33 | 34 | GTEST_API_ int main(int argc, char **argv) { 35 | std::cout << "Running main() from gtest_main.cc\n"; 36 | 37 | testing::InitGoogleTest(&argc, argv); 38 | return RUN_ALL_TESTS(); 39 | } 40 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Collect source files 2 | file(GLOB_RECURSE srcs ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 3 | 4 | # Build each source file independently 5 | foreach(source ${srcs}) 6 | get_filename_component(name ${source} NAME_WE) 7 | 8 | # caffe target already exists 9 | if(name MATCHES "caffe") 10 | set(name ${name}.bin) 11 | endif() 12 | 13 | # target 14 | add_executable(${name} ${source}) 15 | target_link_libraries(${name} ${Caffe_LINK}) 16 | caffe_default_properties(${name}) 17 | 18 | # set back RUNTIME_OUTPUT_DIRECTORY 19 | caffe_set_runtime_directory(${name} "${PROJECT_BINARY_DIR}/tools") 20 | caffe_set_solution_folder(${name} tools) 21 | 22 | # restore output name without suffix 23 | if(name MATCHES "caffe.bin") 24 | set_target_properties(${name} PROPERTIES OUTPUT_NAME caffe) 25 | endif() 26 | 27 | # Install 28 | install(TARGETS ${name} DESTINATION bin) 29 | endforeach(source) 30 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/device_query.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/common.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe device_query " 5 | "[--device_id=0] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/extra/extract_seconds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import datetime 3 | import os 4 | import sys 5 | 6 | def extract_datetime_from_line(line, year): 7 | # Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565 8 | line = line.strip().split() 9 | month = int(line[0][1:3]) 10 | day = int(line[0][3:]) 11 | timestamp = line[1] 12 | pos = timestamp.rfind('.') 13 | ts = [int(x) for x in timestamp[:pos].split(':')] 14 | hour = ts[0] 15 | minute = ts[1] 16 | second = ts[2] 17 | microsecond = int(timestamp[pos + 1:]) 18 | dt = datetime.datetime(year, month, day, hour, minute, second, microsecond) 19 | return dt 20 | 21 | 22 | def get_log_created_year(input_file): 23 | """Get year from log file system timestamp 24 | """ 25 | 26 | log_created_time = os.path.getctime(input_file) 27 | log_created_year = datetime.datetime.fromtimestamp(log_created_time).year 28 | return log_created_year 29 | 30 | 31 | def get_start_time(line_iterable, year): 32 | """Find start time from group of lines 33 | """ 34 | 35 | start_datetime = None 36 | for line in line_iterable: 37 | line = line.strip() 38 | if line.find('Solving') != -1: 39 | start_datetime = extract_datetime_from_line(line, year) 40 | break 41 | return start_datetime 42 | 43 | 44 | def extract_seconds(input_file, output_file): 45 | with open(input_file, 'r') as f: 46 | lines = f.readlines() 47 | log_created_year = get_log_created_year(input_file) 48 | start_datetime = get_start_time(lines, log_created_year) 49 | assert start_datetime, 'Start time not found' 50 | 51 | out = open(output_file, 'w') 52 | for line in lines: 53 | line =
line.strip() 54 | if line.find('Iteration') != -1: 55 | dt = extract_datetime_from_line(line, log_created_year) 56 | elapsed_seconds = (dt - start_datetime).total_seconds() 57 | out.write('%f\n' % elapsed_seconds) 58 | out.close() 59 | 60 | if __name__ == '__main__': 61 | if len(sys.argv) < 3: 62 | print('Usage: ./extract_seconds input_file output_file') 63 | exit(1) 64 | extract_seconds(sys.argv[1], sys.argv[2]) 65 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/extra/launch_resize_and_crop_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer 3 | 4 | # If you encounter error that the address already in use, kill the process. 5 | # 11235 is the port of server process 6 | # https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py 7 | # sudo netstat -ap | grep 11235 8 | # The last column of the output is PID/Program name 9 | # kill -9 PID 10 | # Second solution: 11 | # nmap localhost 12 | # fuser -k 11235/tcp 13 | # Or just wait a few seconds. 14 | 15 | ## Launch your Mapreduce locally 16 | # num_clients: number of processes 17 | # image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV. 18 | # input: the file containing one image path relative to input_folder each line 19 | # input_folder: where are the original images 20 | # output_folder: where to save the resized and cropped images 21 | ./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 22 | 23 | ## Launch your Mapreduce with MPI 24 | # mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ 25 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/extra/parse_log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage parse_log.sh caffe.log 3 | # It creates the following two text files, each containing a table: 4 | # caffe.log.test (columns: '#Iters Seconds TestAccuracy TestLoss') 5 | # caffe.log.train (columns: '#Iters Seconds TrainingLoss LearningRate') 6 | 7 | 8 | # get the dirname of the script 9 | DIR="$( cd "$(dirname "$0")" ; pwd -P )" 10 | 11 | if [ "$#" -lt 1 ] 12 | then 13 | echo "Usage parse_log.sh /path/to/your.log" 14 | exit 15 | fi 16 | LOG=`basename $1` 17 | grep -B 1 'Test ' $1 > aux.txt 18 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt 19 | grep 'Test net output #0' aux.txt | awk '{print $11}' > aux1.txt 20 | grep 'Test net output #1' aux.txt | awk '{print $11}' > aux2.txt 21 | 22 | # Extracting elapsed seconds 23 | # For extraction of time since this line contains the start time 24 | grep '] Solving ' $1 > aux3.txt 25 | grep 'Testing net' $1 >> aux3.txt 26 | $DIR/extract_seconds.py aux3.txt aux4.txt 27 | 28 | # Generating 29 | echo '#Iters Seconds TestAccuracy TestLoss'> $LOG.test 30 | paste aux0.txt aux4.txt aux1.txt aux2.txt | column -t >> $LOG.test 31 | rm 
aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt 32 | 33 | # For extraction of time since this line contains the start time 34 | grep '] Solving ' $1 > aux.txt 35 | grep ', loss = ' $1 >> aux.txt 36 | grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt 37 | grep ', loss = ' $1 | awk '{print $9}' > aux1.txt 38 | grep ', lr = ' $1 | awk '{print $9}' > aux2.txt 39 | 40 | # Extracting elapsed seconds 41 | $DIR/extract_seconds.py aux.txt aux3.txt 42 | 43 | # Generating 44 | echo '#Iters Seconds TrainingLoss LearningRate'> $LOG.train 45 | paste aux0.txt aux3.txt aux1.txt aux2.txt | column -t >> $LOG.train 46 | rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt 47 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/finetune_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--weights=...] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/net_speed_benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe time --model=... " 5 | "[--iterations=50] [--gpu] [--device_id=0]"; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/test_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe test --model=... " 5 | "--weights=... instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/train_net.cpp: -------------------------------------------------------------------------------- 1 | #include "caffe/caffe.hpp" 2 | 3 | int main(int argc, char** argv) { 4 | LOG(FATAL) << "Deprecated. Use caffe train --solver=... " 5 | "[--snapshot=...] instead."; 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/upgrade_net_proto_binary.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 
2 | // Usage: 3 | // upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | ::google::InitGoogleLogging(argv[0]); 20 | if (argc != 3) { 21 | LOG(ERROR) << "Usage: " 22 | << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out"; 23 | return 1; 24 | } 25 | 26 | NetParameter net_param; 27 | string input_filename(argv[1]); 28 | if (!ReadProtoFromBinaryFile(input_filename, &net_param)) { 29 | LOG(ERROR) << "Failed to parse input binary file as NetParameter: " 30 | << input_filename; 31 | return 2; 32 | } 33 | bool need_upgrade = NetNeedsUpgrade(net_param); 34 | bool success = true; 35 | if (need_upgrade) { 36 | success = UpgradeNetAsNeeded(input_filename, &net_param); 37 | if (!success) { 38 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 39 | << "see details above."; 40 | } 41 | } else { 42 | LOG(ERROR) << "File already in V1 proto format: " << argv[1]; 43 | } 44 | 45 | WriteProtoToBinaryFile(net_param, argv[2]); 46 | 47 | LOG(ERROR) << "Wrote upgraded NetParameter binary proto to " << argv[2]; 48 | return !success; 49 | } 50 | -------------------------------------------------------------------------------- /caffe-fast-rcnn/tools/upgrade_net_proto_text.cpp: -------------------------------------------------------------------------------- 1 | // This is a script to upgrade "V0" network prototxts to the new format. 2 | // Usage: 3 | // upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out 4 | 5 | #include <cstring> 6 | #include <fstream> // NOLINT(readability/streams) 7 | #include <iostream> // NOLINT(readability/streams) 8 | #include <string> 9 | 10 | #include "caffe/caffe.hpp" 11 | #include "caffe/util/io.hpp" 12 | #include "caffe/util/upgrade_proto.hpp" 13 | 14 | using std::ofstream; 15 | 16 | using namespace caffe; // NOLINT(build/namespaces) 17 | 18 | int main(int argc, char** argv) { 19 | ::google::InitGoogleLogging(argv[0]); 20 | if (argc != 3) { 21 | LOG(ERROR) << "Usage: " 22 | << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out"; 23 | return 1; 24 | } 25 | 26 | NetParameter net_param; 27 | string input_filename(argv[1]); 28 | if (!ReadProtoFromTextFile(input_filename, &net_param)) { 29 | LOG(ERROR) << "Failed to parse input text file as NetParameter: " 30 | << input_filename; 31 | return 2; 32 | } 33 | bool need_upgrade = NetNeedsUpgrade(net_param); 34 | bool need_data_upgrade = NetNeedsDataUpgrade(net_param); 35 | bool success = true; 36 | if (need_upgrade) { 37 | success = UpgradeNetAsNeeded(input_filename, &net_param); 38 | if (!success) { 39 | LOG(ERROR) << "Encountered error(s) while upgrading prototxt; " 40 | << "see details above."; 41 | } 42 | } else { 43 | LOG(ERROR) << "File already in latest proto format: " << input_filename; 44 | } 45 | 46 | if (need_data_upgrade) { 47 | UpgradeNetDataTransformation(&net_param); 48 | } 49 | 50 | // Save new format prototxt.
51 | WriteProtoToTextFile(net_param, argv[2]); 52 | 53 | LOG(ERROR) << "Wrote upgraded NetParameter text proto to " << argv[2]; 54 | return !success; 55 | } 56 | -------------------------------------------------------------------------------- /data/.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/.tgz -------------------------------------------------------------------------------- /data/cache/.cache: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/cache/.cache -------------------------------------------------------------------------------- /data/demo/demo_boxes.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/demo/demo_boxes.mat -------------------------------------------------------------------------------- /data/demo/im0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/demo/im0.jpg -------------------------------------------------------------------------------- /data/demo/im1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/demo/im1.jpg -------------------------------------------------------------------------------- /data/demo/im2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/demo/im2.jpg -------------------------------------------------------------------------------- /data/demo/im3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/data/demo/im3.jpg -------------------------------------------------------------------------------- /data/scripts/fetch_coco_matlab_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | 6 | NAME="coco_matlab_data" 7 | EXT=".tgz" 8 | FILE=$NAME$EXT 9 | URL=http://www.cs.berkeley.edu/~wckuo/fast-dbox-data/$FILE 10 | CHECKSUM=a731fa22fb3833033f9475e7558bf9c4 11 | 12 | if [ -f $FILE ]; then 13 | echo "File already exists. Checking md5..." 14 | os=`uname -s` 15 | if [ "$os" = "Linux" ]; then 16 | checksum=`md5sum $FILE | awk '{ print $1 }'` 17 | elif [ "$os" = "Darwin" ]; then 18 | checksum=`cat $FILE | md5` 19 | fi 20 | if [ "$checksum" = "$CHECKSUM" ]; then 21 | echo "Checksum is correct. No need to download." 22 | exit 0 23 | else 24 | echo "Checksum is incorrect. Need to download again." 25 | fi 26 | fi 27 | 28 | echo "Downloading matlab COCO image ids and ground truth bboxes ..." 29 | 30 | wget $URL -O $FILE 31 | 32 | echo "Unzipping..." 33 | tar zxvf $FILE 34 | 35 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 
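Each of the data/scripts/fetch_*.sh helpers in this directory follows the same pattern: if the archive already exists and its md5 matches, skip the download; otherwise fetch it again. A rough Python 2 equivalent of that pattern (the url, path, and checksum arguments are placeholders):

import hashlib
import os
import urllib2

def fetch(url, path, expected_md5):
    # Skip the download when a file with the expected checksum is present.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            if hashlib.md5(f.read()).hexdigest() == expected_md5:
                print 'Checksum is correct. No need to download.'
                return
        print 'Checksum is incorrect. Need to download again.'
    with open(path, 'wb') as f:
        f.write(urllib2.urlopen(url).read())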
36 | 37 | -------------------------------------------------------------------------------- /data/scripts/fetch_edge_box_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | NAME="edge_box_data" 6 | EXT=".tgz" 7 | FILE=$NAME$EXT 8 | echo $FILE 9 | URL=ftp://ftp.cs.berkeley.edu/pub/projects/vision/$FILE 10 | CHECKSUM=c341da31579e305292bf5ec08298b87c 11 | 12 | if [ -f $FILE ]; then 13 | echo "File already exists. Checking md5..." 14 | os=`uname -s` 15 | if [ "$os" = "Linux" ]; then 16 | checksum=`md5sum $FILE | awk '{ print $1 }'` 17 | elif [ "$os" = "Darwin" ]; then 18 | checksum=`cat $FILE | md5` 19 | fi 20 | if [ "$checksum" = "$CHECKSUM" ]; then 21 | echo "Checksum is correct. No need to download." 22 | exit 0 23 | else 24 | echo "Checksum is incorrect. Need to download again." 25 | fi 26 | fi 27 | 28 | echo "Downloading Edge boxes for COCO train, val, and test-dev set..." 29 | 30 | wget $URL -O $FILE 31 | 32 | echo "Unzipping..." 33 | 34 | tar zxvf $FILE 35 | 36 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 37 | 38 | -------------------------------------------------------------------------------- /data/scripts/fetch_fast_dbox_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | 6 | NAME="fast_dbox_data" 7 | EXT=".tgz" 8 | FILE=$NAME$EXT 9 | URL=ftp://ftp.cs.berkeley.edu/pub/projects/vision/$FILE 10 | CHECKSUM=1489ffc9a84508fde71259cd76d3208b 11 | 12 | if [ -f $FILE ]; then 13 | echo "File already exists. Checking md5..." 14 | os=`uname -s` 15 | if [ "$os" = "Linux" ]; then 16 | checksum=`md5sum $FILE | awk '{ print $1 }'` 17 | elif [ "$os" = "Darwin" ]; then 18 | checksum=`cat $FILE | md5` 19 | fi 20 | if [ "$checksum" = "$CHECKSUM" ]; then 21 | echo "Checksum is correct. No need to download." 22 | exit 0 23 | else 24 | echo "Checksum is incorrect. Need to download again." 25 | fi 26 | fi 27 | 28 | echo "Downloading Fast DeepBoxes for COCO train, val, and test-dev set..." 29 | 30 | wget $URL -O $FILE 31 | 32 | echo "Unzipping..." 33 | tar zxvf $FILE 34 | 35 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 36 | 37 | -------------------------------------------------------------------------------- /data/scripts/fetch_imagenet_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | 6 | NAME="imagenet_model" 7 | EXT=".tgz" 8 | FILE=$NAME$EXT 9 | URL=http://www.cs.berkeley.edu/~wckuo/fast-dbox-data/$FILE 10 | CHECKSUM=8a30a5e0d8dd4f07d475e672d61ecb7a 11 | 12 | if [ -f $FILE ]; then 13 | echo "File already exists. Checking md5..." 14 | os=`uname -s` 15 | if [ "$os" = "Linux" ]; then 16 | checksum=`md5sum $FILE | awk '{ print $1 }'` 17 | elif [ "$os" = "Darwin" ]; then 18 | checksum=`cat $FILE | md5` 19 | fi 20 | if [ "$checksum" = "$CHECKSUM" ]; then 21 | echo "Checksum is correct. No need to download." 22 | exit 0 23 | else 24 | echo "Checksum is incorrect. Need to download again." 25 | fi 26 | fi 27 | 28 | echo "Downloading Alex net Imagenet model for initialization of DeepBox training..." 29 | 30 | wget $URL -O $FILE 31 | 32 | echo "Unzipping..." 33 | tar zxvf $FILE 34 | 35 | echo "Done. 
Please run this command again to verify that checksum = $CHECKSUM." 36 | 37 | -------------------------------------------------------------------------------- /data/scripts/fetch_slid_window_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | 6 | FILE=slid_window_data.tgz 7 | URL=http://www.cs.berkeley.edu/~wckuo/fast-dbox-data/$FILE 8 | CHECKSUM=cb24fcee9d7b83286e56e01f959ebc23 9 | 10 | if [ -f $FILE ]; then 11 | echo "File already exists. Checking md5..." 12 | os=`uname -s` 13 | if [ "$os" = "Linux" ]; then 14 | checksum=`md5sum $FILE | awk '{ print $1 }'` 15 | elif [ "$os" = "Darwin" ]; then 16 | checksum=`cat $FILE | md5` 17 | fi 18 | if [ "$checksum" = "$CHECKSUM" ]; then 19 | echo "Checksum is correct. No need to download." 20 | exit 0 21 | else 22 | echo "Checksum is incorrect. Need to download again." 23 | fi 24 | fi 25 | 26 | echo "Downloading sliding window data for COCO train, val, and test-dev set..." 27 | 28 | wget $URL -O $FILE 29 | 30 | echo "Unzipping..." 31 | 32 | #tar zxvf $FILE 33 | 34 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 35 | 36 | -------------------------------------------------------------------------------- /eval/eval_fast_dbox.m: -------------------------------------------------------------------------------- 1 | % Setup directories 2 | function eval_fast_dbox() 3 | name = 'fast-dbox-multiscale'; 4 | suffix = ['results_' name]; 5 | addpath('./MSCOCO/MatlabAPI'); 6 | dataDir='./MSCOCO'; 7 | split = 'val'; 8 | year = '2014'; 9 | dataType = [split year]; 10 | annFile=sprintf('%s/annotations/instances_%s.json',dataDir,dataType); 11 | 12 | %% load coco 13 | coco=CocoApi(annFile); 14 | imgIds = coco.getImgIds(); 15 | num_imgs = numel(imgIds); 16 | 17 | fprintf('loading gtbbox ...\n'); 18 | load(['./data/coco_matlab_data/COCO_' split '_gtbbox.mat']); 19 | fprintf('loading fast dboxes score...\n'); 20 | load(['./output/default/coco_val2014/' name '/fast_dbox_output_scores.mat']); 21 | 22 | fprintf('loading edge boxes ...\n'); 23 | load(['./data/edge_box_data/' split '2014.mat']); 24 | eboxes = boxes; 25 | clear('boxes'); 26 | 27 | 28 | mean_dbox_Mprop = zeros(2,11); 29 | mean_edgebox = zeros(2,11); 30 | sum_gt = 0; 31 | 32 | % Loop over every image that has proposals 33 | num_iter = numel(eboxes); 34 | for img_id = 1:num_iter 35 | fprintf('Evaluate fast DeepBox COCO %s images:%d\n',split,img_id); 36 | sel_gtarray = gtbbox{img_id}; 37 | sel_num_objs = size(sel_gtarray,1); 38 | [~,I_sort] = sort(score_list{img_id}(:,2),'descend'); 39 | trim_bbs_m = eboxes{img_id}(I_sort,:); 40 | boxes = eboxes{img_id}; 41 | 42 | %Evaluation of boxes 43 | if isempty(sel_gtarray) 44 | fprintf('img %d has no ground-truth boxes ....
\n',img_id); 45 | else 46 | evalRes_dbox_Mprop =evalbbox(trim_bbs_m,sel_gtarray); 47 | evalRes_edgebox=evalbbox(boxes,sel_gtarray); 48 | mean_dbox_Mprop = mean_dbox_Mprop +evalRes_dbox_Mprop; 49 | mean_edgebox = mean_edgebox+evalRes_edgebox; 50 | sum_gt = sum_gt + size(sel_gtarray,1); 51 | end 52 | 53 | 54 | end 55 | 56 | fprintf('Fast DeepBox evaluation results:\n'); 57 | display(mean_dbox_Mprop/sum_gt); 58 | fprintf('Edge boxes evaluation results:\n'); 59 | display(mean_edgebox/sum_gt); 60 | fprintf('Difference:\n'); 61 | display((mean_dbox_Mprop-mean_edgebox)/sum_gt); 62 | 63 | fprintf('Save to file ...'); 64 | 65 | save(['./eval/' suffix '.mat'],'mean_dbox_Mprop','mean_edgebox','sum_gt','-v7'); 66 | fprintf('done\n'); 67 | 68 | fprintf('Plot results and compute AUC....\n'); 69 | plot_fast_dbox(name); 70 | -------------------------------------------------------------------------------- /eval/evalbbox.m: -------------------------------------------------------------------------------- 1 | function evalRes=evalbbox(sel_box,gtarray) 2 | % fprintf('Visbbox for %s \n',nm); 3 | thr_num = [1 2 5 10 20 50 100 200 500 1000 2000]; 4 | thr_IoU = [0.5 0.7]; 5 | max_num = size(sel_box,1); 6 | thr_num(thr_num>max_num)=max_num; 7 | N = size(thr_num,2); 8 | M = size(thr_IoU,2); 9 | evalRes = zeros(M,N); 10 | 11 | for i = 1:N 12 | for j = 1:M 13 | [~,~,objIoU]=scorebboxes(gtarray,sel_box(1:thr_num(i),:),thr_IoU(j)); 14 | objmaxIoU = max(objIoU,[],1); 15 | hit_obj = objmaxIoU >= thr_IoU(j); 16 | evalRes(j,i)=sum(hit_obj); 17 | end 18 | end 19 | 20 | end 21 | -------------------------------------------------------------------------------- /eval/plot_fast_dbox.m: -------------------------------------------------------------------------------- 1 | function plot_fast_dbox(name) 2 | load(['./eval/results_' name '.mat']); 3 | dr_e = mean_edgebox./sum_gt; 4 | dr_ds = mean_dbox_Mprop./sum_gt; 5 | thr_num = [1 2 5 10 20 50 100 200 500 1000 2000]; 6 | auc_thr = log10(thr_num)/log10(2000); 7 | AUC_edge = zeros(1,2); 8 | AUC_ds = zeros(1,2); 9 | for i = 1:2 10 | xv = cat(2,0,dr_e(i,:),0); 11 | yv = cat(2,0,auc_thr,1); 12 | AUC_edge(i) = polyarea(xv,yv); 13 | xds = cat(2,0,dr_ds(i,:),0); 14 | yds = cat(2,0,auc_thr,1); 15 | AUC_ds(i) = polyarea(xds,yds); 16 | end 17 | figure(1); 18 | semilogx(thr_num,dr_e(1,:),'linewidth',3,'color','b');hold on; 19 | semilogx(thr_num,dr_ds(1,:),'linewidth',3,'color','r'); 20 | axh = gca; 21 | set(axh,'XTick',thr_num,'Fontsize',14); 22 | h = legend('Edgebox','DeepBox FastRCNN'); 23 | set(h,'Location','NorthWest'); 24 | title('COCO Evaluation IoU=0.5','Fontsize',14,'Fontweight','bold'); 25 | xlabel('Number of proposals','Fontsize',14,'Fontweight','bold'); 26 | ylabel('Recall','Fontsize',14,'Fontweight','demi'); 27 | axis([1 2000 0 0.80]); 28 | hold off; 29 | 30 | 31 | figure(2); 32 | semilogx(thr_num,dr_e(2,:),'linewidth',3,'color','b');hold on; 33 | semilogx(thr_num,dr_ds(2,:),'linewidth',3,'color','r'); 34 | axh = gca; 35 | set(axh,'XTick',thr_num,'Fontsize',14); 36 | h = legend('Edgebox','DeepBox FastRCNN'); 37 | set(h,'Location','NorthWest'); 38 | title('COCO Evaluation IoU=0.7','Fontsize',14,'Fontweight','bold'); 39 | xlabel('Number of proposals','Fontsize',14,'Fontweight','bold'); 40 | ylabel('Recall','Fontsize',14,'Fontweight','demi'); 41 | axis([1 2000 0 0.6]); 42 | hold off; 43 | 44 | fprintf('DeepBox FRCNN AUC:\n'); 45 | display(AUC_ds); 46 | fprintf('EdgeBox AUC:\n'); 47 | display(AUC_edge); 48 | fprintf('Ratio FRCNN AUC to EdgeBox AUC:\n'); 49 | display(AUC_ds./AUC_edge); 50 | 51 |
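evalbbox above counts, for each IoU threshold and proposal budget, how many ground-truth boxes are covered by the top-ranked proposals, and plot_fast_dbox then integrates recall over log-spaced budgets. A NumPy sketch of the same bookkeeping (it assumes an ious matrix of proposal-vs-gt overlaps with proposals already sorted by score):

import numpy as np

def recall_at_budgets(ious, budgets, thr):
    # ious: (num_proposals, num_gt) overlaps, proposals ranked by score.
    # A gt box counts as hit at budget k if some top-k proposal reaches thr IoU.
    hits = []
    for k in budgets:
        k = min(k, ious.shape[0])
        hits.append(float((ious[:k].max(axis=0) >= thr).sum()))
    return np.array(hits)

budgets = np.array([1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000])
# Area under recall vs. normalized log-budget, approximating plot_fast_dbox's
# polyarea computation:
# auc = np.trapz(recall, np.log10(budgets) / np.log10(2000))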
-------------------------------------------------------------------------------- /eval/scorebboxes.m: -------------------------------------------------------------------------------- 1 | function [label,overlap,objIoU]=scorebboxes(gtarray,bboxes,PosWinThr) 2 | % gtarray: nx4, n: # of objects, each row is [x1,y1,x2,y2] 3 | % bboxes: Nx4, n: # of bboxes in this particular image, each row is [x1,y1,x2,y2] 4 | % label: Nx1, label of each bbox 5 | % overlap: Nx1, maximum overlap of each bbox with ground truth 6 | gt_area = (gtarray(:,3)-gtarray(:,1)).*(gtarray(:,4)-gtarray(:,2)); 7 | bb_area = (bboxes(:,3)-bboxes(:,1)).*(bboxes(:,4)-bboxes(:,2)); 8 | 9 | %Find the zeros in gt area 10 | zero_idx = (gt_area == 0); 11 | if any(zero_idx) 12 | if ((gtarray(zero_idx,3)-gtarray(zero_idx,1)+1)==0) 13 | gt_area(zero_idx) = (gtarray(zero_idx,3)-gtarray(zero_idx,1)+0.01).*(gtarray(zero_idx,4)-gtarray(zero_idx,2)); 14 | gt_wh = cat(2,gtarray(:,1:2),gtarray(:,3)-gtarray(:,1)+0.01,gtarray(:,4)-gtarray(:,2)); 15 | else 16 | gt_area(zero_idx) = (gtarray(zero_idx,3)-gtarray(zero_idx,1)).*(gtarray(zero_idx,4)-gtarray(zero_idx,2)+0.01); 17 | gt_wh = cat(2,gtarray(:,1:2),gtarray(:,3)-gtarray(:,1),gtarray(:,4)-gtarray(:,2)+0.01); 18 | end 19 | else 20 | gt_wh = cat(2,gtarray(:,1:2),gtarray(:,3)-gtarray(:,1),gtarray(:,4)-gtarray(:,2)); 21 | end 22 | 23 | bb_wh = cat(2,bboxes(:,1:2),bboxes(:,3)-bboxes(:,1),bboxes(:,4)-bboxes(:,2)); 24 | Int_area = rectint(bb_wh,gt_wh); 25 | 26 | Union_area = repmat(bb_area,1,size(gt_area,1))+repmat(gt_area',size(bb_area,1),1); 27 | objIoU = Int_area./(Union_area-Int_area); 28 | 29 | %Label sliding window by maximum overlap with objects 30 | [overlap,~] = max(objIoU,[],2); 31 | overlap = floor(overlap*1000)/1000; 32 | PosWinIdx = overlap>=PosWinThr; 33 | label = zeros(size(bboxes,1),1); 34 | label(PosWinIdx) = 1; 35 | 36 | 37 | end -------------------------------------------------------------------------------- /models/DboxNet/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "models/DboxNet/train.prototxt" 2 | base_lr: 0.001 3 | lr_policy: "step" 4 | gamma: 0.1 5 | stepsize: 40000 6 | display: 20 7 | average_loss: 100 8 | momentum: 0.9 9 | weight_decay: 0.0005 10 | # We disable standard caffe solver snapshotting and implement our own snapshot 11 | # function 12 | snapshot: 0 13 | # We still use the snapshot prefix, though 14 | snapshot_prefix: "fast-dbox-multiscale" 15 | # debug_info: true 16 | -------------------------------------------------------------------------------- /output/default/scripts/fetch_fast_dbox_models.sh: -------------------------------------------------------------------------------- 1 | ##!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" 4 | cd $DIR 5 | NAME="coco_train2014" 6 | EXT=".tgz" 7 | FILE=$NAME$EXT 8 | echo $FILE 9 | URL=http://www.cs.berkeley.edu/~wckuo/fast-dbox-data/$FILE 10 | CHECKSUM=b8109d3fbfe31533bbf03b22d1a07a8a 11 | 12 | if [ -f $FILE ]; then 13 | echo "File already exists. Checking md5..." 14 | os=`uname -s` 15 | if [ "$os" = "Linux" ]; then 16 | checksum=`md5sum $FILE | awk '{ print $1 }'` 17 | elif [ "$os" = "Darwin" ]; then 18 | checksum=`cat $FILE | md5` 19 | fi 20 | if [ "$checksum" = "$CHECKSUM" ]; then 21 | echo "Checksum is correct. No need to download." 22 | exit 0 23 | else 24 | echo "Checksum is incorrect. Need to download again." 25 | fi 26 | fi 27 | 28 | echo "Downloading multiscale/single scale Fast DeepBox models trained on COCO. 
Multiscale sliding window model is also provided ..." 29 | 30 | wget $URL -O $FILE 31 | 32 | echo "Unzipping..." 33 | 34 | tar zxvf $FILE 35 | 36 | echo "Done. Please run this command again to verify that checksum = $CHECKSUM." 37 | 38 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python setup.py build_ext --inplace 3 | rm -rf build 4 | -------------------------------------------------------------------------------- /src/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | from .imdb_coco import imdb 9 | from .coco_imdb import coco_imdb 10 | -------------------------------------------------------------------------------- /src/datasets/factory.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | # -------------------------------------------------------- 8 | # Fast DeepBox 9 | # Written by Weicheng Kuo, 2015. 10 | # See LICENSE in the project root for license information. 11 | # -------------------------------------------------------- 12 | 13 | 14 | 15 | __sets = {} 16 | 17 | import datasets.coco_imdb 18 | import numpy as np 19 | 20 | def _proposals_top_k(split, year, top_k): 21 | imdb = datasets.coco_imdb(split, year) 22 | imdb.roidb_handler = imdb.proposals_roidb 23 | imdb.config['top_k'] = top_k 24 | return imdb 25 | 26 | # Set up coco_{split}{year} datasets 27 | for year in ['2014','2015']: 28 | for split in ['train', 'val','test','test-dev']: 29 | name = 'coco_{}{}'.format(split, year) 30 | __sets[name] = (lambda split=split, year=year: 31 | datasets.coco_imdb(split, year)) 32 | 33 | # Set up coco_{split}{year}_top_{k} datasets, which return 34 | # only the first top_k proposal boxes 35 | for top_k in np.arange(500, 5000, 500): 36 | for year in ['2015','2014']: 37 | for split in ['test','test-dev','train', 'val']: 38 | name = 'coco_{}{}_top_{:d}'.format(split, year, top_k) 39 | __sets[name] = (lambda split=split, year=year, top_k=top_k: 40 | _proposals_top_k(split, year, top_k)) 41 | 42 | def get_imdb(name): 43 | if not __sets.has_key(name): 44 | raise KeyError('Unknown dataset: {}'.format(name)) 45 | return __sets[name]() 46 | 47 | def list_imdbs(): 48 | return __sets.keys() 49 | -------------------------------------------------------------------------------- /src/setup.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | # -------------------------------------------------------- 8 | # Fast DeepBox 9 | # Written by Weicheng Kuo, 2015. 10 | # See LICENSE in the project root for license information.
11 | # -------------------------------------------------------- 12 | 13 | 14 | import numpy as np 15 | from distutils.core import setup 16 | from distutils.extension import Extension 17 | from Cython.Distutils import build_ext 18 | 19 | cmdclass = {} 20 | ext_modules = [ 21 | Extension( 22 | "utils.cython_bbox", 23 | ["utils/bbox.pyx"], 24 | ) 25 | ] 26 | cmdclass.update({'build_ext': build_ext}) 27 | 28 | setup( 29 | name='fast_dbox', 30 | cmdclass=cmdclass, 31 | ext_modules=ext_modules, 32 | include_dirs=[np.get_include()] 33 | ) 34 | -------------------------------------------------------------------------------- /src/utils/.gitignore: -------------------------------------------------------------------------------- 1 | *.c 2 | *.so 3 | -------------------------------------------------------------------------------- /src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /src/utils/bbox.pyx: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Sergey Karayev 6 | # -------------------------------------------------------- 7 | 8 | cimport cython 9 | import numpy as np 10 | cimport numpy as np 11 | 12 | DTYPE = np.float 13 | ctypedef np.float_t DTYPE_t 14 | 15 | def bbox_overlaps( 16 | np.ndarray[DTYPE_t, ndim=2] boxes, 17 | np.ndarray[DTYPE_t, ndim=2] query_boxes): 18 | """ 19 | Parameters 20 | ---------- 21 | boxes: (N, 4) ndarray of float 22 | query_boxes: (K, 4) ndarray of float 23 | Returns 24 | ------- 25 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes 26 | """ 27 | cdef unsigned int N = boxes.shape[0] 28 | cdef unsigned int K = query_boxes.shape[0] 29 | cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) 30 | cdef DTYPE_t iw, ih, box_area 31 | cdef DTYPE_t ua 32 | cdef unsigned int k, n 33 | for k in range(K): 34 | box_area = ( 35 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) * 36 | (query_boxes[k, 3] - query_boxes[k, 1] + 1) 37 | ) 38 | for n in range(N): 39 | iw = ( 40 | min(boxes[n, 2], query_boxes[k, 2]) - 41 | max(boxes[n, 0], query_boxes[k, 0]) + 1 42 | ) 43 | if iw > 0: 44 | ih = ( 45 | min(boxes[n, 3], query_boxes[k, 3]) - 46 | max(boxes[n, 1], query_boxes[k, 1]) + 1 47 | ) 48 | if ih > 0: 49 | ua = float( 50 | (boxes[n, 2] - boxes[n, 0] + 1) * 51 | (boxes[n, 3] - boxes[n, 1] + 1) + 52 | box_area - iw * ih 53 | ) 54 | overlaps[n, k] = iw * ih / ua 55 | return overlaps 56 | -------------------------------------------------------------------------------- /src/utils/blob.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | import cv2 10 | 11 | def im_list_to_blob(ims): 12 | max_shape = 
np.array([im.shape for im in ims]).max(axis=0) 13 | num_images = len(ims) 14 | blob = np.zeros((num_images, max_shape[0], max_shape[1], 3), 15 | dtype=np.float32) 16 | for i in xrange(num_images): 17 | im = ims[i] 18 | blob[i, 0:im.shape[0], 0:im.shape[1], :] = im 19 | # Move channels (axis 3) to axis 1 20 | # Axis order will become: (batch elem, channel, height, width) 21 | channel_swap = (0, 3, 1, 2) 22 | blob = blob.transpose(channel_swap) 23 | return blob 24 | 25 | def prep_im_for_blob(im, pixel_means, target_size, max_size): 26 | im = im.astype(np.float32, copy=False) 27 | im -= pixel_means 28 | im_shape = im.shape 29 | im_size_min = np.min(im_shape[0:2]) 30 | im_size_max = np.max(im_shape[0:2]) 31 | im_scale = float(target_size) / float(im_size_min) 32 | # Prevent the biggest axis from being more than MAX_SIZE 33 | if np.round(im_scale * im_size_max) > max_size: 34 | im_scale = float(max_size) / float(im_size_max) 35 | im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, 36 | interpolation=cv2.INTER_LINEAR) 37 | 38 | return im, im_scale 39 | -------------------------------------------------------------------------------- /src/utils/timer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import time 9 | 10 | class Timer(object): 11 | def __init__(self): 12 | self.total_time = 0. 13 | self.calls = 0 14 | self.start_time = 0. 15 | self.diff = 0. 16 | self.average_time = 0. 17 | 18 | def tic(self): 19 | # using time.time instead of time.clock because time.clock 20 | # does not normalize for multithreading 21 | self.start_time = time.time() 22 | 23 | def toc(self, average=True): 24 | self.diff = time.time() - self.start_time 25 | self.total_time += self.diff 26 | self.calls += 1 27 | self.average_time = self.total_time / self.calls 28 | if average: 29 | return self.average_time 30 | else: 31 | return self.diff 32 | -------------------------------------------------------------------------------- /tools/.train_net.py.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weichengkuo/DeepBox/c4f8c065b6a51cf296540cc453a44f0519aaacc9/tools/.train_net.py.swp --------------------------------------------------------------------------------
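For reference, a typical way the blob and timer helpers above fit together at test time. This is a sketch only: the image path, pixel means, and the 600/1000 scale settings are illustrative Fast R-CNN-style defaults, not values read from this repo's configuration, and it assumes src/ is on the Python path.

import cv2
import numpy as np
from utils.blob import im_list_to_blob, prep_im_for_blob
from utils.timer import Timer

PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])  # BGR means (assumed)

timer = Timer()
timer.tic()
im = cv2.imread('data/demo/im0.jpg')
# Scale the short side to 600px, capping the long side at 1000px,
# then pack the image into a single NCHW float32 blob.
im, im_scale = prep_im_for_blob(im, PIXEL_MEANS, 600, 1000)
blob = im_list_to_blob([im])
print 'blob shape:', blob.shape, 'scale:', im_scale, 'took %.3fs' % timer.toc(average=False)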