├── .gitattributes
├── README.md
└── matconvnet-1.0-beta24
    ├── .gitattributes
    ├── .gitignore
    ├── .gitmodules
    ├── CONTRIBUTING.md
    ├── COPYING
    ├── Makefile
    ├── README.md
    ├── doc
    │   ├── Makefile
    │   ├── blocks.tex
    │   ├── figures
    │   │   ├── imnet.pdf
    │   │   ├── pepper.pdf
    │   │   └── svg
    │   │       ├── conv.svg
    │   │       ├── convt.svg
    │   │       ├── matconvnet-blue.svg
    │   │       └── matconvnet-white.svg
    │   ├── fundamentals.tex
    │   ├── geometry.tex
    │   ├── impl.tex
    │   ├── intro.tex
    │   ├── matconvnet-manual.tex
    │   ├── matdoc.py
    │   ├── matdocparser.py
    │   ├── references.bib
    │   ├── site
    │   │   ├── docs
    │   │   │   ├── about.md
    │   │   │   ├── css
    │   │   │   │   └── fixes.css
    │   │   │   ├── developers.md
    │   │   │   ├── faq.md
    │   │   │   ├── figures
    │   │   │   │   ├── stn-perf.png
    │   │   │   │   └── stn-samples.png
    │   │   │   ├── functions.md
    │   │   │   ├── gpu.md
    │   │   │   ├── index.md
    │   │   │   ├── install-alt.md
    │   │   │   ├── install.md
    │   │   │   ├── js
    │   │   │   │   ├── mathjaxhelper.js
    │   │   │   │   └── toggle.js
    │   │   │   ├── pretrained.md
    │   │   │   ├── quick.md
    │   │   │   ├── spatial-transformer.md
    │   │   │   ├── training.md
    │   │   │   └── wrappers.md
    │   │   ├── mkdocs.yml
    │   │   └── theme
    │   │       ├── base.html
    │   │       ├── content.html
    │   │       ├── css
    │   │       │   └── base.css
    │   │       ├── js
    │   │       │   └── base.js
    │   │       ├── matconvnet-blue.svg
    │   │       ├── nav.html
    │   │       └── toc.html
    │   └── wrappers.tex
    ├── examples
    │   ├── +solver
    │   │   ├── adadelta.m
    │   │   ├── adagrad.m
    │   │   ├── adam.m
    │   │   └── rmsprop.m
    │   ├── cifar
    │   │   ├── cnn_cifar.m
    │   │   ├── cnn_cifar_init.m
    │   │   └── cnn_cifar_init_nin.m
    │   ├── cnn_train.m
    │   ├── cnn_train_dag.m
    │   ├── custom_imdb
    │   │   ├── cnn_toy_data.m
    │   │   └── cnn_toy_data_generator.m
    │   ├── fast_rcnn
    │   │   ├── +dagnn
    │   │   │   └── LossSmoothL1.m
    │   │   ├── 000004.jpg
    │   │   ├── 000004_boxes.mat
    │   │   ├── README.md
    │   │   ├── bbox_functions
    │   │   │   ├── bbox_clip.m
    │   │   │   ├── bbox_draw.m
    │   │   │   ├── bbox_nms.m
    │   │   │   ├── bbox_overlap.m
    │   │   │   ├── bbox_remove_duplicates.m
    │   │   │   ├── bbox_scale.m
    │   │   │   ├── bbox_transform.m
    │   │   │   └── bbox_transform_inv.m
    │   │   ├── datasets
    │   │   │   ├── add_bboxreg_targets.m
    │   │   │   ├── attach_proposals.m
    │   │   │   ├── cnn_setup_data_voc07.m
    │   │   │   └── cnn_setup_data_voc07_ssw.m
    │   │   ├── fast_rcnn_demo.m
    │   │   ├── fast_rcnn_eval_get_batch.m
    │   │   ├── fast_rcnn_evaluate.m
    │   │   ├── fast_rcnn_init.m
    │   │   ├── fast_rcnn_train.m
    │   │   └── fast_rcnn_train_get_batch.m
    │   ├── framing-u-net
    │   │   ├── data
    │   │   │   ├── imdb_120view.mat
    │   │   │   ├── imdb_60view.mat
    │   │   │   └── imdb_90view.mat
    │   │   ├── demo_cnn_sparse_view.m
    │   │   ├── findLastCheckpoint.m
    │   │   ├── getBatchPatchVal.m
    │   │   ├── getFOV.m
    │   │   ├── getReconPatchVal.m
    │   │   ├── install.m
    │   │   ├── loadState.m
    │   │   ├── matlab
    │   │   │   ├── +dagnn
    │   │   │   │   ├── Error.m
    │   │   │   │   ├── EuclideanLoss.m
    │   │   │   │   ├── UnPooling.m
    │   │   │   │   ├── WaveDec.m
    │   │   │   │   └── WaveRec.m
    │   │   │   └── vl_euclideanloss.m
    │   │   ├── nmse.m
    │   │   └── recon_cnn4img.m
    │   ├── imagenet
    │   │   ├── cnn_imagenet.m
    │   │   ├── cnn_imagenet_camdemo.m
    │   │   ├── cnn_imagenet_deploy.m
    │   │   ├── cnn_imagenet_evaluate.m
    │   │   ├── cnn_imagenet_googlenet.m
    │   │   ├── cnn_imagenet_init.m
    │   │   ├── cnn_imagenet_init_inception.m
    │   │   ├── cnn_imagenet_init_resnet.m
    │   │   ├── cnn_imagenet_minimal.m
    │   │   ├── cnn_imagenet_setup_data.m
    │   │   ├── cnn_imagenet_sync_labels.m
    │   │   ├── getImageBatch.m
    │   │   └── getImageStats.m
    │   ├── mnist
    │   │   ├── cnn_mnist.m
    │   │   ├── cnn_mnist_experiments.m
    │   │   └── cnn_mnist_init.m
    │   ├── spatial_transformer
    │   │   ├── cnn_stn_cluttered_mnist.m
    │   │   ├── cnn_stn_cluttered_mnist_init.m
    │   │   └── readme.txt
    │   └── vggfaces
    │       └── cnn_vgg_faces.m
    ├── matconvnet.sln
    ├── matconvnet.vcxproj
    ├── matconvnet.vcxproj.filters
    ├── matconvnet.xcodeproj
    │   ├── project.pbxproj
    │   ├── project.xcworkspace
    │   │   └── contents.xcworkspacedata
    │   └── xcshareddata
    │       └── xcschemes
    │           ├── matconv CPU.xcscheme
    │           ├── matconv GPU.xcscheme
    │           └── matconv cuDNN.xcscheme
    ├── matlab
    │   ├── +dagnn
    │   │   ├── @DagNN
    │   │   │   ├── DagNN.m
    │   │   │   ├── addLayer.m
    │   │   │   ├── eval.m
    │   │   │   ├── fromSimpleNN.m
    │   │   │   ├── getVarReceptiveFields.m
    │   │   │   ├── getVarSizes.m
    │   │   │   ├── initParams.m
    │   │   │   ├── loadobj.m
    │   │   │   ├── move.m
    │   │   │   ├── print.m
    │   │   │   ├── rebuild.m
    │   │   │   ├── removeLayer.m
    │   │   │   ├── renameLayer.m
    │   │   │   ├── renameParam.m
    │   │   │   ├── renameVar.m
    │   │   │   ├── reset.m
    │   │   │   ├── saveobj.m
    │   │   │   ├── setLayerInputs.m
    │   │   │   ├── setLayerOutputs.m
    │   │   │   └── setLayerParams.m
    │   │   ├── AffineGridGenerator.m
    │   │   ├── BatchNorm.m
    │   │   ├── BilinearSampler.m
    │   │   ├── Concat.m
    │   │   ├── Conv.m
    │   │   ├── ConvTranspose.m
    │   │   ├── Crop.m
    │   │   ├── DropOut.m
    │   │   ├── ElementWise.m
    │   │   ├── Error.m
    │   │   ├── EuclideanLoss.m
    │   │   ├── Filter.m
    │   │   ├── LRN.m
    │   │   ├── Layer.m
    │   │   ├── Loss.m
    │   │   ├── NormOffset.m
    │   │   ├── PDist.m
    │   │   ├── Pooling.m
    │   │   ├── ROIPooling.m
    │   │   ├── ReLU.m
    │   │   ├── Scale.m
    │   │   ├── Sigmoid.m
    │   │   ├── SoftMax.m
    │   │   ├── SpatialNorm.m
    │   │   ├── Sum.m
    │   │   ├── UnPooling.m
    │   │   ├── UniformScalingGridGenerator.m
    │   │   ├── WaveDec.m
    │   │   └── WaveRec.m
    │   ├── ParameterServer.m
    │   ├── compatibility
    │   │   └── parallel
    │   │       ├── gather.m
    │   │       ├── labindex.m
    │   │       └── numlabs.m
    │   ├── simplenn
    │   │   ├── vl_simplenn.m
    │   │   ├── vl_simplenn_diagnose.m
    │   │   ├── vl_simplenn_display.m
    │   │   ├── vl_simplenn_move.m
    │   │   ├── vl_simplenn_start_parserv.m
    │   │   └── vl_simplenn_tidy.m
    │   ├── src
    │   │   ├── bits
    │   │   │   ├── data.cpp
    │   │   │   ├── data.cu
    │   │   │   ├── data.hpp
    │   │   │   ├── datacu.cu
    │   │   │   ├── datacu.hpp
    │   │   │   ├── datamex.cpp
    │   │   │   ├── datamex.cu
    │   │   │   ├── datamex.hpp
    │   │   │   ├── impl
    │   │   │   │   ├── bilinearsampler.hpp
    │   │   │   │   ├── bilinearsampler_cpu.cpp
    │   │   │   │   ├── bilinearsampler_gpu.cu
    │   │   │   │   ├── blashelper.hpp
    │   │   │   │   ├── bnorm.hpp
    │   │   │   │   ├── bnorm_cpu.cpp
    │   │   │   │   ├── bnorm_gpu.cu
    │   │   │   │   ├── compat.h
    │   │   │   │   ├── copy.hpp
    │   │   │   │   ├── copy_cpu.cpp
    │   │   │   │   ├── copy_gpu.cu
    │   │   │   │   ├── cudnnhelper.hpp
    │   │   │   │   ├── fast_mutex.h
    │   │   │   │   ├── im2row.hpp
    │   │   │   │   ├── im2row_cpu.cpp
    │   │   │   │   ├── im2row_gpu.cu
    │   │   │   │   ├── imread_gdiplus.cpp
    │   │   │   │   ├── imread_helpers.hpp
    │   │   │   │   ├── imread_libjpeg.cpp
    │   │   │   │   ├── imread_quartz.cpp
    │   │   │   │   ├── nnbias_blas.hpp
    │   │   │   │   ├── nnbias_cudnn.cu
    │   │   │   │   ├── nnbias_cudnn.hpp
    │   │   │   │   ├── nnbilinearsampler_cudnn.cu
    │   │   │   │   ├── nnbilinearsampler_cudnn.hpp
    │   │   │   │   ├── nnbnorm_cudnn.cu
    │   │   │   │   ├── nnbnorm_cudnn.hpp
    │   │   │   │   ├── nnconv_blas.hpp
    │   │   │   │   ├── nnconv_cudnn.cu
    │   │   │   │   ├── nnconv_cudnn.hpp
    │   │   │   │   ├── nnpooling_cudnn.cu
    │   │   │   │   ├── nnpooling_cudnn.hpp
    │   │   │   │   ├── normalize.hpp
    │   │   │   │   ├── normalize_cpu.cpp
    │   │   │   │   ├── normalize_gpu.cu
    │   │   │   │   ├── pooling.hpp
    │   │   │   │   ├── pooling_cpu.cpp
    │   │   │   │   ├── pooling_gpu.cu
    │   │   │   │   ├── roipooling.hpp
    │   │   │   │   ├── roipooling_cpu.cpp
    │   │   │   │   ├── roipooling_gpu.cu
    │   │   │   │   ├── sharedmem.cuh
    │   │   │   │   ├── subsample.hpp
    │   │   │   │   ├── subsample_cpu.cpp
    │   │   │   │   ├── subsample_gpu.cu
    │   │   │   │   ├── tinythread.cpp
    │   │   │   │   └── tinythread.h
    │   │   │   ├── imread.cpp
    │   │   │   ├── imread.hpp
    │   │   │   ├── mexutils.h
    │   │   │   ├── nnbias.cpp
    │   │   │   ├── nnbias.cu
    │   │   │   ├── nnbias.hpp
    │   │   │   ├── nnbilinearsampler.cpp
    │   │   │   ├── nnbilinearsampler.cu
    │   │   │   ├── nnbilinearsampler.hpp
    │   │   │   ├── nnbnorm.cpp
    │   │   │   ├── nnbnorm.cu
    │   │   │   ├── nnbnorm.hpp
    │   │   │   ├── nnconv.cpp
    │   │   │   ├── nnconv.cu
    │   │   │   ├── nnconv.hpp
    │   │   │   ├── nnfullyconnected.cpp
    │   │   │   ├── nnfullyconnected.cu
    │   │   │   ├── nnfullyconnected.hpp
    │   │   │   ├── nnnormalize.cpp
    │   │   │   ├── nnnormalize.cu
    │   │   │   ├── nnnormalize.hpp
    │   │   │   ├── nnpooling.cpp
    │   │   │   ├── nnpooling.cu
    │   │   │   ├── nnpooling.hpp
    │   │   │   ├── nnroipooling.cpp
    │   │   │   ├── nnroipooling.cu
    │   │   │   ├── nnroipooling.hpp
    │   │   │   ├── nnsubsample.cpp
    │   │   │   ├── nnsubsample.cu
    │   │   │   └── nnsubsample.hpp
    │   │   ├── config
    │   │   │   ├── mex_CUDA_glnxa64.sh
    │   │   │   ├── mex_CUDA_glnxa64.xml
    │   │   │   ├── mex_CUDA_maci64.sh
    │   │   │   └── mex_CUDA_maci64.xml
    │   │   ├── vl_cudatool.cpp
    │   │   ├── vl_cudatool.cu
    │   │   ├── vl_imreadjpeg.cpp
    │   │   ├── vl_imreadjpeg.cu
    │   │   ├── vl_imreadjpeg_old.cpp
    │   │   ├── vl_imreadjpeg_old.cu
    │   │   ├── vl_nnbilinearsampler.cpp
    │   │   ├── vl_nnbilinearsampler.cu
    │   │   ├── vl_nnbnorm.cpp
    │   │   ├── vl_nnbnorm.cu
    │   │   ├── vl_nnconv.cpp
    │   │   ├── vl_nnconv.cu
    │   │   ├── vl_nnconvt.cpp
    │   │   ├── vl_nnconvt.cu
    │   │   ├── vl_nnnormalize.cpp
    │   │   ├── vl_nnnormalize.cu
    │   │   ├── vl_nnpool.cpp
    │   │   ├── vl_nnpool.cu
    │   │   ├── vl_nnroipool.cpp
    │   │   ├── vl_nnroipool.cu
    │   │   ├── vl_taccummex.cpp
    │   │   ├── vl_taccummex.cu
    │   │   ├── vl_tmove.cpp
    │   │   └── vl_tmove.cu
    │   ├── vl_argparse.m
    │   ├── vl_compilenn.m
    │   ├── vl_euclideanloss.m
    │   ├── vl_imreadjpeg.m
    │   ├── vl_nnbilinearsampler.m
    │   ├── vl_nnbnorm.m
    │   ├── vl_nnconcat.m
    │   ├── vl_nnconv.m
    │   ├── vl_nnconvt.m
    │   ├── vl_nncrop.m
    │   ├── vl_nndropout.m
    │   ├── vl_nnloss.m
    │   ├── vl_nnnoffset.m
    │   ├── vl_nnnormalize.m
    │   ├── vl_nnnormalizelp.m
    │   ├── vl_nnpdist.m
    │   ├── vl_nnpool.m
    │   ├── vl_nnrelu.m
    │   ├── vl_nnroipool.m
    │   ├── vl_nnsigmoid.m
    │   ├── vl_nnsoftmax.m
    │   ├── vl_nnsoftmaxloss.m
    │   ├── vl_nnspnorm.m
    │   ├── vl_rootnn.m
    │   ├── vl_setupnn.m
    │   ├── vl_taccum.m
    │   ├── vl_tmove.m
    │   ├── vl_tshow.m
    │   └── xtest
    │       ├── cmyk.jpg
    │       ├── suite
    │       │   ├── Scale.m
    │       │   ├── nnbilinearsampler.m
    │       │   ├── nnbnorm.m
    │       │   ├── nnconcat.m
    │       │   ├── nnconv.m
    │       │   ├── nnconvt.m
    │       │   ├── nndagnn.m
    │       │   ├── nndropout.m
    │       │   ├── nnloss.m
    │       │   ├── nnmnist.m
    │       │   ├── nnnormalize.m
    │       │   ├── nnnormalizelp.m
    │       │   ├── nnoffset.m
    │       │   ├── nnpdist.m
    │       │   ├── nnpool.m
    │       │   ├── nnrelu.m
    │       │   ├── nnroipool.m
    │       │   ├── nnsigmoid.m
    │       │   ├── nnsimplenn.m
    │       │   ├── nnsoftmax.m
    │       │   ├── nnsoftmaxloss.m
    │       │   ├── nnsolvers.m
    │       │   ├── nnspnorm.m
    │       │   ├── nntest.m
    │       │   └── tmovemex.m
    │       ├── vl_bench_bnorm.m
    │       ├── vl_bench_imreadjpeg.m
    │       ├── vl_nnbnorm_old.m
    │       ├── vl_test_bnorm.m
    │       ├── vl_test_economic_relu.m
    │       ├── vl_test_gpureset.m
    │       ├── vl_test_imreadjpeg.m
    │       ├── vl_test_print.m
    │       └── vl_testnn.m
    └── utils
        ├── evaluate_ref_models.m
        ├── get-file.sh
        ├── import-caffe.py
        ├── import-fast-rcnn.sh
        ├── import-fcn.sh
        ├── import-googlenet.sh
        ├── import-ref-models.sh
        ├── import-resnet.sh
        ├── layers.py
        ├── model2dot.m
        ├── preprocess-imagenet.sh
        ├── proto
        │   ├── __init__.py
        │   ├── caffe.proto
        │   ├── caffe_0115.proto
        │   ├── caffe_0115_pb2.py
        │   ├── caffe_6e3916.proto
        │   ├── caffe_6e3916_pb2.py
        │   ├── caffe_b590f1d.proto
        │   ├── caffe_b590f1d_pb2.py
        │   ├── caffe_fastrcnn.proto
        │   ├── caffe_fastrcnn_pb2.py
        │   ├── caffe_old.proto
        │   ├── caffe_old_pb2.py
        │   ├── caffe_pb2.py
        │   ├── get-protos.sh
        │   ├── googlenet_prototxt_patch.diff
        │   ├── vgg_caffe.proto
        │   ├── vgg_caffe_pb2.py
        │   └── vgg_synset_words.txt
        ├── simplenn_caffe_compare.m
        ├── simplenn_caffe_deploy.m
        ├── simplenn_caffe_testdeploy.m
        ├── test_examples.m
        └── tidy_ref_models.m
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Paper
2 | ===============
3 | * Deep Convolutional Framelets: A General Deep Learning Framework for Inverse Problems
4 |     * In press, SIAM Journal on Imaging Sciences (2018): [https://arxiv.org/abs/1707.00372]
5 | * Framing U-Net via Deep Convolutional Framelets: Application to Sparse-view CT
6 |     * Under revision: [https://arxiv.org/abs/1708.08333]
7 |
8 | Implementation
9 | ===============
10 | * MatConvNet (matconvnet-1.0-beta24)
11 |     * Please run matconvnet-1.0-beta24/matlab/vl_compilenn.m to compile MatConvNet.
12 |     * Instructions are available at "http://www.vlfeat.org/matconvnet/mfiles/vl_compilenn/"
13 | * Framing U-Net (matconvnet-1.0-beta24/examples/framing-u-net)
14 |     * Please run matconvnet-1.0-beta24/examples/framing-u-net/install.m (see the sketch after this list) to:
15 |         * Install the customized library
16 |         * Download the trained networks (standard CNN, U-Net, and tight-frame U-Net)
17 |
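A minimal sketch of the steps above, assuming MATLAB is started from the
repository root (add GPU options to `vl_compilenn` according to your setup):

    cd matconvnet-1.0-beta24/matlab
    vl_compilenn ;                       % compile the MatConvNet MEX files
    % vl_compilenn('enableGpu', true) ;  % alternatively, with GPU support
    cd ../examples/framing-u-net
    install ;                            % copy custom layers, download networks
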
18 | Trained network
19 | ===============
20 | * Trained network for 'Standard CNN' is uploaded.
21 | * Trained network for 'U-Net' is uploaded.
22 | * Trained network for 'Tight-frame U-Net' is uploaded.
23 |
24 | Test data
25 | ===============
26 | * Illustrates Fig. 5 of 'Framing U-Net via Deep Convolutional Framelets: Application to Sparse-view CT'.
27 | * CT images from the '2016 Low-Dose CT Grand Challenge' are uploaded for testing.
28 | * Thanks to Dr. Cynthia McCollough, the Mayo Clinic, the American Association of Physicists in Medicine (AAPM), and grants EB017095 and EB017185 from the National Institute of Biomedical Imaging and Bioengineering for providing the Low-Dose CT Grand Challenge dataset.
29 |
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto
2 | *.vcxproj text merge=union eol=crlf
3 | *.vcxproj.filters merge=union eol=crlf
4 | *.sln text merge=union eol=crlf
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/.gitignore:
--------------------------------------------------------------------------------
1 | *.xcodeproj/*xcuserdata*
2 | *.xcodeproj/project.xcworkspace/*xcuserdata*
3 | *.xcodeproj/project.xcworkspace/xcshareddata/
4 | mex/*
5 | mex
6 | data
7 | *.o
8 | *.pyc
9 | *~
10 | index.html
11 | matconvnet-*.tar.gz
12 | local
13 |
14 | # Documentation
15 | doc/figures/svg/*.pdf
16 | doc/figures/*.idraw
17 | doc/.texpadtmp/*
18 | doc/*.pdf
19 | doc/.build
20 |
21 | # Website
22 | doc/site/docs/mfiles
23 | doc/site/site
24 | doc/site/.build
25 | doc/site/theme/css/bootstrap.min.css
26 | doc/site/theme/css/bootstrap.min.css.map
27 | doc/site/theme/css/font-awesome.min.css
28 | doc/site/theme/fonts/fontawesome-webfont.eot
29 | doc/site/theme/fonts/fontawesome-webfont.svg
30 | doc/site/theme/fonts/fontawesome-webfont.ttf
31 | doc/site/theme/fonts/fontawesome-webfont.woff
32 | doc/site/theme/fonts/fontawesome-webfont.woff2
33 | doc/site/theme/js/bootstrap.min.js
34 | doc/site/theme/js/jquery.min.js
35 | doc/site/theme/js/jquery.min.map
36 | doc/site/theme/js/npm.js
37 |
38 | # Visual C
39 | *.suo
40 | *.user
41 | *.sdf
42 | *.opensdf
43 | doc/figures/svg/*.idraw
44 |
45 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/.gitmodules:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/.gitmodules
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing guidelines
2 |
3 | ## How to contribute to MatConvNet
4 |
5 | For a description of how the library is structured, take a look at the
6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on
7 | the MatConvNet website.
8 |
9 | ### Issues
10 |
11 | We are grateful for any reported issues which help to remove bugs and
12 | improve the overall quality of the library. In particular, you can use
13 | the issue tracker to:
14 |
15 | * report bugs and unexpected crashes
16 | * discuss library design decisions
17 | * request new features
18 |
19 | When reporting bugs, it really helps if you can provide the following:
20 |
21 | * Which steps are needed to reproduce the issue
22 | * MATLAB, compiler and CUDA version (where appropriate)
23 |
24 | Before opening an issue to report a bug, please make sure that the bug
25 | is reproducible on the latest version of the master branch.
26 |
27 | The most difficult bugs to remove are those which cause crashes of the
28 | core functions (e.g. CUDA errors etc.). In those cases, it is really
29 | useful to create a *minimal example* which is able to reproduce the
30 | issue. We know that this may mean a bit of work, but it helps us to
31 | remove the bug more quickly.
32 |
33 | ### Pull requests
34 |
35 | Please make any Pull Requests against the `devel` branch rather than
36 | the `master` branch which is maintained as the latest stable release
37 | of the library.
38 |
39 | As a general rule, it is much easier to accept small Pull Requests
40 | that make a single improvement to the library than complex code
41 | changes that affect multiple parts of the library. When submitting
42 | substantial changes, it is useful if unit tests are provided with the
43 | code.
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014-16 The MatConvNet Team.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms are permitted
5 | provided that the above copyright notice and this paragraph are
6 | duplicated in all such forms and that any documentation, advertising
7 | materials, and other materials related to such distribution and use
8 | acknowledge that the software was developed by the MatConvNet
9 | Team. The name of the MatConvNet Team may not be used to endorse or
10 | promote products derived from this software without specific prior
11 | written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT
12 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE
13 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
14 | PURPOSE.
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/README.md:
--------------------------------------------------------------------------------
1 | # MatConvNet: CNNs for MATLAB
2 |
3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural
4 | Networks* (CNNs) for computer vision applications. It is simple,
5 | efficient, and can run and learn state-of-the-art CNNs. Several
6 | example CNNs are included to classify and encode images. Please visit
7 | the [homepage](http://www.vlfeat.org/matconvnet) to know more.
8 |
9 | In case of compilation issues, please first read the
10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and
11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) sections before creating a GitHub
12 | issue. For general inquiries regarding network design and training
13 | related questions, please use the
14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet).
15 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/figures/imnet.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/doc/figures/imnet.pdf
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/figures/pepper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/doc/figures/pepper.pdf
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/faq.md:
--------------------------------------------------------------------------------
1 | # Frequently-asked questions (FAQ)
2 |
3 | ## Running MatConvNet
4 |
5 | ### Do I need a specific version of the CUDA devkit?
6 |
7 | Officially, MathWorks supports a specific version of the CUDA devkit
8 | with each MATLAB version (see [here](install.md#gpu)). However, in
9 | practice we normally use the most recent version of CUDA (and cuDNN)
10 | available from NVIDIA without problems (see
11 | [here](install.md#nvcc)).
12 |
13 | ### Can I use MatConvNet with CuDNN?
14 |
15 | Yes, and this is the recommended way of running MatConvNet on NVIDIA
16 | GPUs. However, you need to install cuDNN and link it to
17 | MatConvNet. See the [installation instructions](install.md#cudnn) for
18 | details; a sketch of a typical compile command is shown below.
19 |
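This is a sketch only; the option names follow `vl_compilenn`'s documented
interface, and the paths are examples that must match your system:

    vl_compilenn('enableGpu', true, ...
                 'enableCudnn', true, ...
                 'cudnnRoot', 'local/cudnn') ;
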
20 | ### How do I fix the error `Attempt to execute SCRIPT vl_nnconv as a function`?
21 |
22 | Before the toolbox can be used, the
23 | [MEX files](http://www.mathworks.com/support/tech-notes/1600/1605.html
24 | ) must be compiled. Make sure to follow the
25 | [installation instructions](install.md). If you have done so and the
26 | MEX files are still not recognized, check that the directory
27 | `matlab/mex` contains the missing files. If the files are
28 | there, there may be a problem with the way MEX files have been
29 | compiled.
30 |
31 | ### Why do files such as `vl_nnconv.m` not contain any code?
32 |
33 | Functions such as `vl_nnconv`, `vl_nnpool`, `vl_nnbnorm` and many
34 | others are implemented as MEX files. In this case, M files such as
35 | `vl_nnconv.m` contain only the function documentation. The code of the
36 | function is actually found in `matlab/src/vl_nnconv.cu` (a CUDA/C++
37 | source file) or similar.
38 |
39 | ### Why do I get compilation error `error: unrecognized command line option "-std=c++11"` on a Linux machine?
40 |
41 | This is caused by an incompatible version of the GCC compiler
42 | ([<4.6](https://gcc.gnu.org/projects/cxx-status.html#cxx11)) with your MATLAB.
43 | You can either install a newer version of GCC (if available), or you
44 | can force MATLAB not to use the offending compiler option and replace it with
45 | the previous name of the C++11 standard argument:
46 | * In MATLAB run: `mex -setup c++`.
47 | * Run `edit(fullfile(prefdir, 'mex_C++_glnxa64.xml'))` to edit your MATLAB
48 | compiler options.
49 | * Replace all occurrences of `-std=c++11` with `-std=c++0x` and save the file.
50 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/gpu.md:
--------------------------------------------------------------------------------
1 | # Using GPU acceleration
2 |
3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the
4 | [Parallel Computing Toolbox](http://www.mathworks.com/products/parallel-computing/). This
5 | toolbox requires CUDA-compatible cards, and you will need a copy of
6 | the corresponding
7 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to
8 | compile GPU support in MatConvNet (see
9 | [compiling](install#compiling)).
10 |
11 | All the core computational functions (e.g. `vl_nnconv`) in the toolbox
12 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore,
13 | switching to use the GPU is as simple as converting the input CPU
14 | arrays into GPU arrays.
15 |
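For example, a minimal sketch (array sizes are illustrative):

    x = gpuArray(randn(28, 28, 1, 16, 'single')) ;  % input batch on the GPU
    w = gpuArray(randn(5, 5, 1, 8, 'single')) ;     % convolution filters
    b = gpuArray(zeros(8, 1, 'single')) ;           % biases
    y = vl_nnconv(x, w, b) ;                        % y is also a gpuArray
    y = gather(y) ;                                 % copy the result back
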
16 | To make the most of a powerful GPU, it is important to balance the
17 | load between CPU and GPU in order to avoid starving the latter. When
18 | training on a problem like ImageNet, the CPU(s) in your
19 | system will be busy loading data from disk and streaming it to the GPU
20 | to evaluate the CNN and its derivative. MatConvNet includes the
21 | utility `vl_imreadjpeg` to accelerate and parallelize loading images
22 | into memory (this function is currently a bottleneck and will be made
23 | more powerful in future releases).
24 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/js/mathjaxhelper.js:
--------------------------------------------------------------------------------
1 | /*
2 | #if false
3 | Prevent Unity to try compiling this js
4 | */
5 | MathJax.Hub.Config({
6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] }
7 | });
8 | /*
9 | #endif
10 | */
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/js/toggle.js:
--------------------------------------------------------------------------------
1 | function toggle_visibility(id) {
2 | var e = document.getElementById(id);
3 | if(e.style.display == 'block')
4 | e.style.display = 'none';
5 | else
6 | e.style.display = 'block';
7 | }
8 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/spatial-transformer.md:
--------------------------------------------------------------------------------
1 | # Spatial Transformer Networks
2 |
3 | This example demonstrates the use of a Spatial Transformer Network
4 | for classifying distorted MNIST digits in clutter.
5 | The source files used in this example can be found in the
6 | `examples/spatial_transformer` directory.
7 |
8 | The spatial transformer network is defined in the `cnn_stn_cluttered_mnist.m`
9 | file. It has three components: (1) a localization network which
10 | predicts six affine transformation parameters for an input image,
11 | (2) a bilinear sampler which applies the above transformation
12 | to the input image, and (3) a classification network which classifies the
13 | output of the bilinear sampler.
14 |
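A minimal sketch of the grid-generator and bilinear-sampler wiring using the
bundled DagNN layers (variable names, and the `Ho`/`Wo` output-size
properties of `dagnn.AffineGridGenerator`, are illustrative assumptions):

    net = dagnn.DagNN() ;
    net.addLayer('grid', dagnn.AffineGridGenerator('Ho', 32, 'Wo', 32), ...
                 {'theta'}, {'grid'}) ;
    net.addLayer('sampler', dagnn.BilinearSampler(), ...
                 {'input', 'grid'}, {'warped'}) ;
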
15 | The picture below shows input images and their transformed versions as determined
16 | by the STN. Note how the STN has learned to rectify the input image.
17 |
18 | ![Input images and their transformed versions](figures/stn-samples.png)
19 |
20 | The following graph compares the training and test errors of two CNNs:
21 | a STN and, a plain classification CNN (with the same configuration as the
22 | classification component of the STN). We note that the STN performs significantly
23 | better (STN test-error = 5.7%, CNN test-error = 14.2%).
24 |
25 | ![Training and test errors of the STN and the plain CNN](figures/stn-perf.png)
26 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/docs/training.md:
--------------------------------------------------------------------------------
1 | ## Using MatConvNet to train convnets
2 |
3 | MatConvNet can be used to train models, typically by using a form of
4 | stochastic gradient descent (SGD) and back-propagation.
5 |
6 | The following learning demonstrators are provided in the MatConvNet
7 | package:
8 |
9 | - **MNIST**. See `examples/mnist/cnn_mnist.m`.
10 | - **CIFAR**. See `examples/cifar/cnn_cifar.m`.
11 | - **ImageNet**. See `examples/imagenet/cnn_imagenet.m`.
12 |
13 | These demos are self-contained; MNIST and CIFAR, in particular,
14 | automatically download and unpack the required data, so that they
15 | should work out-of-the-box.
16 |
17 | While MNIST and CIFAR are small datasets (by today's standards) and
18 | training is feasible on a CPU, ImageNet requires a powerful GPU to
19 | complete in a reasonable time (a few days!). It also requires the
20 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to
21 | accelerate reading large batches of JPEG images and avoid starving the
22 | GPU.
23 |
24 | All these demos use the `examples/cnn_train.m` and
25 | `examples/cnn_train_dag.m` SGD drivers, which are simple
26 | implementations of the standard SGD with momentum, done directly in
27 | MATLAB code. However, it should be easy to implement your own
28 | specialized or improved solver.
29 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/theme/content.html:
--------------------------------------------------------------------------------
1 | {% if meta.source %}
2 |
3 | {% for filename in meta.source %}
4 | {{ filename }}
5 | {% endfor %}
6 |
7 | {% endif %}
8 |
9 | {{ content }}
10 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/theme/js/base.js:
--------------------------------------------------------------------------------
1 |
2 | /* Highlight */
3 | $( document ).ready(function() {
4 | hljs.initHighlightingOnLoad();
5 | $('table').addClass('table table-striped table-hover');
6 | });
7 |
8 |
9 | $('body').scrollspy({
10 | target: '.bs-sidebar',
11 | });
12 |
13 |
14 | /* Prevent disabled links from causing a page reload */
15 | $("li.disabled a").click(function() {
16 | event.preventDefault();
17 | });
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/doc/site/theme/toc.html:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/+solver/adadelta.m:
--------------------------------------------------------------------------------
1 | function [w, state] = adadelta(w, state, grad, opts, ~)
2 | %ADADELTA
3 | % Example AdaDelta solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
4 | %
5 | % AdaDelta sets its own learning rate, so any learning rate set in the
6 | % options of CNN_TRAIN and CNN_TRAIN_DAG will be ignored.
7 | %
8 | % If called without any input argument, returns the default options
9 | % structure.
10 | %
11 | % Solver options: (opts.train.solverOpts)
12 | %
13 | % `epsilon`:: 1e-6
14 | % Small additive constant to regularize variance estimate.
15 | %
16 | % `rho`:: 0.9
17 | % Moving average window for variance update, between 0 and 1 (larger
18 | % values result in slower/more stable updating).
19 |
20 | % Copyright (C) 2016 Joao F. Henriques.
21 | % All rights reserved.
22 | %
23 | % This file is part of the VLFeat library and is made available under
24 | % the terms of the BSD license (see the COPYING file).
25 |
26 | if nargin == 0 % Return the default solver options
27 | w = struct('epsilon', 1e-6, 'rho', 0.9) ;
28 | return ;
29 | end
30 |
31 | if isequal(state, 0) % First iteration, initialize state struct
32 | state = struct('g_sqr', 0, 'delta_sqr', 0) ;
33 | end
34 |
35 | rho = opts.rho ;
36 |
37 | state.g_sqr = state.g_sqr * rho + grad.^2 * (1 - rho) ;
38 | new_delta = -sqrt((state.delta_sqr + opts.epsilon) ./ ...
39 | (state.g_sqr + opts.epsilon)) .* grad ;
40 | state.delta_sqr = state.delta_sqr * rho + new_delta.^2 * (1 - rho) ;
41 |
42 | w = w + new_delta ;
43 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/+solver/adagrad.m:
--------------------------------------------------------------------------------
1 | function [w, g_sqr] = adagrad(w, g_sqr, grad, opts, lr)
2 | %ADAGRAD
3 | % Example AdaGrad solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
4 | %
5 | % Set the initial learning rate for AdaGrad in the options for
6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for
7 | % SGD may be inappropriate for AdaGrad; the default is 0.001.
8 | %
9 | % If called without any input argument, returns the default options
10 | % structure.
11 | %
12 | % Solver options: (opts.train.solverOpts)
13 | %
14 | % `epsilon`:: 1e-10
15 | % Small additive constant to regularize variance estimate.
16 | %
17 | % `rho`:: 1
18 | % Moving average window for variance update, between 0 and 1 (larger
19 | % values result in slower/more stable updating). This is similar to
20 | % RHO in AdaDelta and RMSProp. Standard AdaGrad is obtained with a RHO
21 | % value of 1 (use total average instead of a moving average).
22 | %
23 | % A possibly undesirable effect of standard AdaGrad is that the update
24 | % will monotonically decrease to 0, until training eventually stops. This
25 | % is because the AdaGrad update is inversely proportional to the total
26 | % variance of the gradients seen so far.
27 | % With RHO smaller than 1, a moving average is used instead. This
28 | % prevents the final update from monotonically decreasing to 0.
29 |
30 | % Copyright (C) 2016 Joao F. Henriques.
31 | % All rights reserved.
32 | %
33 | % This file is part of the VLFeat library and is made available under
34 | % the terms of the BSD license (see the COPYING file).
35 |
36 | if nargin == 0 % Return the default solver options
37 | w = struct('epsilon', 1e-10, 'rho', 1) ;
38 | return ;
39 | end
40 |
41 | g_sqr = g_sqr * opts.rho + grad.^2 ;
42 |
43 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ;
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/+solver/rmsprop.m:
--------------------------------------------------------------------------------
1 | function [w, g_sqr] = rmsprop(w, g_sqr, grad, opts, lr)
2 | %RMSPROP
3 | % Example RMSProp solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
4 | %
5 | % Set the initial learning rate for RMSProp in the options for
6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for
7 | % SGD may be inappropriate for RMSProp; the default is 0.001.
8 | %
9 | % If called without any input argument, returns the default options
10 | % structure.
11 | %
12 | % Solver options: (opts.train.solverOpts)
13 | %
14 | % `epsilon`:: 1e-8
15 | % Small additive constant to regularize variance estimate.
16 | %
17 | % `rho`:: 0.99
18 | % Moving average window for variance update, between 0 and 1 (larger
19 | % values result in slower/more stable updating).
20 |
21 | % Copyright (C) 2016 Joao F. Henriques.
22 | % All rights reserved.
23 | %
24 | % This file is part of the VLFeat library and is made available under
25 | % the terms of the BSD license (see the COPYING file).
26 |
27 | if nargin == 0 % Return the default solver options
28 | w = struct('epsilon', 1e-8, 'rho', 0.99) ;
29 | return ;
30 | end
31 |
32 | g_sqr = g_sqr * opts.rho + grad.^2 * (1 - opts.rho) ;
33 |
34 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ;
35 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/custom_imdb/cnn_toy_data_generator.m:
--------------------------------------------------------------------------------
1 | function cnn_toy_data_generator(dataDir)
2 | %CNN_TOY_DATA_GENERATOR
3 | % Generates toy data in the given path: random image of triangles,
4 | % squares and circles.
5 | %
6 | % The directory format is: '<set>/<label>/<sample>.png', where <set>
7 | % is 'train' or 'val', <label> is an integer between 1 and 3, and
8 | % <sample> is the sample index.
9 |
10 | % Copyright (C) 2017 Joao F. Henriques.
11 | % All rights reserved.
12 | %
13 | % This file is part of the VLFeat library and is made available under
14 | % the terms of the BSD license (see the COPYING file).
15 |
16 | % Set random seed
17 | rng('default') ;
18 | rng(0) ;
19 |
20 | % The sets, and number of samples per label in each set
21 | sets = {'train', 'val'} ;
22 | numSamples = [1500, 150] ;
23 |
24 | % Number of polygon points in each class. The circle is created with 50
25 | % points.
26 | numPoints = [3, 4, 50] ;
27 |
28 | for s = 1:2 % Iterate sets
29 | for label = 1:3 % Iterate labels
30 | fprintf('Generating images for set %s, label %i...\n', sets{s}, label) ;
31 |
32 | mkdir(sprintf('%s/%s/%i', dataDir, sets{s}, label)) ;
33 |
34 | for i = 1:numSamples(s) % Iterate samples
35 | % Points of a regular polygon, with random rotation and scale
36 | radius = randi([11, 14]) ;
37 | angles = rand(1) * 2 * pi + (0 : 2 * pi / numPoints(label) : 2 * pi) ;
38 | xs = 16.5 + cos(angles) * radius ;
39 | ys = 16.5 + sin(angles) * radius ;
40 |
41 | % Generate image
42 | image = poly2mask(xs, ys, 32, 32) ;
43 |
44 | % Save it
45 | imwrite(image, sprintf('%s/%s/%i/%04i.png', dataDir, sets{s}, label, i)) ;
46 | end
47 | end
48 | end
49 |
50 | end
51 |
52 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/000004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/fast_rcnn/000004.jpg
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/000004_boxes.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/fast_rcnn/000004_boxes.mat
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/README.md:
--------------------------------------------------------------------------------
1 | # Fast-RCNN demo
2 |
3 | This folder contains an example implementation of Fast-RCNN [1] in
4 | MatConvNet. The example trains and tests on the PASCAL VOC 2007 data.
5 |
6 | There are three entry-point scripts:
7 |
8 | * `fast_rcnn_demo.m`: runs the original Caffe model imported in MatConvNet.
9 | * `fast_rcnn_train.m`: trains a new model from scratch, using pre-computed proposals.
10 | * `fast_rcnn_evaluate.m`: evaluates the trained model.
11 |
12 | Note that the code does not ship with a proposal generation method, so
13 | proposals must be precomputed (using e.g. edge boxes or selective
14 | search windows).
15 |
16 | The `fast_rcnn_demo.m` code should run out of the box, downloading the
17 | model as needed.
18 |
19 | To test the training code using the first GPU on your system, use
20 | something like:
21 |
22 | run matlab/vl_setupnn
23 | addpath examples/fast_rcnn
24 | fast_rcnn_train('train',struct('gpus',1)) ;
25 | fast_rcnn_evaluate('gpu',1) ;
26 |
27 | ## References
28 |
29 | 1. *Fast R-CNN*, R. Girshick, International Conference on Computer
30 | Vision (ICCV), 2015.
31 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_clip.m:
--------------------------------------------------------------------------------
1 | function boxes = bbox_clip(boxes, im_size)
2 | % bbox_clip Clip boxes to image boundaries.
3 | %
4 | % Copyright (C) 2016 Hakan Bilen.
5 | % All rights reserved.
6 | %
7 | % This file is part of the VLFeat library and is made available under
8 | % the terms of the BSD license (see the COPYING file).
9 | boxes(:,1) = max(min(boxes(:,1),im_size(2)),1);
10 | boxes(:,2) = max(min(boxes(:,2),im_size(1)),1);
11 | boxes(:,3) = max(min(boxes(:,3),im_size(2)),1);
12 | boxes(:,4) = max(min(boxes(:,4),im_size(1)),1);
13 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_draw.m:
--------------------------------------------------------------------------------
1 | function im = bbox_draw(im,boxes,c,t)
2 |
3 | % copied from Ross Girshick
4 | % Fast R-CNN
5 | % Copyright (c) 2015 Microsoft
6 | % Licensed under The MIT License [see LICENSE for details]
7 | % Written by Ross Girshick
8 | % --------------------------------------------------------
9 | % source: https://github.com/rbgirshick/fast-rcnn/blob/master/matlab/showboxes.m
10 | %
11 | %
12 | % Fast R-CNN
13 | %
14 | % Copyright (c) Microsoft Corporation
15 | %
16 | % All rights reserved.
17 | %
18 | % MIT License
19 | %
20 | % Permission is hereby granted, free of charge, to any person obtaining a
21 | % copy of this software and associated documentation files (the "Software"),
22 | % to deal in the Software without restriction, including without limitation
23 | % the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 | % and/or sell copies of the Software, and to permit persons to whom the
25 | % Software is furnished to do so, subject to the following conditions:
26 | %
27 | % The above copyright notice and this permission notice shall be included
28 | % in all copies or substantial portions of the Software.
29 | %
30 | % THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31 | % IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32 | % FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
33 | % THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
34 | % OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
35 | % ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
36 | % OTHER DEALINGS IN THE SOFTWARE.
37 |
38 | image(im);
39 | axis image;
40 | axis off;
41 | set(gcf, 'Color', 'white');
42 |
43 | if nargin<3
44 | c = 'r';
45 | t = 2;
46 | end
47 |
48 | s = '-';
49 | if ~isempty(boxes)
50 | x1 = boxes(:, 1);
51 | y1 = boxes(:, 2);
52 | x2 = boxes(:, 3);
53 | y2 = boxes(:, 4);
54 | line([x1 x1 x2 x2 x1]', [y1 y2 y2 y1 y1]', ...
55 | 'color', c, 'linewidth', t, 'linestyle', s);
56 | for i = 1:size(boxes, 1)
57 | text(double(x1(i)), double(y1(i)) - 2, ...
58 | sprintf('%.4f', boxes(i, end)), ...
59 | 'backgroundcolor', 'b', 'color', 'w', 'FontSize', 10);
60 | end
61 | end
62 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_overlap.m:
--------------------------------------------------------------------------------
1 | function overlaps = bbox_overlap(boxes1,boxes2)
2 | % Copyright (C) 2016 Hakan Bilen.
3 | % All rights reserved.
4 | %
5 | % This file is part of the VLFeat library and is made available under
6 | % the terms of the BSD license (see the COPYING file).
7 | x11 = boxes1(:,1);
8 | y11 = boxes1(:,2);
9 | x12 = boxes1(:,3);
10 | y12 = boxes1(:,4);
11 |
12 | x21 = boxes2(:,1);
13 | y21 = boxes2(:,2);
14 | x22 = boxes2(:,3);
15 | y22 = boxes2(:,4);
16 |
17 | N1 = size(boxes1,1);
18 | N2 = size(boxes2,1);
19 |
20 | area1 = (x12-x11+1) .* (y12-y11+1);
21 | area2 = (x22-x21+1) .* (y22-y21+1);
22 |
23 | overlaps = zeros(N1,N2);
24 |
25 | for i=1:N1
26 |
27 | xx1 = max(x11(i), x21);
28 | yy1 = max(y11(i), y21);
29 | xx2 = min(x12(i), x22);
30 | yy2 = min(y12(i), y22);
31 |
32 | w = max(0.0, xx2-xx1+1);
33 | h = max(0.0, yy2-yy1+1);
34 |
35 | inter = w.*h;
36 | overlaps(i,:) = inter ./ (area1(i) + area2 - inter);
37 | end
38 |
39 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_remove_duplicates.m:
--------------------------------------------------------------------------------
1 | function bboxeso = bbox_remove_duplicates(bboxes, minSize, maxNum)
2 | % Copyright (C) 2016 Hakan Bilen.
3 | % All rights reserved.
4 | %
5 | % This file is part of the VLFeat library and is made available under
6 | % the terms of the BSD license (see the COPYING file).
7 | bboxeso = cell(size(bboxes));
8 | for i=1:numel(bboxes)
9 | bbox = bboxes{i};
10 | % remove small bbox
11 | isGood = (bbox(:,3)>=bbox(:,1)-1+minSize) & (bbox(:,4)>=bbox(:,2)-1+minSize);
12 | bbox = bbox(isGood,:);
13 | % remove duplicate ones
14 | [dummy, uniqueIdx] = unique(bbox, 'rows', 'first');
15 | uniqueIdx = sort(uniqueIdx);
16 | bbox = bbox(uniqueIdx,:);
17 | % limit number for training
18 | nB = min(size(bbox,1),maxNum);
19 |
20 | bboxeso{i} = bbox(1:nB,:);
21 | end
22 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_scale.m:
--------------------------------------------------------------------------------
1 | function boxOut = bbox_scale(boxIn,scale,szOut)
2 | % Copyright (C) 2016 Hakan Bilen.
3 | % All rights reserved.
4 | %
5 | % This file is part of the VLFeat library and is made available under
6 | % the terms of the BSD license (see the COPYING file).
7 |
8 | if isempty(boxIn), boxOut = []; return; end
9 |
10 | boxOut = scale * (boxIn-1) + 1;
11 |
12 | boxOut = [max(1,round(boxOut(:,1))),...
13 | max(1,round(boxOut(:,2))),...
14 | min(szOut(1),round(boxOut(:,3))),...
15 | min(szOut(2),round(boxOut(:,4)))];
16 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_transform.m:
--------------------------------------------------------------------------------
1 | function targets = bbox_transform(ex_rois, gt_rois)
2 | % Copyright (C) 2016 Hakan Bilen.
3 | % All rights reserved.
4 | %
5 | % This file is part of the VLFeat library and is made available under
6 | % the terms of the BSD license (see the COPYING file).
7 |
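% The computed targets follow the R-CNN bounding-box regression
% parameterization: (dx, dy) are the ground-truth/example center offsets
% normalized by the example box width/height, and (dw, dh) are log ratios
% of the corresponding box sizes.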
8 | ex_widths = ex_rois(:, 3) - ex_rois(:, 1) + 1.0 ;
9 | ex_heights = ex_rois(:, 4) - ex_rois(:, 2) + 1.0 ;
10 | ex_ctr_x = ex_rois(:, 1) + 0.5 * ex_widths ;
11 | ex_ctr_y = ex_rois(:, 2) + 0.5 * ex_heights ;
12 |
13 | gt_widths = gt_rois(:, 3) - gt_rois(:, 1) + 1.0 ;
14 | gt_heights = gt_rois(:, 4) - gt_rois(:, 2) + 1.0 ;
15 | gt_ctr_x = gt_rois(:, 1) + 0.5 * gt_widths ;
16 | gt_ctr_y = gt_rois(:, 2) + 0.5 * gt_heights ;
17 |
18 | targets_dx = (gt_ctr_x - ex_ctr_x) ./ ex_widths ;
19 | targets_dy = (gt_ctr_y - ex_ctr_y) ./ ex_heights ;
20 | targets_dw = log(gt_widths ./ ex_widths) ;
21 | targets_dh = log(gt_heights ./ ex_heights) ;
22 |
23 | targets = [targets_dx, targets_dy, targets_dw, targets_dh] ;
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/fast_rcnn/bbox_functions/bbox_transform_inv.m:
--------------------------------------------------------------------------------
1 | function pred_boxes = bbox_transform_inv(boxes, deltas)
2 | % Copyright (C) 2016 Hakan Bilen.
3 | % All rights reserved.
4 | %
5 | % This file is part of the VLFeat library and is made available under
6 | % the terms of the BSD license (see the COPYING file).
7 |
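% Inverse of the parameterization used in bbox_transform: the predicted
% boxes are recovered by shifting the centers by (dx, dy) scaled by the
% box width/height, and scaling the sizes by exp(dw) and exp(dh).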
8 | if isempty(boxes), return; end
9 |
10 | widths = boxes(:,3) - boxes(:,1);
11 | heights = boxes(:,4) - boxes(:,2);
12 | ctr_x = boxes(:,1) + 0.5 * widths;
13 | ctr_y = boxes(:,2) + 0.5 * heights;
14 |
15 | dx = deltas(:,1);
16 | dy = deltas(:,2);
17 | dw = deltas(:,3);
18 | dh = deltas(:,4);
19 |
20 | pred_ctr_x = dx .* widths + ctr_x;
21 | pred_ctr_y = dy .* heights + ctr_y;
22 | pred_w = exp(dw) .* widths;
23 | pred_h = exp(dh) .* heights;
24 |
25 | pred_boxes = zeros(size(deltas), 'like', deltas);
26 | % x1
27 | pred_boxes(:, 1) = pred_ctr_x - 0.5 * pred_w;
28 | % y1
29 | pred_boxes(:, 2) = pred_ctr_y - 0.5 * pred_h;
30 | % x2
31 | pred_boxes(:, 3) = pred_ctr_x + 0.5 * pred_w;
32 | % y2
33 | pred_boxes(:, 4) = pred_ctr_y + 0.5 * pred_h;
34 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_120view.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_120view.mat
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_60view.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_60view.mat
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_90view.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/framing-u-net/data/imdb_90view.mat
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/findLastCheckpoint.m:
--------------------------------------------------------------------------------
1 | function epoch = findLastCheckpoint(modelDir)
2 | % -------------------------------------------------------------------------
3 | list = dir(fullfile(modelDir, 'net-epoch-*.mat')) ;
4 | tokens = regexp({list.name}, 'net-epoch-([\d]+).mat', 'tokens') ;
5 | epoch = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ;
6 | epoch = max([epoch 0]) ;
7 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/getBatchPatchVal.m:
--------------------------------------------------------------------------------
1 |
2 | % --------------------------------------------------------------------
3 | function [dst, nsmp] = getBatchPatchVal(src, opts)
4 | % --------------------------------------------------------------------
5 | nsz = opts.imageSize;
6 | patch = opts.inputSize;
7 | ker = opts.kernalSize;
8 |
9 | nsz(1) = nsz(1) + 2*ker(1);
10 | nsz(2) = nsz(2) + 2*ker(2);
11 |
12 | niy = ceil(nsz(1)/patch(1) + nsz(1)/patch(1)*2*ker(1)/patch(1));
13 | nix = ceil(nsz(2)/patch(2) + nsz(2)/patch(2)*2*ker(2)/patch(2));
14 | nsmp = niy*nix;
15 |
16 | %%
17 | iy_set = fix(linspace(1, nsz(1) - patch(1) + 1, niy));
18 | ix_set = fix(linspace(1, nsz(2) - patch(2) + 1, nix));
19 |
20 | by = (1:patch(1)) - 1;
21 | bx = (1:patch(2)) - 1;
22 |
23 | %%
24 | src = padarray(src, [ker(1), ker(2)], 'both', 'symmetric');
25 | dst = zeros(patch(1), patch(2), size(src, 3), nsmp, 'like', src);
26 |
27 | for ix = 1:nix
28 | for iy = 1:niy
29 | iv = niy*(ix - 1) + iy;
30 | dst(:,:,:,iv) = src(iy_set(iy) + by,ix_set(ix) + bx,:);
31 | end
32 | end
33 |
34 | end
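% Usage sketch (illustrative values; getReconPatchVal reassembles patches
% produced here when called with the same opts):
%   opts.imageSize  = [512 512 1] ;
%   opts.inputSize  = [256 256] ;
%   opts.kernalSize = [32 32] ;   % field name as spelled in this codebase
%   [patches, n] = getBatchPatchVal(img, opts) ;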
35 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/getFOV.m:
--------------------------------------------------------------------------------
1 | function fov = getFOV(param, wgt)
2 |
3 | if nargin < 2
4 | wgt = 1;
5 | end
6 |
7 | szX_h = param.dX*param.nX/2;
8 | szY_h = param.dY*param.nY/2;
9 |
10 | radius = fix(param.DSO*tan(param.dStepDctX*param.nNumDctX/2 - abs(param.dOffsetX)))*wgt;
11 |
12 | [rr, cc] = meshgrid(linspace(-szX_h, szX_h, param.nX), linspace(-szY_h, szY_h, param.nY));
13 |
14 | fov = sqrt(rr.^2 + cc.^2) < radius;
15 |
16 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/getReconPatchVal.m:
--------------------------------------------------------------------------------
1 |
2 | % --------------------------------------------------------------------
3 | function [dst, wgt] = getReconPatchVal(src, opts)
4 | % --------------------------------------------------------------------
5 | nsz = opts.imageSize;
6 | patch = opts.inputSize;
7 | ker = opts.kernalSize;
8 |
9 | nsz(1) = nsz(1) + 2*ker(1);
10 | nsz(2) = nsz(2) + 2*ker(2);
11 |
12 | niy = ceil(nsz(1)/patch(1) + nsz(1)/patch(1)*2*ker(1)/patch(1));
13 | nix = ceil(nsz(2)/patch(2) + nsz(2)/patch(2)*2*ker(2)/patch(2));
14 | nsmp = niy*nix;
15 |
16 | %%
17 | iy_set = fix(linspace(1, nsz(1) - patch(1) + 1, niy));
18 | ix_set = fix(linspace(1, nsz(2) - patch(2) + 1, nix));
19 |
20 | % by = (1:patch(1)) - 1;
21 | % bx = (1:patch(2)) - 1;
22 |
23 | by = (ker(1)+1:patch(1)-ker(1)) - 1;
24 | bx = (ker(2)+1:patch(2)-ker(2)) - 1;
25 |
26 | %%
27 | dst = zeros(nsz(1), nsz(2), nsz(3), 'like', src);
28 | wgt = zeros(nsz(1), nsz(2), nsz(3), 'like', src);
29 |
30 | for ix = 1:nix
31 | for iy = 1:niy
32 | iv = niy*(ix - 1) + iy ;
33 |
34 | dst(iy_set(iy) + by,ix_set(ix) + bx,:) = dst(iy_set(iy) + by,ix_set(ix) + bx,:) + src(by + 1,bx + 1,:,iv);
35 | wgt(iy_set(iy) + by,ix_set(ix) + bx,:) = wgt(iy_set(iy) + by,ix_set(ix) + bx,:) + 1 ;
36 |
37 | end
38 | end
39 |
40 | dst = dst(ker(1)+1:end-ker(1), ker(2)+1:end-ker(2), :);
41 | wgt = wgt(ker(1)+1:end-ker(1), ker(2)+1:end-ker(2), :);
42 |
43 | dst = dst./wgt;
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/install.m:
--------------------------------------------------------------------------------
1 | %% Step 1. Copy the customized library
2 | copyfile('./matlab', '../../matlab');
3 |
4 | %% Step 2. Download the trained network
5 | % standard cnn
6 | network_path = './network/cnn_sparse_view_dagnn_single_init_residual_input256_view60120240/';
7 | network_name = [network_path 'net-epoch-150.mat'];
8 | network_url = 'https://www.dropbox.com/s/3yt24xpng0jn5z1/net-epoch-150.mat?dl=1';
9 |
10 | mkdir(network_path);
11 | fprintf('downloading standard cnn from %s\n', network_url) ;
12 | websave(network_name, network_url);
13 |
14 | % u-net
15 | network_path = './network/cnn_sparse_view_dagnn_multi_init_residual_input256_view60120240/';
16 | network_name = [network_path 'net-epoch-150.mat'];
17 | network_url = 'https://www.dropbox.com/s/nap5nualurgdo29/net-epoch-150.mat?dl=1';
18 |
19 | mkdir(network_path);
20 | fprintf('downloading u-net from %s\n', network_url) ;
21 | websave(network_name, network_url);
22 |
23 | % tight-frame u-net
24 | network_path = './network/cnn_sparse_view_dagnn_tight_frame_init_residual_input256_view60120240/';
25 | network_name = [network_path 'net-epoch-150.mat'];
26 | network_url = 'https://www.dropbox.com/s/uhzuf3694v124lc/net-epoch-150.mat?dl=1';
27 |
28 | mkdir(network_path);
29 | fprintf('downloading tight-frame u-net from %s\n', network_url) ;
30 | websave(network_name, network_url);
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/loadState.m:
--------------------------------------------------------------------------------
1 | function [net, state, stats] = loadState(fileName)
2 | % -------------------------------------------------------------------------
3 | load(fileName, 'net', 'state', 'stats') ;
4 | net = dagnn.DagNN.loadobj(net) ;
5 | if isempty(whos('stats'))
6 | error('Epoch ''%s'' was only partially saved. Delete this file and try again.', ...
7 | fileName) ;
8 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/matlab/+dagnn/Error.m:
--------------------------------------------------------------------------------
1 | %ERROR DagNN layer computing reconstruction-error metrics
2 | %  Accepts 3 or 4 inputs: a reconstruction, a reference, and a mean image
3 | %  that is added back to both before evaluation; with a 4th input, the
4 | %  first two inputs are first subtracted from it (residual mode).
5 | %  The metric ('psnr', 'mse', or 'l2') is selected by the LOSS property.
6 | classdef Error < dagnn.Loss
7 | properties
8 | % loss = 'psnr'
9 | % ignoreAverage = true
10 | method = 'image'
11 | end
12 |
13 | methods
14 | function outputs = forward(obj, inputs, params)
15 |
16 | switch numel(inputs)
17 | case 4
18 | inputs{1} = inputs{4} - inputs{1} ;
19 | inputs{2} = inputs{4} - inputs{2} ;
20 | end
21 |
22 |
23 | inputs{1} = bsxfun(@plus, inputs{1}, inputs{3});
24 | inputs{2} = bsxfun(@plus, inputs{2}, inputs{3});
25 |
26 | peakval = max(max(inputs{2},[],1),[],2);
27 |
28 | peakval(peakval == 0) = 1;
29 |
30 | inputs{1} = bsxfun(@times, inputs{1}, 1./peakval);
31 | inputs{2} = bsxfun(@times, inputs{2}, 1./peakval);
32 |
33 | switch obj.loss
34 | case 'psnr'
35 | outputs{1} = psnr(gather(inputs{2}(:)), gather(inputs{1}(:))) ;
36 | case 'mse'
37 | outputs{1} = immse(gather(inputs{2}(:)), gather(inputs{1}(:))) ;
38 | case 'l2'
39 | outputs{1} = norm(gather(inputs{2}(:)) - gather(inputs{1}(:))) ;
40 | otherwise
41 | error('Unknown loss ''%s''.', obj.loss);
42 | end
43 | obj.accumulateAverage(inputs, outputs);
44 | end
45 |
46 | function accumulateAverage(obj, inputs, outputs)
47 | if obj.ignoreAverage, return; end;
48 | n = obj.numAveraged ;
49 | m = n + size(inputs{1}, 4);
50 | obj.average = bsxfun(@plus, n * obj.average, gather(outputs{1}*size(inputs{1}, 4))) / m ;
51 | obj.numAveraged = m ;
52 | end
53 |
54 | function obj = Error(varargin)
55 | obj.load(varargin) ;
56 | obj.method = obj.method;
57 | end
58 | end
59 | end
60 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/matlab/+dagnn/EuclideanLoss.m:
--------------------------------------------------------------------------------
1 | %EUCLIDEANLOSS Euclidean loss, a vl_nnpdist dagnn wrapper
2 | % Accepts 2 or 3 inputs, where third input is used as variable
3 | % 'instanceWeights' parameter. Derivatives for the 3rd input are not
4 | % computed.
5 | % By default aggregates the element-wise loss.
6 | classdef EuclideanLoss < dagnn.Loss
7 | properties
8 | p = 2;
9 | aggregate = true;
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | switch numel(inputs)
15 | case 2
16 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
17 | 'aggregate', true, 'noRoot', true, obj.opts{:}) ;
18 | case 3
19 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
20 | 'aggregate', true, 'noRoot', true, 'instanceWeights', inputs{3}, ...
21 | obj.opts{:}) ;
22 | otherwise
23 | error('Invalid number of inputs');
24 | end
25 | obj.accumulateAverage(inputs, outputs);
26 | end
27 |
28 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
29 | derInputs = cell(1, numel(inputs));
30 | switch numel(inputs)
31 | case 2
32 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
33 | obj.p, derOutputs{1}, 'aggregate', false, 'noRoot', true, obj.opts{:}) ;
34 | case 3
35 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
36 | obj.p, derOutputs{1}, 'aggregate', false, 'noRoot', true, ...
37 | 'instanceWeights', inputs{3}, obj.opts{:}) ;
38 | otherwise
39 | error('Invalid number of inputs');
40 | end
41 | derParams = {} ;
42 |
43 | derInputs{1} = derInputs{1}./(size(derInputs{1}, 1) * size(derInputs{1}, 2) * size(derInputs{1}, 3));
44 | derInputs{2} = derInputs{2}./(size(derInputs{2}, 1) * size(derInputs{2}, 2) * size(derInputs{2}, 3));
45 | end
46 |
47 | function obj = EuclideanLoss(varargin)
48 | obj.load(varargin) ;
49 | obj.loss = 'pdist';
50 | end
51 | end
52 | end
53 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/matlab/+dagnn/UnPooling.m:
--------------------------------------------------------------------------------
1 | classdef UnPooling < dagnn.Filter
2 | properties
3 | method = 'avg'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(repmat(inputs{1}, self.poolSize), self.poolSize, inputs{1}, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(derOutputs{1}, self.poolSize, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = UnPooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
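
A note on the implementation above: the forward pass realizes unpooling as
the *backward* pass of pooling. vl_nnpool(X, POOL, DZDY, ...) computes the
derivative of pooling with respect to its input, which upsamples DZDY to
the size of X; for the 'avg' method this derivative does not depend on the
values of X, so repmat(inputs{1}, self.poolSize) only serves to supply an
array of the right (enlarged) size. Symmetrically, backward() applies the
forward pooling operator to the incoming derivative.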
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/matlab/vl_euclideanloss.m:
--------------------------------------------------------------------------------
1 | function y = vl_euclideanloss(x, c, dzdy)
  2 | %VL_EUCLIDEANLOSS Euclidean (squared L2) loss and its derivative
  3 | %   Y = VL_EUCLIDEANLOSS(X, C) computes the loss; Y = VL_EUCLIDEANLOSS(X, C, DZDY) its derivative w.r.t. X.
4 |
5 | assert(numel(x) == numel(c));
6 |
7 | d = size(x);
8 |
9 | assert(all(d == size(c)));
10 |
11 | if nargin == 2 || (nargin == 3 && isempty(dzdy))
12 |
13 | y = 1 / 2 / prod(d) * sum(subsref((x - c) .^ 2, substruct('()', {':'}))); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
14 | % Y = 1 / (2 * prod(d(1 : 3))) * sum(subsref((X - c) .^ 2, substruct('()', {':'}))); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
15 |
16 | elseif nargin == 3 && ~isempty(dzdy)
17 |
18 | assert(numel(dzdy) == 1);
19 |
20 | y = dzdy / prod(d) * (x - c); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
21 | % Y = dzdy / prod(d(1 : 3)) * (X - c); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
22 |
23 | end
24 |
25 | end
26 |
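
A small self-contained check of the two call forms on random data:

    % Forward mode: scalar loss 1/(2*numel(x)) * sum((x - c).^2).
    x = randn(8, 8, 1, 4, 'single') ;
    c = x + 0.1 * randn(size(x), 'single') ;
    y = vl_euclideanloss(x, c) ;

    % Backward mode: derivative of the loss w.r.t. x, scaled by dzdy.
    dzdx = vl_euclideanloss(x, c, single(1)) ;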
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/nmse.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/examples/framing-u-net/nmse.m
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/framing-u-net/recon_cnn4img.m:
--------------------------------------------------------------------------------
1 | function rec = recon_cnn4img(net, data, opts)
2 |
3 | if ~isempty(opts.gpus)
4 | net.move('gpu');
5 | net.vars(opts.vid).precious = true ;
6 | end
7 |
8 | rec = zeros(opts.size, 'single');
9 | data = data + opts.offset;
10 | set = opts.set;
11 |
12 |
13 | for ival = 1:1:length(set)
14 |
15 | iz = set(ival);
16 |
17 | disp([num2str(iz) ' / ' num2str(set(end))]);
18 |
19 | data_ = single(squeeze(data(:,:,iz)));
20 |
21 | data_patch = getBatchPatchVal(data_, opts);
22 |
23 | if opts.meanNorm
24 | means_patch_ = mean(mean(mean(data_patch, 1), 2), 3);
25 | else
26 | means_patch_ = 0;
27 | end
28 |
29 | data_patch = bsxfun(@minus, data_patch, means_patch_);
30 |
31 | if opts.varNorm
32 | vars_patch = max(max(max(abs(data_patch), [], 1), [], 2), [], 3);
33 | else
34 | vars_patch = 1;
35 | end
36 |
37 | data_patch = bsxfun(@times, opts.wgt*data_patch, 1./vars_patch);
38 |
39 | %%
40 | nbatch = size(data_patch, 4);
41 | batch_ = (1:opts.batchSize) - 1;
42 |
43 | rec_batch = single([]);
44 |
45 | for ibatch = 1:opts.batchSize:nbatch
46 | batch = ibatch + batch_;
47 | batch(batch > nbatch) = [];
48 |
49 | data_batch = data_patch(:,:,:,batch);
50 |
51 | if ~isempty(opts.gpus)
52 | data_batch = gpuArray(data_batch);
53 | end
54 |
55 | net.eval({'input',data_batch}) ;
56 | rec_batch_ = net.vars(opts.vid).value;
57 |
58 | if strcmp(opts.method, 'residual')
59 | rec_batch_ = data_batch - rec_batch_;
60 | end
61 |
62 | rec_batch(:,:,:,batch) = gather(rec_batch_);
63 | end
64 |
65 | rec_batch = bsxfun(@times, rec_batch/opts.wgt, vars_patch);
66 |
67 | rec_ = getReconPatchVal(rec_batch, opts);
68 | rec(:,:,ival) = bsxfun(@plus, rec_, means_patch_);
69 |
70 | end
71 |
72 | rec = max(rec - opts.offset, 0);
73 |
74 | net.reset();
75 | net.move('cpu');
76 |
77 | end
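
For orientation, a sketch of the opts structure this driver expects,
inferred from the fields read above; all values (and the output variable
name 'regr') are placeholders, and the list is not exhaustive since
getBatchPatchVal/getReconPatchVal consume additional patch-geometry fields:

    opts.gpus      = 1 ;                        % [] to stay on the CPU
    opts.vid       = net.getVarIndex('regr') ;  % index of the output variable
    opts.size      = [512 512 numSlices] ;      % size of the reconstruction
    opts.offset    = 0 ;                        % additive data offset
    opts.set       = 1:numSlices ;              % slice indices to process
    opts.batchSize = 8 ;
    opts.wgt       = 1 ;                        % global intensity weight
    opts.meanNorm  = true ;                     % per-slice mean subtraction
    opts.varNorm   = true ;                     % per-slice peak normalization
    opts.method    = 'residual' ;               % or 'image'
    rec = recon_cnn4img(net, data, opts) ;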
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/cnn_imagenet_camdemo.m:
--------------------------------------------------------------------------------
1 | function cnn_imagenet_camdemo()
2 | %CNN_IMAGENET_CAMDEMO Realtime classification with a webcam
3 | % Download the pre-trained models from
4 | % http://www.vlfeat.org/matconvnet/pretrained/
5 | %
6 | % Download MATLAB's webcam support from:
7 | % http://mathworks.com/hardware-support/matlab-webcam.html
8 |
9 | run(fullfile(fileparts(mfilename('fullpath')), ...
10 | '..', '..', 'matlab', 'vl_setupnn.m')) ;
11 |
12 | cam = webcam(1) ;
 13 |
14 | model = 'imagenet-googlenet-dag' ;
15 | %model = 'imagenet-vgg-m' ;
16 | %model = 'imagenet-vgg-f' ;
17 | net = load(fullfile(vl_rootnn, 'data', 'models', sprintf('%s.mat', model))) ;
18 |
19 | if strcmp(model, 'imagenet-googlenet-dag')
20 | net = dagnn.DagNN.loadobj(net) ;
21 | out = net.getVarIndex('prob') ;
22 | dag = true ;
23 | else
24 | dag = false ;
25 | end
26 |
27 | scoress = zeros(1000,1) ;
28 | momentum = .5 ;
29 |
30 | while true
31 | % obtain and preprocess an image
32 | im = snapshot(cam) ;
33 | d = size(im,1)-size(im,2) ;
34 | dy = floor(max(d,0)/2) ;
35 | dx = floor(max(-d,0)/2) ;
36 | im = im(dy+1:end-dy, dx+1:end-dx, :) ; % center crop
37 | im_ = single(im) ; % note: 255 range
38 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2), 'bilinear') ;
39 | im_ = im_ - net.meta.normalization.averageImage ;
40 |
41 | % run the CNN
42 | if dag
43 | net.eval({'data',im_}) ;
44 | scores = squeeze(gather(net.vars(out).value)) ;
45 | else
46 | res = vl_simplenn(net, im_) ;
47 | scores = squeeze(gather(res(end).x)) ;
48 | end
49 |
50 | % smooth scores and pick the best
51 | scoress = momentum*scoress + (1-momentum)*scores ;
52 | [bestScore, best] = max(scoress) ;
53 |
54 | % visualize
55 | figure(1) ; clf ; imagesc(im) ;
56 | title(sprintf('%s, score %.3f',...
57 | strtok(net.meta.classes.description{best},','), bestScore), ...
58 | 'FontSize', 30) ;
59 | axis equal off ;
60 | drawnow ;
61 | end
62 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/cnn_imagenet_googlenet.m:
--------------------------------------------------------------------------------
1 | function cnn_imagenet_googlenet()
2 | %CNN_IMAGENET_GOOGLENET Demonstrates how to use GoogLeNet
3 |
4 | run matlab/vl_setupnn
5 | modelPath = 'data/models/imagenet-googlenet-dag.mat' ;
6 |
  7 | if ~exist(modelPath, 'file')
8 | mkdir(fileparts(modelPath)) ;
9 | urlwrite(...
10 | 'http://www.vlfeat.org/matconvnet/models/imagenet-googlenet-dag.mat', ...
11 | modelPath) ;
12 | end
13 |
14 | net = dagnn.DagNN.loadobj(load(modelPath)) ;
15 |
16 | im = imread('peppers.png') ;
17 | im_ = single(im) ; % note: 255 range
18 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
19 | im_ = im_ - net.meta.normalization.averageImage ;
20 | net.eval({'data', im_}) ;
21 |
22 | % show the classification result
23 | scores = squeeze(gather(net.vars(end).value)) ;
24 | [bestScore, best] = max(scores) ;
25 | figure(1) ; clf ; imagesc(im) ;
26 | title(sprintf('%s (%d), score %.3f',...
27 | net.meta.classes.description{best}, best, bestScore)) ;
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/cnn_imagenet_minimal.m:
--------------------------------------------------------------------------------
1 | function cnn_imagenet_minimal()
2 | % CNN_IMAGENET_MINIMAL Minimalistic demonstration of how to run an ImageNet CNN model
3 |
4 | % Setup MatConvNet.
5 | run(fullfile(fileparts(mfilename('fullpath')), ...
6 | '..', '..', 'matlab', 'vl_setupnn.m')) ;
7 |
8 | % Download a pre-trained CNN from the web.
9 | if ~exist('imagenet-vgg-f.mat', 'file')
10 | fprintf('Downloading the VGG-F model ... this may take a while\n') ;
11 | urlwrite('http://www.vlfeat.org/matconvnet/models/imagenet-vgg-f.mat', ...
12 | 'imagenet-vgg-f.mat') ;
13 | end
14 |
 15 | % Load the model and upgrade it to the current MatConvNet version.
16 | net = load('imagenet-vgg-f.mat') ;
17 | net = vl_simplenn_tidy(net) ;
18 |
19 | % Obtain and preprocess an image.
20 | im = imread('peppers.png') ;
21 | im_ = single(im) ; % note: 255 range
22 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
23 | im_ = im_ - net.meta.normalization.averageImage ;
24 |
25 | % Run the CNN.
26 | res = vl_simplenn(net, im_) ;
27 |
28 | % Show the classification result.
29 | scores = squeeze(gather(res(end).x)) ;
30 | [bestScore, best] = max(scores) ;
31 | figure(1) ; clf ; imagesc(im) ;
32 | title(sprintf('%s (%d), score %.3f',...
33 | net.meta.classes.description{best}, best, bestScore)) ;
34 |
35 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/cnn_imagenet_sync_labels.m:
--------------------------------------------------------------------------------
1 | function imdb = cnn_imagenet_sync_labels(imdb, net)
2 | % CNN_IMAGENET_SYNC_LABELS Match CNN and database labels
3 | % A CNN NET and the image database IMDB may use a different label ordering.
  4 | % This function matches classes by name and reorders the labels
  5 | % in IMDB to match NET.
  6 |
  7 | [~,perm] = ismember(imdb.classes.name, net.meta.classes.name);
  8 | assert(all(perm ~= 0));
  9 |
 10 | iperm(perm) = 1:numel(perm) ; % inverse permutation: iperm(perm(i)) == i
 11 | imdb.classes.description = imdb.classes.description(iperm) ;
 12 | imdb.classes.name = imdb.classes.name(iperm) ;
 13 | ok = imdb.images.label > 0 ;
 14 | imdb.images.label(ok) = perm(imdb.images.label(ok)) ;
15 |
16 |
17 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/getImageBatch.m:
--------------------------------------------------------------------------------
1 | function data = getImageBatch(imagePaths, varargin)
2 | % GETIMAGEBATCH Load and jitter a batch of images
3 |
4 | opts.useGpu = false ;
5 | opts.prefetch = false ;
6 | opts.numThreads = 1 ;
7 |
8 | opts.imageSize = [227, 227] ;
9 | opts.cropSize = 227 / 256 ;
10 | opts.keepAspect = true ;
11 | opts.subtractAverage = [] ;
12 |
13 | opts.jitterFlip = false ;
14 | opts.jitterLocation = false ;
15 | opts.jitterAspect = 1 ;
16 | opts.jitterScale = 1 ;
17 | opts.jitterBrightness = 0 ;
18 | opts.jitterContrast = 0 ;
19 | opts.jitterSaturation = 0 ;
20 |
21 | opts = vl_argparse(opts, varargin);
22 |
23 | args{1} = {imagePaths, ...
24 | 'NumThreads', opts.numThreads, ...
25 | 'Pack', ...
26 | 'Interpolation', 'bicubic', ...
27 | 'Resize', opts.imageSize(1:2), ...
28 | 'CropSize', opts.cropSize * opts.jitterScale, ...
29 | 'CropAnisotropy', opts.jitterAspect, ...
30 | 'Brightness', opts.jitterBrightness, ...
31 | 'Contrast', opts.jitterContrast, ...
32 | 'Saturation', opts.jitterSaturation} ;
33 |
34 | if ~opts.keepAspect
 35 |   % Squashing effect
36 | args{end+1} = {'CropAnisotropy', 0} ;
37 | end
38 |
39 | if opts.jitterFlip
40 | args{end+1} = {'Flip'} ;
41 | end
42 |
43 | if opts.jitterLocation
44 | args{end+1} = {'CropLocation', 'random'} ;
45 | else
46 | args{end+1} = {'CropLocation', 'center'} ;
47 | end
48 |
49 | if opts.useGpu
50 | args{end+1} = {'Gpu'} ;
51 | end
52 |
53 | if ~isempty(opts.subtractAverage)
54 | args{end+1} = {'SubtractAverage', opts.subtractAverage} ;
55 | end
56 |
57 | args = horzcat(args{:}) ;
58 |
59 | if opts.prefetch
60 | vl_imreadjpeg(args{:}, 'prefetch') ;
61 | data = [] ;
62 | else
63 | data = vl_imreadjpeg(args{:}) ;
64 | data = data{1} ;
65 | end
66 |
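
A typical training-time call, assuming imagePaths is a cell array of JPEG
file names and rgbMean comes from getImageStats (next file):

    data = getImageBatch(imagePaths, ...
                         'numThreads', 4, ...
                         'imageSize', [224 224], ...
                         'subtractAverage', rgbMean, ...
                         'jitterFlip', true, ...
                         'jitterLocation', true) ;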
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/imagenet/getImageStats.m:
--------------------------------------------------------------------------------
1 | function [averageImage, rgbMean, rgbCovariance] = getImageStats(images, varargin)
2 | %GETIMAGESTATS Get image statistics
3 |
4 | opts.gpus = [] ;
5 | opts.batchSize = 256 ;
6 | opts.imageSize = [256 256] ;
7 | opts.numThreads = 6 ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | avg = {} ;
11 | rgbm1 = {} ;
12 | rgbm2 = {} ;
13 |
14 | numGpus = numel(opts.gpus) ;
15 | if numGpus > 0
16 | fprintf('%s: resetting GPU device\n', mfilename) ;
17 | clear mex ;
18 | gpuDevice(opts.gpus(1))
19 | end
20 |
21 | for t=1:opts.batchSize:numel(images)
22 | time = tic ;
23 | batch = t : min(t+opts.batchSize-1, numel(images)) ;
24 | fprintf('collecting image stats: batch starting with image %d ...', batch(1)) ;
25 |
26 | data = getImageBatch(images(batch), ...
27 | 'numThreads', opts.numThreads, ...
28 | 'imageSize', opts.imageSize, ...
29 | 'useGpu', numGpus > 0) ;
30 |
31 | z = reshape(shiftdim(data,2),3,[]) ;
32 | rgbm1{end+1} = mean(z,2) ;
33 | rgbm2{end+1} = z*z'/size(z,2) ;
34 | avg{end+1} = mean(data, 4) ;
35 | time = toc(time) ;
36 | fprintf(' %.1f Hz\n', numel(batch) / time) ;
37 | end
38 |
39 | averageImage = gather(mean(cat(4,avg{:}),4)) ;
40 | rgbm1 = gather(mean(cat(2,rgbm1{:}),2)) ;
41 | rgbm2 = gather(mean(cat(3,rgbm2{:}),3)) ;
42 | rgbMean = rgbm1 ;
43 | rgbCovariance = rgbm2 - rgbm1*rgbm1' ;
44 |
45 | if numGpus > 0
46 | fprintf('%s: finished with GPU device, resetting again\n', mfilename) ;
47 | clear mex ;
48 | gpuDevice(opts.gpus(1)) ;
49 | end
50 | fprintf('%s: all done\n', mfilename) ;
51 |
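
A sketch of computing the statistics and deriving the PCA colour-jitter
basis that the ImageNet training examples typically build from the
covariance; `images` is a cell array of image paths:

    [averageImage, rgbMean, rgbCovariance] = getImageStats(images, ...
        'imageSize', [256 256], 'gpus', []) ;

    % Eigendecomposition of the RGB covariance; the columns of v*sqrt(d)
    % give the principal colour deviations used for PCA colour jitter.
    [v, d] = eig(rgbCovariance) ;
    rgbDeviation = v * sqrt(d) ;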
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/mnist/cnn_mnist_experiments.m:
--------------------------------------------------------------------------------
  1 | %% Experiment with cnn_mnist, with and without batch normalization
2 |
3 | [net_bn, info_bn] = cnn_mnist(...
4 | 'expDir', 'data/mnist-bnorm', 'batchNormalization', true);
5 |
6 | [net_fc, info_fc] = cnn_mnist(...
7 | 'expDir', 'data/mnist-baseline', 'batchNormalization', false);
8 |
9 | figure(1) ; clf ;
10 | subplot(1,2,1) ;
11 | semilogy([info_fc.val.objective]', 'o-') ; hold all ;
12 | semilogy([info_bn.val.objective]', '+--') ;
13 | xlabel('Training samples [x 10^3]'); ylabel('energy') ;
14 | grid on ;
15 | h=legend('BSLN', 'BNORM') ;
16 | set(h,'color','none');
17 | title('objective') ;
18 | subplot(1,2,2) ;
19 | plot([info_fc.val.top1err]', 'o-') ; hold all ;
20 | plot([info_fc.val.top5err]', '*-') ;
21 | plot([info_bn.val.top1err]', '+--') ;
22 | plot([info_bn.val.top5err]', 'x--') ;
23 | h=legend('BSLN-val','BSLN-val-5','BNORM-val','BNORM-val-5') ;
24 | grid on ;
25 | xlabel('Training samples [x 10^3]'); ylabel('error') ;
26 | set(h,'color','none') ;
27 | title('error') ;
28 | drawnow ;
29 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/spatial_transformer/readme.txt:
--------------------------------------------------------------------------------
1 | Example scripts to train a spatial transformer network [1]
  2 | for the cluttered MNIST dataset.
3 |
4 | Demonstrates how to initialize and train the network.
5 |
6 | References:
7 | -----------
8 | 1. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman
9 | Spatial transformer networks
10 | Advances in Neural Information Processing Systems, 2015
11 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/examples/vggfaces/cnn_vgg_faces.m:
--------------------------------------------------------------------------------
1 | function cnn_vgg_faces()
2 | %CNN_VGG_FACES Demonstrates how to use VGG-Face
3 |
4 | % Setup MatConvNet.
5 | run(fullfile(fileparts(mfilename('fullpath')), ...
6 | '..', '..', 'matlab', 'vl_setupnn.m')) ;
7 |
8 | % Load the VGG-Face model.
9 | modelPath = fullfile(vl_rootnn,'data','models','vgg-face.mat') ;
 10 | if ~exist(modelPath, 'file')
11 | fprintf('Downloading the VGG-Face model ... this may take a while\n') ;
12 | mkdir(fileparts(modelPath)) ;
13 | urlwrite(...
14 | 'http://www.vlfeat.org/matconvnet/models/vgg-face.mat', ...
15 | modelPath) ;
16 | end
17 |
 18 | % Load the model and upgrade it to the current MatConvNet version.
 19 | net = load(modelPath) ;
20 | net = vl_simplenn_tidy(net) ;
21 |
22 | % Load a test image from Wikipedia and run the model.
23 | im = imread('https://upload.wikimedia.org/wikipedia/commons/4/4a/Aamir_Khan_March_2015.jpg') ;
24 | im = im(1:250,:,:) ; % crop
25 | im_ = single(im) ; % note: 255 range
26 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
27 | im_ = bsxfun(@minus,im_,net.meta.normalization.averageImage) ;
28 | res = vl_simplenn(net, im_) ;
29 |
30 | % Show the classification result.
31 | scores = squeeze(gather(res(end).x)) ;
32 | [bestScore, best] = max(scores) ;
33 | figure(1) ; clf ; imagesc(im) ; axis equal off ;
34 | title(sprintf('%s (%d), score %.3f',...
35 | net.meta.classes.description{best}, best, bestScore), ...
36 | 'Interpreter', 'none') ;
37 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matconvnet.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 14
4 | VisualStudioVersion = 14.0.24720.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "matconvnet", "matconvnet.vcxproj", "{B0BD9132-1D90-4267-A07A-B44DE497A9C7}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Win32 = Debug|Win32
11 | Debug|x64 = Debug|x64
12 | Release|Win32 = Release|Win32
13 | Release|x64 = Release|x64
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.ActiveCfg = Debug|Win32
17 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.Build.0 = Debug|Win32
18 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|x64.ActiveCfg = Debug|Win32
19 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.ActiveCfg = Release|Win32
20 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.Build.0 = Release|Win32
21 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|x64.ActiveCfg = Release|Win32
22 | EndGlobalSection
23 | GlobalSection(SolutionProperties) = preSolution
24 | HideSolutionNode = FALSE
25 | EndGlobalSection
26 | EndGlobal
27 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matconvnet.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
  1 | <?xml version="1.0" encoding="UTF-8"?>
  2 | <Workspace
  3 |    version = "1.0">
  4 |    <FileRef
  5 |       location = "self:matconvnet.xcodeproj">
  6 |    </FileRef>
  7 | </Workspace>
  8 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/addLayer.m:
--------------------------------------------------------------------------------
1 | function addLayer(obj, name, block, inputs, outputs, params, varargin)
2 | %ADDLAYER Adds a layer to a DagNN
  3 | %  ADDLAYER(NAME, BLOCK, INPUTS, OUTPUTS, PARAMS) adds the
  4 | %  specified layer to the network. NAME is a string with the layer
  5 | %  name, used as a unique identifier. BLOCK is the object
  6 | %  implementing the layer, which should be a subclass of
  7 | %  dagnn.Layer. INPUTS, OUTPUTS are cell arrays of variable names, and
  8 | %  PARAMS of parameter names.
9 | %
10 | % See Also REMOVELAYER().
11 | opts.skipRebuild = false;
12 | opts = vl_argparse(opts, varargin);
13 |
14 | index = find(strcmp(name, {obj.layers.name})) ;
15 | if ~isempty(index), error('There is already a layer with name ''%s''.', name), end
16 | index = numel(obj.layers) + 1 ;
17 |
18 | if nargin < 6, params = {} ; end
19 | if ischar(inputs), inputs = {inputs} ; end
20 | if ischar(outputs), outputs = {outputs} ; end
21 | if ischar(params), params = {params} ; end
22 |
23 | obj.layers(index) = struct(...
24 | 'name', {name}, ...
25 | 'inputs', {inputs}, ...
26 | 'outputs', {outputs}, ...
27 | 'params', {params}, ...
28 | 'inputIndexes', {[]}, ...
29 | 'outputIndexes', {[]}, ...
30 | 'paramIndexes', {[]}, ...
31 | 'forwardTime', {[]}, ...
32 | 'backwardTime', {[]}, ...
33 | 'block', {block}) ;
34 | obj.layers(index).block.attach(obj, index) ;
35 | if ~opts.skipRebuild, obj.rebuild() ; end ;
36 |
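
For example, a convolution layer reading the network input and producing a
new variable with two parameters might be added as follows (all names are
illustrative):

    net = dagnn.DagNN() ;
    net.addLayer('conv1', dagnn.Conv('size', [5 5 1 20]), ...
                 {'input'}, {'x1'}, {'conv1f', 'conv1b'}) ;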
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/getVarSizes.m:
--------------------------------------------------------------------------------
1 | function sizes = getVarSizes(obj, inputSizes)
2 | %GETVARSIZES Get the size of the variables
3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the
4 | % DagNN variables given the size of the inputs. `inputSizes` is
5 | % a cell array of the type `{'inputName', inputSize, ...}`
6 | % Returns a cell array with sizes of all network variables.
7 | %
  8 | %  Example: compute the storage needed for a batch size of 256 for an
  9 | %  imagenet-like network:
 10 | %  ```
 11 | %  batch_size = 256; single_num_bytes = 4;
 12 | %  input_size = [net.meta.normalization.imageSize, batch_size];
 13 | %  var_sizes = net.getVarSizes({'data', input_size});
 14 | %  fprintf('Network activations will take %.2fMiB in single.\n', ...
 15 | %    sum(prod(cell2mat(var_sizes'), 2)) * single_num_bytes ./ 1024^2);
16 | % ```
17 |
18 | % Copyright (C) 2015 Andrea Vedaldi, Karel Lenc.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | nv = numel(obj.vars) ;
25 | sizes = num2cell(NaN(nv, 4),2)' ;
26 |
27 | for i = 1:2:numel(inputSizes)
28 | v = obj.getVarIndex(inputSizes{i}) ;
29 | if isnan(v)
30 | error('Variable `%s` not found in the network.', inputSizes{i});
31 | end;
32 | if isempty(inputSizes{i+1})
33 | sizes{v} = [0 0 0 0] ;
34 | else
35 | sizes{v} = [inputSizes{i+1}(:)' ones(1, 4 - numel(inputSizes{i+1}))] ;
36 | end
37 | end
38 |
39 | for layer = obj.layers(obj.executionOrder)
40 | in = layer.inputIndexes ;
41 | out = layer.outputIndexes ;
42 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ;
43 | end
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/initParams.m:
--------------------------------------------------------------------------------
1 | function initParams(obj)
  2 | % INITPARAMS Initialize the parameters of the DagNN
  3 | %   OBJ.INITPARAMS() uses the INITPARAMS() method of each layer to initialize
4 | % the corresponding parameters (usually randomly).
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | for l = 1:numel(obj.layers)
13 | p = obj.getParamIndex(obj.layers(l).params) ;
14 | params = obj.layers(l).block.initParams() ;
15 | switch obj.device
16 | case 'cpu'
17 | params = cellfun(@gather, params, 'UniformOutput', false) ;
18 | case 'gpu'
19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ;
20 | end
21 | [obj.params(p).value] = deal(params{:}) ;
22 | end
23 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/loadobj.m:
--------------------------------------------------------------------------------
1 | function obj = loadobj(s)
2 | % LOADOBJ Initialize a DagNN object from a structure.
  3 | %   OBJ = LOADOBJ(S) initializes a DagNN object from the structure
4 | % S. It is the opposite of S = OBJ.SAVEOBJ().
5 | % If S is a string, initializes the DagNN object with data
6 | % from a mat-file S. Otherwise, if S is an instance of `dagnn.DagNN`,
7 | % returns S.
8 |
9 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
 15 | if ischar(s), s = load(s) ; end
 16 | if isstruct(s)
 17 |   assert(isfield(s, 'layers'), 'Invalid model.');
 18 |   if ~isstruct(s.layers)
 19 |     warning('The model appears to be a `simplenn` model. Using `fromSimpleNN` instead.');
20 | obj = dagnn.DagNN.fromSimpleNN(s);
21 | return;
22 | end
23 | obj = dagnn.DagNN() ;
24 | for l = 1:numel(s.layers)
25 | constr = str2func(s.layers(l).type) ;
26 | block = constr() ;
27 | block.load(struct(s.layers(l).block)) ;
28 | obj.addLayer(...
29 | s.layers(l).name, ...
30 | block, ...
31 | s.layers(l).inputs, ...
32 | s.layers(l).outputs, ...
33 | s.layers(l).params,...
34 | 'skipRebuild', true) ;
35 | end
36 | obj.rebuild();
37 | if isfield(s, 'params')
38 | for f = setdiff(fieldnames(s.params)','name')
39 | f = char(f) ;
40 | for i = 1:numel(s.params)
41 | p = obj.getParamIndex(s.params(i).name) ;
42 | obj.params(p).(f) = s.params(i).(f) ;
43 | end
44 | end
45 | end
46 | if isfield(s, 'vars')
47 | for f = setdiff(fieldnames(s.vars)','name')
48 | f = char(f) ;
49 | for i = 1:numel(s.vars)
50 | p = obj.getVarIndex(s.vars(i).name) ;
51 | obj.vars(p).(f) = s.vars(i).(f) ;
52 | end
53 | end
54 | end
55 | for f = setdiff(fieldnames(s)', {'vars','params','layers'})
56 | f = char(f) ;
57 | obj.(f) = s.(f) ;
58 | end
59 | elseif isa(s, 'dagnn.DagNN')
60 | obj = s ;
61 | else
62 | error('Unknown data type %s for `loadobj`.', class(s));
63 | end
64 |
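
In practice this is how pre-trained models shipped as plain structures are
restored (compare the GoogLeNet demo earlier in this tree):

    net = dagnn.DagNN.loadobj(load('data/models/imagenet-googlenet-dag.mat')) ;
    net.mode = 'test' ;  % disable training-only behaviour such as dropout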
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/move.m:
--------------------------------------------------------------------------------
1 | function move(obj, device)
2 | %MOVE Move the DagNN to either CPU or GPU
3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU.
4 | %
5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU.
6 |
7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | obj.reset() ;
14 | obj.device = device ;
15 | switch device
16 | case 'gpu'
17 | for i=1:numel(obj.params)
18 | obj.params(i).value = gpuArray(obj.params(i).value) ;
19 | end
20 | case 'cpu'
21 | for i=1:numel(obj.params)
22 | obj.params(i).value = gather(obj.params(i).value) ;
23 | end
24 | otherwise
25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ;
26 | end
27 | for l = 1:numel(obj.layers)
28 | obj.layers(l).block.move(device) ;
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/removeLayer.m:
--------------------------------------------------------------------------------
1 | function removeLayer(obj, layerName)
2 | %REMOVELAYER Remove a layer from the network
3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object
4 | % OBJ. NAME can be a string or a cell array of strings.
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | if ischar(layerName), layerName = {layerName}; end;
13 | idxs = obj.getLayerIndex(layerName);
14 | if any(isnan(idxs))
15 | error('Invalid layer name `%s`', ...
16 | strjoin(layerName(isnan(idxs)), ', '));
17 | end
18 | obj.layers(idxs) = [] ;
19 | obj.rebuild() ;
20 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/renameLayer.m:
--------------------------------------------------------------------------------
1 | function renameLayer(obj, oldName, newName, varargin)
2 | %RENAMELAYER Rename a layer
3 | % RENAMELAYER(OLDNAME, NEWNAME) changes the name of the layer
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing layer.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the layer to rename
11 | v = obj.getLayerIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such layer, nothing to do
14 | if ~opts.quiet
15 | warning('There is no layer ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing layer
21 | newNameExists = any(strcmp(newName, {obj.layers.name})) ;
22 | if newNameExists
23 | error('There is already a layer ''%s''.', newName) ;
24 | end
25 |
26 | % Replace oldName with newName in all the layers
27 | obj.layers(v).name = newName ;
28 | obj.rebuild() ;
29 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/renameParam.m:
--------------------------------------------------------------------------------
1 | function renameParam(obj, oldName, newName, varargin)
  2 | %RENAMEPARAM Rename a parameter
3 | % RENAMEPARAM(OLDNAME, NEWNAME) changes the name of the parameter
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing parameter.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the param to rename
11 | v = obj.getParamIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such param, nothing to do
14 | if ~opts.quiet
15 | warning('There is no parameter ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing param
21 | newNameExists = any(strcmp(newName, {obj.params.name})) ;
22 | if newNameExists
 23 |   error('There is already a parameter ''%s''.', newName) ;
24 | end
25 |
26 | % Replace oldName with newName in all the layers
27 | for l = 1:numel(obj.layers)
28 | sel = find(strcmp(oldName, obj.layers(l).params));
29 | [obj.layers(l).params{sel}] = deal(newName) ;
30 | end
31 |
32 | if ~newNameExists
33 | obj.params(v).name = newName ;
34 | obj.paramNames.(newName) = v ;
35 | end
36 |
37 | obj.rebuild() ;
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/renameVar.m:
--------------------------------------------------------------------------------
1 | function renameVar(obj, oldName, newName, varargin)
2 | %RENAMEVAR Rename a variable
3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing variable.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the variable to rename
11 | v = obj.getVarIndex(oldName) ;
12 | if isnan(v)
 13 |   % There is no such variable, nothing to do
14 | if ~opts.quiet
15 | warning('There is no variable ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing variable
21 | newNameExists = any(strcmp(newName, {obj.vars.name})) ;
22 |
23 | % Replace oldName with newName in all the layers
24 | for l = 1:numel(obj.layers)
25 | for f = {'inputs', 'outputs'}
26 | f = char(f) ;
27 | sel = find(strcmp(oldName, obj.layers(l).(f))) ;
28 | [obj.layers(l).(f){sel}] = deal(newName) ;
29 | end
30 | end
31 |
32 | % If newVariable is a variable in the graph, then there is not
33 | % anything else to do. obj.rebuild() will remove the slot
34 | % in obj.vars() for oldName as that variable becomes unused.
35 | %
36 | % If, however, newVariable is not in the graph already, then
37 | % the slot in obj.vars() is preserved and only the variable name
38 | % is changed.
39 |
40 | if ~newNameExists
41 | obj.vars(v).name = newName ;
 42 |   % update the variable name hash, otherwise rebuild() won't find this
 43 |   % variable correctly
44 | obj.varNames.(newName) = v ;
45 | end
46 |
47 | obj.rebuild() ;
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/reset.m:
--------------------------------------------------------------------------------
1 | function reset(obj)
2 | %RESET Reset the DagNN
3 | % RESET(obj) resets the DagNN obj. The function clears any intermediate value stored in the DagNN
4 | % object, including parameter gradients. It also calls the reset
5 | % function of every layer.
6 |
7 | obj.clearParameterServer() ;
8 | [obj.vars.value] = deal([]) ;
9 | [obj.vars.der] = deal([]) ;
10 | [obj.params.der] = deal([]) ;
11 | for l = 1:numel(obj.layers)
12 | obj.layers(l).block.reset() ;
13 | end
14 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/saveobj.m:
--------------------------------------------------------------------------------
1 | function s = saveobj(obj)
2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure
3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB
4 | % structure S. This is particularly convenient to preserve future
5 | % compatibility and to ship networks that are pure structures,
6 | % instead of embedding dependencies to code.
7 | %
  8 | %   The object can be reconstructed by `obj = DagNN.loadobj(s)`.
  9 | %
 10 | %   As a side effect, the network is reset (all variables are cleared)
 11 | %   and transferred to the CPU.
12 | %
13 | % See Also: dagnn.DagNN.loadobj, dagnn.DagNN.reset
14 |
15 | % Copyright (C) 2015-2016 Karel Lenc and Andrea Vedaldi.
16 | % All rights reserved.
17 | %
18 | % This file is part of the VLFeat library and is made available under
19 | % the terms of the BSD license (see the COPYING file).
20 |
21 | device = obj.device ;
22 | obj.move('cpu') ;
23 | s.vars = struct(...
24 | 'name', {obj.vars.name}, ...
25 | 'precious', {obj.vars.precious}) ;
26 | s.params = struct(...
27 | 'name', {obj.params.name}, ...
28 | 'value', {obj.params.value}, ...
29 | 'learningRate', {obj.params.learningRate}, ...
30 | 'weightDecay', {obj.params.weightDecay}) ;
31 | s.layers = struct(...
32 | 'name', {obj.layers.name}, ...
33 | 'type', {[]}, ...
34 | 'inputs', {obj.layers.inputs}, ...
35 | 'outputs', {obj.layers.outputs}, ...
36 | 'params', {obj.layers.params}, ...
37 | 'block', {[]}) ;
38 | s.meta = obj.meta ;
39 |
40 | for l = 1:numel(obj.layers)
41 | block = obj.layers(l).block ;
42 | slayer = block.save() ;
43 | s.layers(l).type = class(block) ;
44 | s.layers(l).block = slayer ;
45 | end
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/setLayerInputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerInputs(obj, layer, inputs)
2 | %SETLAYERINPUTS Set or change the inputs to a layer
3 | % Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for input = inputs
8 | v(end+1) = obj.addVar(char(input)) ;
9 | end
10 | obj.layers(l).inputs = inputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/setLayerOutputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerOutputs(obj, layer, outputs)
2 | %SETLAYEROUTPUTS Set or change the outputs of a layer
3 | % Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for output = outputs
8 | v(end+1) = obj.addVar(char(output)) ;
9 | end
10 | obj.layers(l).outputs = outputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/@DagNN/setLayerParams.m:
--------------------------------------------------------------------------------
1 | function v = setLayerParams(obj, layer, params)
  2 | %SETLAYERPARAMS Set or change the parameters of a layer
3 | % Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for param = params
8 | v(end+1) = obj.addParam(char(param)) ;
9 | end
10 | obj.layers(l).params = params ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/BatchNorm.m:
--------------------------------------------------------------------------------
1 | classdef BatchNorm < dagnn.ElementWise
2 | properties
3 | numChannels
4 | epsilon = 1e-5
5 | opts = {'NoCuDNN'} % ours seems slightly faster
6 | end
7 |
8 | properties (Transient)
9 | moments
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | if strcmp(obj.net.mode, 'test')
15 | outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
16 | 'moments', params{3}, ...
17 | 'epsilon', obj.epsilon, ...
18 | obj.opts{:}) ;
19 | else
20 | [outputs{1},obj.moments] = ...
21 | vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
22 | 'epsilon', obj.epsilon, ...
23 | obj.opts{:}) ;
24 | end
25 | end
26 |
27 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
28 | [derInputs{1}, derParams{1}, derParams{2}, derParams{3}] = ...
29 | vl_nnbnorm(inputs{1}, params{1}, params{2}, derOutputs{1}, ...
30 | 'epsilon', obj.epsilon, ...
31 | 'moments', obj.moments, ...
32 | obj.opts{:}) ;
33 | obj.moments = [] ;
34 | % multiply the moments update by the number of images in the batch
35 | % this is required to make the update additive for subbatches
36 | % and will eventually be normalized away
37 | derParams{3} = derParams{3} * size(inputs{1},4) ;
38 | end
39 |
40 | % ---------------------------------------------------------------------
41 | function obj = BatchNorm(varargin)
42 | obj.load(varargin{:}) ;
43 | end
44 |
45 | function params = initParams(obj)
46 | params{1} = ones(obj.numChannels,1,'single') ;
47 | params{2} = zeros(obj.numChannels,1,'single') ;
48 | params{3} = zeros(obj.numChannels,2,'single') ;
49 | end
50 |
51 | function attach(obj, net, index)
52 | attach@dagnn.ElementWise(obj, net, index) ;
53 | p = net.getParamIndex(net.layers(index).params{3}) ;
54 | net.params(p).trainMethod = 'average' ;
55 | net.params(p).learningRate = 0.1 ;
56 | end
57 | end
58 | end
59 |
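
A construction sketch (layer and variable names illustrative): numChannels
must match the third dimension of the incoming variable, and the layer
carries three parameters (gains, biases, and the two-column moments), as
initParams above shows:

    net.addLayer('bn1', dagnn.BatchNorm('numChannels', 64), ...
                 {'x1'}, {'x2'}, {'bn1g', 'bn1b', 'bn1m'}) ;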
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/BilinearSampler.m:
--------------------------------------------------------------------------------
1 | % Wrapper for BilinearSampler block:
2 | % (c) 2016 Ankush Gupta
3 |
4 | classdef BilinearSampler < dagnn.Layer
5 | methods
6 | function outputs = forward(obj, inputs, params)
7 | outputs = vl_nnbilinearsampler(inputs{1}, inputs{2});
8 | outputs = {outputs};
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | [dX,dG] = vl_nnbilinearsampler(inputs{1}, inputs{2}, derOutputs{1});
13 | derInputs = {dX,dG};
14 | derParams = {};
15 | end
16 |
17 | function outputSizes = getOutputSizes(obj, inputSizes)
18 | xSize = inputSizes{1};
19 | gSize = inputSizes{2};
20 | outputSizes = {[gSize(2), gSize(3), xSize(3), xSize(4)]};
21 | end
22 |
23 | function obj = BilinearSampler(varargin)
24 | obj.load(varargin);
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Concat.m:
--------------------------------------------------------------------------------
1 | classdef Concat < dagnn.ElementWise
2 | properties
3 | dim = 3
4 | end
5 |
6 | properties (Transient)
7 | inputSizes = {}
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | outputs{1} = vl_nnconcat(inputs, obj.dim) ;
13 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
14 | end
15 |
16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
17 | derInputs = vl_nnconcat(inputs, obj.dim, derOutputs{1}, 'inputSizes', obj.inputSizes) ;
18 | derParams = {} ;
19 | end
20 |
21 | function reset(obj)
22 | obj.inputSizes = {} ;
23 | end
24 |
25 | function outputSizes = getOutputSizes(obj, inputSizes)
26 | sz = inputSizes{1} ;
27 | for k = 2:numel(inputSizes)
28 | sz(obj.dim) = sz(obj.dim) + inputSizes{k}(obj.dim) ;
29 | end
30 | outputSizes{1} = sz ;
31 | end
32 |
33 | function rfs = getReceptiveFields(obj)
34 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
35 | if obj.dim == 3 || obj.dim == 4
36 | rfs = getReceptiveFields@dagnn.ElementWise(obj) ;
37 | rfs = repmat(rfs, numInputs, 1) ;
38 | else
39 | for i = 1:numInputs
40 | rfs(i,1).size = [NaN NaN] ;
41 | rfs(i,1).stride = [NaN NaN] ;
42 | rfs(i,1).offset = [NaN NaN] ;
43 | end
44 | end
45 | end
46 |
47 | function load(obj, varargin)
48 | s = dagnn.Layer.argsToStruct(varargin{:}) ;
49 | % backward file compatibility
50 | if isfield(s, 'numInputs'), s = rmfield(s, 'numInputs') ; end
51 | load@dagnn.Layer(obj, s) ;
52 | end
53 |
54 | function obj = Concat(varargin)
55 | obj.load(varargin{:}) ;
56 | end
57 | end
58 | end
59 |
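
For instance, channel-wise concatenation of two variables (dim defaults to
3; names illustrative):

    net.addLayer('concat1', dagnn.Concat(), {'x1', 'x2'}, {'xcat'}) ;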
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Conv.m:
--------------------------------------------------------------------------------
1 | classdef Conv < dagnn.Filter
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | if ~obj.hasBias, params{2} = [] ; end
11 | outputs{1} = vl_nnconv(...
12 | inputs{1}, params{1}, params{2}, ...
13 | 'pad', obj.pad, ...
14 | 'stride', obj.stride, ...
15 | 'dilate', obj.dilate, ...
16 | obj.opts{:}) ;
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | if ~obj.hasBias, params{2} = [] ; end
21 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconv(...
22 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
23 | 'pad', obj.pad, ...
24 | 'stride', obj.stride, ...
25 | 'dilate', obj.dilate, ...
26 | obj.opts{:}) ;
27 | end
28 |
29 | function kernelSize = getKernelSize(obj)
30 | kernelSize = obj.size(1:2) ;
31 | end
32 |
33 | function outputSizes = getOutputSizes(obj, inputSizes)
34 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
35 | outputSizes{1}(3) = obj.size(4) ;
36 | end
37 |
38 | function params = initParams(obj)
 39 |       % "Xavier improved" (He et al.) initialization
40 | sc = sqrt(2 / prod(obj.size(1:3))) ;
41 | %sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
42 | params{1} = randn(obj.size,'single') * sc ;
43 | if obj.hasBias
44 | params{2} = zeros(obj.size(4),1,'single') ;
45 | end
46 | end
47 |
48 | function set.size(obj, ksize)
49 | % make sure that ksize has 4 dimensions
50 | ksize = [ksize(:)' 1 1 1 1] ;
51 | obj.size = ksize(1:4) ;
52 | end
53 |
54 | function obj = Conv(varargin)
55 | obj.load(varargin) ;
56 | % normalize field by implicitly calling setters defined in
57 | % dagnn.Filter and here
58 | obj.size = obj.size ;
59 | obj.stride = obj.stride ;
60 | obj.pad = obj.pad ;
61 | end
62 | end
63 | end
64 |
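
The initParams scale above is the "improved Xavier" (He) rule
sc = sqrt(2 / fan_in) with fan_in = prod(obj.size(1:3)); for example, for a
3x3 kernel over 64 input channels:

    sz = [3 3 64 128] ;
    sc = sqrt(2 / prod(sz(1:3)))   % = sqrt(2/576), approximately 0.0589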
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Crop.m:
--------------------------------------------------------------------------------
1 | classdef Crop < dagnn.ElementWise
2 | %CROP DagNN cropping layer.
  3 | %   This is a peculiar layer from FCN. It crops inputs{1} to
  4 | %   match the size of inputs{2} (starting with a base crop
  5 | %   amount).
6 |
7 | properties
8 | crop = [0 0]
9 | end
10 |
11 | properties (Transient)
12 | inputSizes = {}
13 | end
14 |
15 | methods
16 | function crop = getAdaptedCrops(obj)
17 | cropv = obj.inputSizes{1}(1) - obj.inputSizes{2}(1) ;
18 | cropu = obj.inputSizes{1}(2) - obj.inputSizes{2}(2) ;
19 | cropv1 = max(0, cropv - obj.crop(1)) ;
20 | cropu1 = max(0, cropu - obj.crop(2)) ;
21 | crop = [cropv - cropv1, cropv1, cropu - cropu1, cropu1] ;
22 | end
23 |
24 | function outputs = forward(obj, inputs, params)
25 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
26 | adjCrop = obj.getAdaptedCrops() ;
27 | outputs{1} = vl_nncrop(inputs{1}, adjCrop) ;
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | adjCrop = obj.getAdaptedCrops() ;
32 | derInputs{1} = vl_nncrop(inputs{1}, adjCrop, derOutputs{1}, obj.inputSizes{1}) ;
33 | derInputs{2} = [] ;
34 | derParams = {} ;
35 | end
36 |
37 | function reset(obj)
38 | obj.inputSizes = {} ;
39 | end
40 |
41 | function outputSizes = getOutputSizes(obj, inputSizes)
42 | obj.inputSizes = inputSizes ;
43 | crop = obj.getAdaptedCrops() ;
44 | outputSizes{1} = inputSizes{1} - [crop(1)+crop(2), crop(3)+crop(4), 0, 0] ;
45 | end
46 |
47 | function rfs = getReceptiveFields(obj)
48 | rfs(1,1).size = [1 1] ;
49 | rfs(1,1).stride = [1 1] ;
50 | rfs(1,1).offset = 1 + obj.crop ;
51 | rfs(2,1).size = [] ;
52 | rfs(2,1).stride = [] ;
53 | rfs(2,1).offset = [] ;
54 | end
55 |
56 | function obj = Crop(varargin)
57 | obj.load(varargin) ;
58 | end
59 | end
60 | end
61 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/DropOut.m:
--------------------------------------------------------------------------------
1 | classdef DropOut < dagnn.ElementWise
2 | properties
3 | rate = 0.5
4 | frozen = false
5 | end
6 |
7 | properties (Transient)
8 | mask
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if strcmp(obj.net.mode, 'test')
14 | outputs = inputs ;
15 | return ;
16 | end
 17 |       if obj.frozen && ~isempty(obj.mask)
18 | outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ;
19 | else
20 | [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ;
21 | end
22 | end
23 |
24 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
25 | if strcmp(obj.net.mode, 'test')
26 | derInputs = derOutputs ;
27 | derParams = {} ;
28 | return ;
29 | end
30 | derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ;
31 | derParams = {} ;
32 | end
33 |
34 | % ---------------------------------------------------------------------
35 | function obj = DropOut(varargin)
36 | obj.load(varargin{:}) ;
37 | end
38 |
39 | function obj = reset(obj)
40 | reset@dagnn.ElementWise(obj) ;
41 | obj.mask = [] ;
42 | obj.frozen = false ;
43 | end
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/ElementWise.m:
--------------------------------------------------------------------------------
1 | classdef ElementWise < dagnn.Layer
2 | %ELEMENTWISE DagNN layers that operate at individual spatial locations
3 | methods
4 | function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes)
5 | outputSizes = inputSizes ;
6 | transforms = {eye(6)} ;
7 | end
8 |
9 | function rfs = getReceptiveFields(obj)
10 | rfs.size = [1 1] ;
11 | rfs.stride = [1 1] ;
12 | rfs.offset = [1 1] ;
13 | end
14 |
15 | function outputSizes = getOutputSizes(obj, inputSizes)
16 | outputSizes = inputSizes ;
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Error.m:
--------------------------------------------------------------------------------
  1 | %ERROR Image error metric (PSNR / MSE / L2) dagnn layer
  2 | %  Compares two images (inputs{1} and inputs{2}) after adding back the
  3 | %  offset given as inputs{3}; when a fourth input is present, both are
  4 | %  first subtracted from inputs{4} (residual mode). Both images are
  5 | %  normalized by the per-image peak of inputs{2} before the metric.
6 | classdef Error < dagnn.Loss
7 | properties
8 | % loss = 'psnr'
9 | % ignoreAverage = true
10 | method = 'image'
11 | end
12 |
13 | methods
14 | function outputs = forward(obj, inputs, params)
15 |
16 | switch numel(inputs)
17 | case 4
18 | inputs{1} = inputs{4} - inputs{1} ;
19 | inputs{2} = inputs{4} - inputs{2} ;
20 | end
21 |
22 |
23 | inputs{1} = bsxfun(@plus, inputs{1}, inputs{3});
24 | inputs{2} = bsxfun(@plus, inputs{2}, inputs{3});
25 |
26 | peakval = max(max(inputs{2},[],1),[],2);
27 |
28 | peakval(peakval == 0) = 1;
29 |
30 | inputs{1} = bsxfun(@times, inputs{1}, 1./peakval);
31 | inputs{2} = bsxfun(@times, inputs{2}, 1./peakval);
32 |
33 | switch obj.loss
34 | case 'psnr'
35 | outputs{1} = psnr(gather(inputs{2}(:)), gather(inputs{1}(:))) ;
36 | case 'mse'
37 | outputs{1} = immse(gather(inputs{2}(:)), gather(inputs{1}(:))) ;
38 | case 'l2'
39 | outputs{1} = norm(gather(inputs{2}(:)) - gather(inputs{1}(:))) ;
40 | otherwise
 41 |                     error('Unknown loss ''%s''.', obj.loss) ;
42 | end
43 | obj.accumulateAverage(inputs, outputs);
44 | end
45 |
46 | function accumulateAverage(obj, inputs, outputs)
47 | if obj.ignoreAverage, return; end;
48 | n = obj.numAveraged ;
49 | m = n + size(inputs{1}, 4);
50 | obj.average = bsxfun(@plus, n * obj.average, gather(outputs{1}*size(inputs{1}, 4))) / m ;
51 | obj.numAveraged = m ;
52 | end
53 |
54 | function obj = Error(varargin)
55 | obj.load(varargin) ;
 56 |             obj.method = obj.method ; % no-op: 'method' has no setter; kept for parity with other layers
57 | end
58 | end
59 | end
60 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/EuclideanLoss.m:
--------------------------------------------------------------------------------
  1 | %EUCLIDEANLOSS vl_nnpdist dagnn wrapper
  2 | %  Accepts 2 or 3 inputs, where the third input is used as the
  3 | %  variable 'instanceWeights' parameter. Derivatives for the third
  4 | %  input are not computed.
  5 | %  By default the element-wise loss is aggregated.
6 | classdef EuclideanLoss < dagnn.Loss
7 | properties
8 | p = 2;
9 | aggregate = true;
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | switch numel(inputs)
15 | case 2
16 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
17 | 'aggregate', true, 'noRoot', true, obj.opts{:}) ;
18 | case 3
19 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
20 | 'aggregate', true, 'noRoot', true, 'instanceWeights', inputs{3}, ...
21 | obj.opts{:}) ;
22 | otherwise
23 | error('Invalid number of inputs');
24 | end
25 | obj.accumulateAverage(inputs, outputs);
26 | end
27 |
28 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
29 | derInputs = cell(1, numel(inputs));
30 | switch numel(inputs)
31 | case 2
32 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
33 | obj.p, derOutputs{1}, 'aggregate', false, 'noRoot', true, obj.opts{:}) ;
34 | case 3
35 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
36 | obj.p, derOutputs{1}, 'aggregate', false, 'noRoot', true, ...
37 | 'instanceWeights', inputs{3}, obj.opts{:}) ;
38 | otherwise
39 | error('Invalid number of inputs');
40 | end
41 | derParams = {} ;
42 |
43 | derInputs{1} = derInputs{1}./(size(derInputs{1}, 1) * size(derInputs{1}, 2) * size(derInputs{1}, 3));
44 | derInputs{2} = derInputs{2}./(size(derInputs{2}, 1) * size(derInputs{2}, 2) * size(derInputs{2}, 3));
45 | end
46 |
47 | function obj = EuclideanLoss(varargin)
48 | obj.load(varargin) ;
49 | obj.loss = 'pdist';
50 | end
51 | end
52 | end
53 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Filter.m:
--------------------------------------------------------------------------------
1 | classdef Filter < dagnn.Layer
2 | properties
3 | pad = [0 0 0 0]
4 | stride = [1 1]
5 | dilate = [1 1]
6 | end
7 | methods
8 | function set.pad(obj, pad)
9 | if numel(pad) == 1
10 | obj.pad = [pad pad pad pad] ;
11 | elseif numel(pad) == 2
12 | obj.pad = pad([1 1 2 2]) ;
13 | else
14 | obj.pad = pad ;
15 | end
16 | end
17 |
18 | function set.stride(obj, stride)
19 | if numel(stride) == 1
20 | obj.stride = [stride stride] ;
21 | else
22 | obj.stride = stride ;
23 | end
24 | end
25 |
26 | function set.dilate(obj, dilate)
27 | if numel(dilate) == 1
28 | obj.dilate = [dilate dilate] ;
29 | else
30 | obj.dilate = dilate ;
31 | end
32 | end
33 |
34 | function kernelSize = getKernelSize(obj)
35 | kernelSize = [1 1] ;
36 | end
37 |
38 | function outputSizes = getOutputSizes(obj, inputSizes)
39 | ks = obj.getKernelSize() ;
40 | ke = (ks - 1) .* obj.dilate + 1 ;
41 | outputSizes{1} = [...
42 | fix((inputSizes{1}(1) + obj.pad(1) + obj.pad(2) - ke(1)) / obj.stride(1)) + 1, ...
43 | fix((inputSizes{1}(2) + obj.pad(3) + obj.pad(4) - ke(2)) / obj.stride(2)) + 1, ...
44 | 1, ...
45 | inputSizes{1}(4)] ;
46 | end
47 |
48 | function rfs = getReceptiveFields(obj)
49 | ks = obj.getKernelSize() ;
50 | ke = (ks - 1) .* obj.dilate + 1 ;
51 | y1 = 1 - obj.pad(1) ;
52 | y2 = 1 - obj.pad(1) + ke(1) - 1 ;
53 | x1 = 1 - obj.pad(3) ;
54 | x2 = 1 - obj.pad(3) + ke(2) - 1 ;
55 | h = y2 - y1 + 1 ;
56 | w = x2 - x1 + 1 ;
57 | rfs.size = [h, w] ;
58 | rfs.stride = obj.stride ;
59 | rfs.offset = [y1+y2, x1+x2]/2 ;
60 | end
61 | end
62 | end
63 |
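
A worked instance of the getOutputSizes formula above: a 3x3 kernel with
dilation 1, padding 1 on every side, and stride 2 on a 224x224 input gives
an effective kernel ke = (3-1)*1 + 1 = 3 and output fix((224+1+1-3)/2)+1 =
112 per spatial dimension:

    ks = [3 3] ; dilate = [1 1] ; pad = [1 1 1 1] ; stride = [2 2] ;
    in = [224 224] ;
    ke  = (ks - 1) .* dilate + 1 ;
    out = fix((in + pad([1 3]) + pad([2 4]) - ke) ./ stride) + 1  % [112 112]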
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/LRN.m:
--------------------------------------------------------------------------------
1 | classdef LRN < dagnn.ElementWise
2 | properties
3 | param = [5 1 0.0001/5 0.75]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = LRN(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Loss.m:
--------------------------------------------------------------------------------
1 | classdef Loss < dagnn.ElementWise
2 | properties
3 | loss = 'softmaxlog'
4 | ignoreAverage = false
5 | opts = {}
6 | end
7 |
8 | properties (Transient)
9 | average = 0
10 | numAveraged = 0
11 | end
12 |
13 | methods
14 | function outputs = forward(obj, inputs, params)
15 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss, obj.opts{:}) ;
16 | obj.accumulateAverage(inputs, outputs);
17 | end
18 |
19 | function accumulateAverage(obj, inputs, outputs)
20 | if obj.ignoreAverage, return; end;
21 | n = obj.numAveraged ;
22 | m = n + size(inputs{1}, 1) * size(inputs{1}, 2) * size(inputs{1}, 4);
23 | obj.average = bsxfun(@plus, n * obj.average, gather(outputs{1})) / m ;
24 | obj.numAveraged = m ;
25 | end
26 |
27 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
28 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss, obj.opts{:}) ;
29 | derInputs{2} = [] ;
30 | derParams = {} ;
31 | end
32 |
33 | function reset(obj)
34 | obj.average = 0 ;
35 | obj.numAveraged = 0 ;
36 | end
37 |
38 | function outputSizes = getOutputSizes(obj, inputSizes, paramSizes)
39 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ;
40 | end
41 |
42 | function rfs = getReceptiveFields(obj)
43 | % the receptive field depends on the dimension of the variables
44 | % which is not known until the network is run
45 | rfs(1,1).size = [NaN NaN] ;
46 | rfs(1,1).stride = [NaN NaN] ;
47 | rfs(1,1).offset = [NaN NaN] ;
48 | rfs(2,1) = rfs(1,1) ;
49 | end
50 |
51 | function obj = Loss(varargin)
52 | obj.load(varargin) ;
53 | end
54 | end
55 | end
56 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/NormOffset.m:
--------------------------------------------------------------------------------
1 | classdef NormOffset < dagnn.ElementWise
2 | properties
3 | param = [1 0.5]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = NormOffset(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/PDist.m:
--------------------------------------------------------------------------------
1 | %PDIST vl_nnpdist dagnn wrapper
  2 | %  Accepts 2 or 3 inputs, where the third input is used as the
  3 | %  variable 'instanceWeights' parameter. Derivatives for the third
  4 | %  input are not computed.
5 | % By default aggregates the element-wise loss.
6 | classdef PDist < dagnn.Loss
7 | properties
8 | p = 2;
9 | aggregate = true;
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | switch numel(inputs)
15 | case 2
16 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
17 | 'aggregate', obj.aggregate, obj.opts{:}) ;
18 | case 3
19 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
20 | 'aggregate', obj.aggregate, 'instanceWeights', inputs{3}, ...
21 | obj.opts{:}) ;
22 | otherwise
23 | error('Invalid number of inputs');
24 | end
25 | obj.accumulateAverage(inputs, outputs);
26 | end
27 |
28 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
29 | derInputs = cell(1, numel(inputs));
30 | switch numel(inputs)
31 | case 2
32 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
33 | obj.p, derOutputs{1}, 'aggregate', obj.aggregate, obj.opts{:}) ;
34 | case 3
35 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
36 | obj.p, derOutputs{1}, 'aggregate', obj.aggregate, ...
37 | 'instanceWeights', inputs{3}, obj.opts{:}) ;
38 | otherwise
39 | error('Invalid number of inputs');
40 | end
41 | derParams = {} ;
42 | end
43 |
44 | function obj = PDist(varargin)
45 | obj.load(varargin) ;
46 | obj.loss = 'pdist';
47 | end
48 | end
49 | end
50 |
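A usage sketch of the two forms the wrapper dispatches to (sizes hypothetical; the weight shape is an assumption following the vl_nnpdist 'instanceWeights' option the wrapper forwards):

    x  = rand(4,4,8,2,'single') ;
    x0 = rand(4,4,8,2,'single') ;
    y  = vl_nnpdist(x, x0, 2, [], 'aggregate', true) ;    % two-input form
    w  = ones(4,4,1,2,'single') ;                         % hypothetical weights
    yw = vl_nnpdist(x, x0, 2, [], 'aggregate', true, ...
                    'instanceWeights', w) ;               % three-input form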
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Pooling.m:
--------------------------------------------------------------------------------
1 | classdef Pooling < dagnn.Filter
2 | properties
3 | method = 'max'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = Pooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/ROIPooling.m:
--------------------------------------------------------------------------------
1 | classdef ROIPooling < dagnn.Layer
2 | % DAGNN.ROIPOOLING Region of interest pooling layer
3 |
4 | % Copyright (C) 2016 Hakan Bilen.
5 | % All rights reserved.
6 | %
7 | % This file is part of the VLFeat library and is made available under
8 | % the terms of the BSD license (see the COPYING file).
9 |
10 | properties
11 | method = 'max'
12 | subdivisions = [6 6]
13 | transform = 1
14 | flatten = false
15 | end
16 |
17 | methods
18 | function outputs = forward(obj, inputs, params)
19 | numROIs = numel(inputs{2}) / 5 ;
20 | outputs{1} = vl_nnroipool(...
21 | inputs{1}, inputs{2}, ...
22 | 'subdivisions', obj.subdivisions, ...
23 | 'transform', obj.transform, ...
24 | 'method', obj.method) ;
25 | if obj.flatten
26 | outputs{1} = reshape(outputs{1},1,1,[],numROIs) ;
27 | end
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | numROIs = numel(inputs{2}) / 5 ;
32 | if obj.flatten
33 | % unflatten
34 | derOutputs{1} = reshape(...
35 | derOutputs{1},obj.subdivisions(1),obj.subdivisions(2),[],numROIs) ;
36 | end
37 | derInputs{1} = vl_nnroipool(...
38 | inputs{1}, inputs{2}, derOutputs{1}, ...
39 | 'subdivisions', obj.subdivisions, ...
40 | 'transform', obj.transform, ...
41 | 'method', obj.method) ;
42 | derInputs{2} = [];
43 | derParams = {} ;
44 | end
45 |
46 | function outputSizes = getOutputSizes(obj, inputSizes)
47 | if isempty(inputSizes{2})
48 | n = 0 ;
49 | else
50 | n = prod(inputSizes{2})/5 ;
51 | end
52 | outputSizes{1} = [obj.subdivisions, inputSizes{1}(3), n] ;
53 | end
54 |
55 | function obj = ROIPooling(varargin)
56 | obj.load(varargin) ;
57 | end
58 | end
59 | end
60 |
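The division by 5 reflects the input format of vl_nnroipool: the second input packs five numbers per region. In the Fast R-CNN example code these are an image index into the batch followed by the four box coordinates; that ordering is an assumption here, taken from that example. A sketch:

    feats = rand(32,32,256,2,'single') ;       % hypothetical conv features, 2 images
    rois  = single([ 1   1   2 ;               % image index within the batch
                     4  10   6 ;               % four box coordinates per
                     4  12   6 ;               % region (ordering as in the
                    20  28  16 ;               % Fast R-CNN example)
                    20  30  16 ]) ;            % -> 5 x numROIs
    y = vl_nnroipool(feats, rois, 'subdivisions', [6 6]) ;  % 6 x 6 x 256 x 3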
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/ReLU.m:
--------------------------------------------------------------------------------
1 | classdef ReLU < dagnn.ElementWise
2 | properties
3 | useShortCircuit = true
4 | leak = 0
5 | opts = {}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | outputs{1} = vl_nnrelu(inputs{1}, [], ...
11 | 'leak', obj.leak, obj.opts{:}) ;
12 | end
13 |
14 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
15 | derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}, ...
16 | 'leak', obj.leak, ...
17 | obj.opts{:}) ;
18 | derParams = {} ;
19 | end
20 |
21 | function forwardAdvanced(obj, layer)
22 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
23 | forwardAdvanced@dagnn.Layer(obj, layer) ;
24 | return ;
25 | end
26 | net = obj.net ;
27 | in = layer.inputIndexes ;
28 | out = layer.outputIndexes ;
29 | net.vars(out).value = vl_nnrelu(net.vars(in).value, [], ...
30 | 'leak', obj.leak, ...
31 | obj.opts{:}) ;
32 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) - 1;
33 | if ~net.vars(in).precious && net.numPendingVarRefs(in) == 0
34 | net.vars(in).value = [] ;
35 | end
36 | end
37 |
38 | function backwardAdvanced(obj, layer)
39 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
40 | backwardAdvanced@dagnn.Layer(obj, layer) ;
41 | return ;
42 | end
43 | net = obj.net ;
44 | in = layer.inputIndexes ;
45 | out = layer.outputIndexes ;
46 |
47 | if isempty(net.vars(out).der), return ; end
48 |
49 | derInput = vl_nnrelu(net.vars(out).value, net.vars(out).der, ...
50 | 'leak', obj.leak, obj.opts{:}) ;
51 |
52 | if ~net.vars(out).precious
53 | net.vars(out).der = [] ;
54 | net.vars(out).value = [] ;
55 | end
56 |
57 | if net.numPendingVarRefs(in) == 0
58 | net.vars(in).der = derInput ;
59 | else
60 | net.vars(in).der = net.vars(in).der + derInput ;
61 | end
62 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ;
63 | end
64 |
65 | function obj = ReLU(varargin)
66 | obj.load(varargin) ;
67 | end
68 | end
69 | end
70 |
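The short-circuit paths save memory: forwardAdvanced frees the input as soon as the output is computed, which is safe because backwardAdvanced recomputes the derivative from the output value instead. For y = vl_nnrelu(x), x > 0 exactly where y > 0 (and likewise for 0 < leak < 1), so both give the same derivative. A quick check:

    x    = randn(3,3,2,'single') ;
    dzdy = randn(3,3,2,'single') ;
    y    = vl_nnrelu(x) ;
    dx1  = vl_nnrelu(x, dzdy) ;   % derivative computed from the input
    dx2  = vl_nnrelu(y, dzdy) ;   % derivative recovered from the output
    assert(isequal(dx1, dx2))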
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < dagnn.ElementWise
2 | properties
3 | size
4 | scale = 1;
5 | bias = 0;
6 | hasBias = true
7 | end
8 |
9 | methods
10 |
11 | function outputs = forward(obj, inputs, params)
12 | args = horzcat(inputs, params) ;
13 | outputs{1} = bsxfun(@times, args{1}, args{2}) ;
14 | if obj.hasBias
15 | outputs{1} = bsxfun(@plus, outputs{1}, args{3}) ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | args = horzcat(inputs, params) ;
21 | sz = [size(args{2}) 1 1 1 1] ;
22 | sz = sz(1:4) ;
23 | dargs{1} = bsxfun(@times, derOutputs{1}, args{2}) ;
24 | dargs{2} = derOutputs{1} .* args{1} ;
25 | for k = find(sz == 1)
26 | dargs{2} = sum(dargs{2}, k) ;
27 | end
28 | if obj.hasBias
29 | dargs{3} = derOutputs{1} ;
30 | for k = find(sz == 1)
31 | dargs{3} = sum(dargs{3}, k) ;
32 | end
33 | end
34 | derInputs = dargs(1:numel(inputs)) ;
35 | derParams = dargs(numel(inputs)+(1:numel(params))) ;
36 | end
37 |
38 | function params = initParams(obj)
39 | params{1} = obj.scale ;
40 | if obj.hasBias
41 | params{2} = obj.bias ;
42 | end
43 | end
44 |
45 | function obj = Scale(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
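Because forward broadcasts a possibly smaller scale (and bias) over the input with bsxfun, backward must reduce the derivative back to the parameter's shape by summing over every dimension in which the parameter is singleton; that is what the find(sz == 1) loops do. A sketch for a per-channel scale:

    x    = randn(8,8,16,4,'single') ;
    s    = randn(1,1,16,'single') ;        % per-channel scale
    dzdy = randn(8,8,16,4,'single') ;
    ds   = dzdy .* x ;                     % elementwise, same size as x
    sz   = [size(s) 1 1 1 1] ; sz = sz(1:4) ;
    for k = find(sz == 1)                  % dims 1, 2 and 4 are singleton
      ds = sum(ds, k) ;
    end
    size(ds)                               % 1 1 16, matching size(s)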
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Sigmoid.m:
--------------------------------------------------------------------------------
1 | classdef Sigmoid < dagnn.ElementWise
2 | methods
3 | function outputs = forward(obj, inputs, params)
4 | outputs{1} = vl_nnsigmoid(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/SoftMax.m:
--------------------------------------------------------------------------------
1 | classdef SoftMax < dagnn.ElementWise
2 | methods
3 | function outputs = forward(self, inputs, params)
4 | outputs{1} = vl_nnsoftmax(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 |
12 | function obj = SoftMax(varargin)
13 | obj.load(varargin) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/SpatialNorm.m:
--------------------------------------------------------------------------------
1 | classdef SpatialNorm < dagnn.ElementWise
2 | properties
3 | param = [2 2 10 2]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = SpatialNorm(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/Sum.m:
--------------------------------------------------------------------------------
1 | classdef Sum < dagnn.ElementWise
2 | %SUM DagNN sum layer
3 | % The SUM layer takes the sum of all its inputs and stores the result
4 | % as its only output.
5 |
6 | properties (Transient)
7 | numInputs
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | obj.numInputs = numel(inputs) ;
13 | outputs{1} = inputs{1} ;
14 | for k = 2:obj.numInputs
15 | outputs{1} = outputs{1} + inputs{k} ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | for k = 1:obj.numInputs
21 | derInputs{k} = derOutputs{1} ;
22 | end
23 | derParams = {} ;
24 | end
25 |
26 | function outputSizes = getOutputSizes(obj, inputSizes)
27 | outputSizes{1} = inputSizes{1} ;
28 | for k = 2:numel(inputSizes)
29 | if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1}))
30 | if ~isequal(inputSizes{k}, outputSizes{1})
31 | warning('Sum layer: the dimensions of the input variables are not the same.') ;
32 | end
33 | end
34 | end
35 | end
36 |
37 | function rfs = getReceptiveFields(obj)
38 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
39 | rfs.size = [1 1] ;
40 | rfs.stride = [1 1] ;
41 | rfs.offset = [1 1] ;
42 | rfs = repmat(rfs, numInputs, 1) ;
43 | end
44 |
45 | function obj = Sum(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
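A usage sketch (variable names hypothetical): the layer simply lists several previously defined variables as its inputs and produces a single output variable.

    net = dagnn.DagNN() ;
    % ... layers producing variables 'a', 'b' and 'c' go here ...
    net.addLayer('sum1', dagnn.Sum(), {'a', 'b', 'c'}, {'s'}) ;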
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/+dagnn/UnPooling.m:
--------------------------------------------------------------------------------
1 | classdef UnPooling < dagnn.Filter
2 | properties
3 | method = 'avg'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(repmat(inputs{1}, self.poolSize), self.poolSize, inputs{1}, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(derOutputs{1}, self.poolSize, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = UnPooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
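Here forward unpooling is implemented as the backward pass of average pooling: repmat tiles the input up to the target size (its values are irrelevant for 'avg'; only the geometry matters), and inputs{1} plays the role of the output derivative, so vl_nnpool scatters each value, divided by the window area, across its pooling window. The backward pass is then an ordinary forward average pooling of the incoming derivative. A numeric sketch with the stride set equal to poolSize, which is how the layer has to be configured for the sizes to match:

    x = single(reshape(1:4, 2, 2)) ;                % 2x2 input, values 1..4
    u = vl_nnpool(repmat(x, [2 2]), [2 2], x, ...
                  'stride', 2, 'method', 'avg') ;   % 4x4 output
    % each window of u holds x(i,j)/4, e.g. u(1:2,1:2) is all 0.25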
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/compatibility/parallel/gather.m:
--------------------------------------------------------------------------------
1 | function x=gather(x)
2 | % GATHER Compatibility stub for the GATHER() function
3 | % GATHER() is a function in the Parallel Computing Toolbox. MATCONVNET
4 | % can work without it.
5 |
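The stub makes gather() an identity on plain arrays, so library code can call it unconditionally; when the real toolbox is installed, its own gather() shadows this one. For example:

    x = rand(3) ;
    assert(isequal(gather(x), x))   % without the toolbox, gather is a no-op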
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/compatibility/parallel/labindex.m:
--------------------------------------------------------------------------------
1 | function i = labindex()
2 | i = 1 ;
3 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/compatibility/parallel/numlabs.m:
--------------------------------------------------------------------------------
1 | function n = numlabs()
2 | n = 1 ;
3 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/simplenn/vl_simplenn_move.m:
--------------------------------------------------------------------------------
1 | function net = vl_simplenn_move(net, destination)
2 | %VL_SIMPLENN_MOVE Move a SimpleNN network between CPU and GPU.
3 | % NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network to the
4 | % current GPU device. NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the
5 | % network to the CPU.
6 | %
7 | % See also: VL_SIMPLENN().
8 |
9 | % Copyright (C) 2014-15 Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | switch destination
16 | case 'gpu', moveop = @(x) gpuArray(x) ;
17 | case 'cpu', moveop = @(x) gather(x) ;
18 | otherwise, error('Unknown destination ''%s''.', destination) ;
19 | end
20 | for l=1:numel(net.layers)
21 | switch net.layers{l}.type
22 | case {'conv', 'convt', 'bnorm'}
23 | for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'}
24 | f = char(f) ;
25 | if isfield(net.layers{l}, f)
26 | net.layers{l}.(f) = moveop(net.layers{l}.(f)) ;
27 | end
28 | end
29 | for f = {'weights', 'momentum'}
30 | f = char(f) ;
31 | if isfield(net.layers{l}, f)
32 | for j=1:numel(net.layers{l}.(f))
33 | net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ;
34 | end
35 | end
36 | end
37 | otherwise
38 | % nothing to do ?
39 | end
40 | end
41 |
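A typical round trip (note that input arrays still have to be moved by the caller, e.g. with gpuArray):

    net = vl_simplenn_move(net, 'gpu') ;    % weights and momentum to the GPU
    res = vl_simplenn(net, gpuArray(im)) ;
    net = vl_simplenn_move(net, 'cpu') ;    % gather everything back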
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/simplenn/vl_simplenn_start_parserv.m:
--------------------------------------------------------------------------------
1 | function vl_simplenn_start_parserv(net, ps)
2 | %VL_SIMPLENN_START_PARSERV Setup a parameter server for this network
3 | % VL_SIMPLENN_START_PARSERV(NET, PS) registers the network
4 | % parameter derivatives with the specified ParameterServer instance
5 | % PS and then starts the server.
6 |
7 | for i = 1:numel(net.layers)
8 | for j = 1:numel(net.layers{i}.weights)
9 | value = net.layers{i}.weights{j} ;
10 | name = sprintf('l%d_%d',i,j) ;
11 | if strcmp(class(value),'gpuArray')
12 | deviceType = 'gpu' ;
13 | dataType = classUnderlying(value) ;
14 | else
15 | deviceType = 'cpu' ;
16 | dataType = class(value) ;
17 | end
18 | ps.register(...
19 | name, ...
20 | size(value), ...
21 | dataType, ...
22 | deviceType) ;
23 | end
24 | end
25 | ps.start() ;
26 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/data.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "data.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/datamex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "datamex.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/datamex.hpp:
--------------------------------------------------------------------------------
1 | // @file datamex.hpp
2 | // @brief Basic data structures (MEX support)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__datamex__
14 | #define __vl__datamex__
15 |
16 | #include "mex.h"
17 |
18 | #if ENABLE_GPU
19 | #include "gpu/mxGPUArray.h"
20 | #endif
21 |
22 | #include "data.hpp"
23 |
24 | namespace vl {
25 |
26 | class MexTensor ;
27 |
28 | class MexContext : public Context
29 | {
30 | public:
31 | MexContext() ;
32 | ~MexContext() ;
33 |
34 | protected:
35 | #if ENABLE_GPU
36 | vl::ErrorCode initGpu() ;
37 | vl::ErrorCode validateGpu() ;
38 | mxArray * canary ; // if it breathes, the GPU state is valid
39 | bool gpuIsInitialized ;
40 | #endif
41 |
42 | friend class MexTensor ;
43 | } ;
44 |
45 | class MexTensor : public Tensor
46 | {
47 | public:
48 | MexTensor(MexContext & context) ;
49 | vl::ErrorCode init(mxArray const * array) ;
50 | vl::ErrorCode init(DeviceType deviceType, DataType dataType, TensorShape const & shape) ;
51 | vl::ErrorCode initWithZeros(DeviceType deviceType, DataType dataType, TensorShape const & shape) ;
52 | vl::ErrorCode initWithValue(DeviceType deviceType, DataType dataType, TensorShape const & shape, double value) ;
53 |
54 | void makePersistent() ;
55 | mxArray * relinquish() ;
56 | void clear() ;
57 | ~MexTensor() ;
58 |
59 | size_t getMemorySize() const ;
60 |
61 | protected:
62 | MexContext & context ;
63 | mxArray const * array ;
64 | #ifdef ENABLE_GPU
65 | mxGPUArray const * gpuArray ;
66 | #endif
67 | bool isArrayOwner ;
68 |
69 | private: // prevention
70 | MexTensor(MexTensor const &) ;
71 | MexTensor & operator= (MexTensor & tensor) ;
72 | vl::ErrorCode initHelper(DeviceType deviceType, DataType dataType, TensorShape const & shape, bool fillWithZeros = false) ;
73 | } ;
74 |
75 | void print(char const * str, MexTensor const & tensor) ;
76 |
77 | void mexThrowError(Context const& context, vl::ErrorCode error) ;
78 | }
79 |
80 |
81 | #endif /* defined(__vl__datamex__) */
82 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/bilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file bilinearsampler.hpp
2 | // @brief Bilinear sampler implementation
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_BILINEARSAMPLER_H
15 | #define VL_BILINEARSAMPLER_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | // defines the dispatcher for CUDA kernels:
21 | namespace vl { namespace impl {
22 |
23 | template<vl::DeviceType dev, typename type>
24 | struct bilinearsampler {
25 |
26 | static vl::ErrorCode
27 | forward(Context& context,
28 | type* output,
29 | type const* data,
30 | type const* grid,
31 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
32 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
33 |
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | type* derData,
38 | type* derGrid,
39 | type const* data,
40 | type const* grid,
41 | type const* derOutput,
42 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
43 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
44 | } ;
45 |
46 | } }
47 |
48 | #endif /* defined(VL_BILINEARSAMPLER_H) */
49 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/compat.h:
--------------------------------------------------------------------------------
1 | #ifndef COMPAT_H
2 | #define COMPAT_H
3 |
4 | #ifdef _MSC_VER
5 | #define snprintf _snprintf
6 | #define vsnprintf _vsnprintf
7 | #define __func__ __FUNCTION__
8 | #undef max
9 | #undef min
10 |
11 | #ifdef _WIN64
12 | typedef signed __int64 ssize_t;
13 | #else
14 | typedef signed int ssize_t;
15 | #endif // _WIN64
16 |
17 | #if _MSC_VER < 1700
18 | #define false 0
19 | #define true 1
20 | #elif _MSC_VER > 1700
21 | #include <stdbool.h>
22 | #endif // _MSC_VER < 1700
23 |
24 | #if _MSC_VER < 1800
25 | // Add some missing functions from C99
26 | #define isnan(x) _isnan(x)
27 | #define isinf(x) (!_finite(x))
28 | #define round(x) (x >= 0.0 ? (double)(int)(x + 0.5) : (double)(int)(x - 0.5))
29 | #define roundf(x) (x >= 0.0f ? (float)(int)(x + 0.5f) : (float)(int)(x - 0.5f))
30 | #endif
31 |
32 | #endif // _MSC_VER
33 |
34 |
35 | #endif // COMPAT_H
36 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/copy.hpp:
--------------------------------------------------------------------------------
1 | // @file copy.hpp
2 | // @brief Copy and other data operations
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__copy__
14 | #define __vl__copy__
15 |
16 | #include "../data.hpp"
17 |
18 | namespace vl { namespace impl {
19 |
20 | template<vl::DeviceType dev, typename type>
21 | struct operations
22 | {
23 | typedef type data_type ;
24 | static vl::ErrorCode copy(data_type * dest, data_type const * src, size_t numElements) ;
25 | static vl::ErrorCode fill(data_type * dest, size_t numElements, data_type value) ;
26 | } ;
27 | } }
28 |
29 | #endif /* defined(__vl__copy__) */
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/copy_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file copy_cpu.cpp
2 | // @brief Copy and other data operations (CPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | namespace vl { namespace impl {
17 |
18 | template<typename type>
19 | struct operations<vl::VLDT_CPU, type>
20 | {
21 | typedef type data_type ;
22 |
23 | static vl::ErrorCode
24 | copy(data_type * dest,
25 | data_type const * src,
26 | size_t numElements)
27 | {
28 | memcpy(dest, src, numElements * sizeof(data_type)) ;
29 | return VLE_Success ;
30 | }
31 |
32 | static vl::ErrorCode
33 | fill(data_type * dest,
34 | size_t numElements,
35 | data_type value)
36 | {
37 | for (size_t k = 0 ; k < numElements ; ++k) {
38 | dest[k] = value ;
39 | }
40 | return VLE_Success ;
41 | }
42 | } ;
43 |
44 | } }
45 |
46 | template struct vl::impl::operations<vl::VLDT_CPU, float> ;
47 |
48 | #ifdef ENABLE_DOUBLE
49 | template struct vl::impl::operations<vl::VLDT_CPU, double> ;
50 | #endif
51 |
52 |
53 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/copy_gpu.cu:
--------------------------------------------------------------------------------
1 | // @file copy_gpu.cu
2 | // @brief Copy and other data operations (GPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include "../datacu.hpp"
15 | #include <string.h>
16 |
17 | template<typename type> __global__ void
18 | fill_kernel (type * data, type value, size_t size)
19 | {
20 | int index = threadIdx.x + blockIdx.x * blockDim.x ;
21 | if (index < size) data[index] = value ;
22 | }
23 |
24 | namespace vl { namespace impl {
25 |
26 | template<typename type>
27 | struct operations<vl::VLDT_GPU, type>
28 | {
29 | typedef type data_type ;
30 |
31 | static vl::ErrorCode
32 | copy(data_type * dest,
33 | data_type const * src,
34 | size_t numElements)
35 | {
36 | cudaMemcpy(dest, src, numElements * sizeof(data_type), cudaMemcpyDeviceToDevice) ;
37 | return VLE_Success ;
38 | }
39 |
40 | static vl::ErrorCode
41 | fill(data_type * dest,
42 | size_t numElements,
43 | data_type value)
44 | {
45 | fill_kernel<type>
46 | <<< divideAndRoundUp(numElements, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
47 | (dest, value, numElements) ;
48 |
49 | cudaError_t error = cudaGetLastError() ;
50 | if (error != cudaSuccess) {
51 | return VLE_Cuda ;
52 | }
53 | return VLE_Success ;
54 | }
55 | } ;
56 |
57 | } }
58 |
59 | template struct vl::impl::operations<vl::VLDT_GPU, float> ;
60 |
61 | #ifdef ENABLE_DOUBLE
62 | template struct vl::impl::operations<vl::VLDT_GPU, double> ;
63 | #endif
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/im2row.hpp:
--------------------------------------------------------------------------------
1 | // @file im2row.hpp
2 | // @brief Stack image patches as matrix rows
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__im2row__
14 | #define __vl__im2row__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DeviceType dev, typename type>
22 | struct im2row {
23 |
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | type* stacked,
27 | type const* data,
28 | size_t height, size_t width, size_t depth,
29 | size_t windowHeight, size_t windowWidth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
32 | int dilateY, int dilateX) ;
33 |
34 | static vl::ErrorCode
35 | backward(vl::Context& context,
36 | type* data,
37 | type const* stacked,
38 | size_t height, size_t width, size_t depth,
39 | size_t windowHeight, size_t windowWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
42 | int dilateY, int dilateX) ;
43 | } ;
44 |
45 | } }
46 |
47 | #endif /* defined(__vl__im2row__) */
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/nnbias_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias_cudnn.hpp
2 | // @brief Bias block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias_cudnn__
14 | #define __vl__nnbias_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | // todo: data type should be handled internally?
22 |
23 | template<vl::DataType dataType>
24 | struct nnbias_cudnn
25 | {
26 | static vl::ErrorCode
27 | forward(vl::Context& context,
28 | vl::Tensor output, double outputMult,
29 | vl::Tensor data, double dataMult,
30 | vl::Tensor biases, double biasesMult) ;
31 |
32 | static vl::ErrorCode
33 | backward(vl::Context& context,
34 | vl::Tensor derData, double derDataMult,
35 | vl::Tensor derBiases, double derBiasesMult,
36 | vl::Tensor derOutput, double derOutputMult) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbias_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/nnbilinearsampler_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler_cudnn.hpp
2 | // @brief BilinearSampler CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bilinearsampler_cudnn__
14 | #define __vl__bilinearsampler_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DataType dataType>
22 | struct nnbilinearsampler_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output,
27 | Tensor data,
28 | Tensor grid) ;
29 |
30 | static vl::ErrorCode
31 | backward(Context& context,
32 | Tensor derData,
33 | Tensor derGrid,
34 | Tensor data,
35 | Tensor grid,
36 | Tensor derOutput) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbilinearsampler_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/nnbnorm_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbnorm_cudnn.hpp
2 | // @brief bnorm CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm_cudnn__
14 | #define __vl__bnorm_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DataType dataType>
22 | struct nnbnorm_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | vl::Tensor output,
27 | vl::Tensor moments,
28 | vl::Tensor data,
29 | vl::Tensor multipliers,
30 | vl::Tensor biases,
31 | double epsilon) ;
32 |
33 | static vl::ErrorCode
34 | forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments,
37 | vl::Tensor data,
38 | vl::Tensor multipliers,
39 | vl::Tensor biases) ;
40 |
41 | static vl::ErrorCode
42 | backward(Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derMultipliers,
45 | vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor multipliers,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | double epsilon) ;
52 |
53 | static vl::ErrorCode
54 | backward_given_moments(Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derMultipliers,
57 | vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor multipliers,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | double epsilon) ;
64 | } ;
65 |
66 | } }
67 |
68 | #endif /* defined(__vl__nnbnorm_cudnn__) */
69 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/nnconv_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DataType dataType>
22 | struct nnconv_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output, double outputMult,
27 | Tensor data, double dataMult,
28 | Tensor filters,
29 | Tensor biases,
30 | int strideX, int strideY,
31 | int padLeft, int padRight,
32 | int padTop, int padBottom,
33 | int dilateX, int dilateY) ;
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | Tensor derData,
38 | Tensor derFilters,
39 | Tensor derBiases,
40 | Tensor data,
41 | Tensor filters,
42 | Tensor derOutput,
43 | int strideX, int strideY,
44 | int padLeft, int padRight,
45 | int padTop, int padBottom,
46 | int dilateX, int dilateY) ;
47 | } ;
48 |
49 | } }
50 | #endif /* defined(__vl__nnconv_cudnn__) */
51 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/nnpooling_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling_cudnn.hpp
2 | // @brief Pooling block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling_cudnn__
14 | #define __vl__nnpooling_cudnn__
15 |
16 | #include "../nnpooling.hpp"
17 | #include "../data.hpp"
18 | #include "cudnn.h"
19 |
20 |
21 | namespace vl { namespace impl {
22 |
23 | // todo: data type should be handled internally?
24 |
25 | template<vl::DataType dataType>
26 | struct nnpooling_cudnn
27 | {
28 | static vl::ErrorCode
29 | forward(Context& context,
30 | Tensor output,
31 | Tensor data,
32 | vl::PoolingMethod method,
33 | int poolHeight, int poolWidth,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 |
38 | static vl::ErrorCode
39 | backward(Context& context,
40 | Tensor derData,
41 | Tensor data,
42 | Tensor output,
43 | Tensor derOutput,
44 | vl::PoolingMethod method,
45 | int poolHeight, int poolWidth,
46 | int strideY, int strideX,
47 | int padTop, int padBottom,
48 | int padLeft, int padRight) ;
49 | };
50 |
51 | } }
52 |
53 | #endif /* defined(__vl__nnpooling_cudnn__) */
54 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/normalize.hpp:
--------------------------------------------------------------------------------
1 | // @file normalize.hpp
2 | // @brief Normalize block implementation
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__normalize__
14 | #define __vl__normalize__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DeviceType dev, typename type>
22 | struct lrn
23 | {
24 | static vl::ErrorCode
25 | forward(type* output,
26 | type const* data,
27 | size_t height, size_t width, size_t depth, size_t size,
28 | size_t normDepth,
29 | type kappa, type alpha, type beta) ;
30 |
31 | static vl::ErrorCode
32 | backward(type* derData,
33 | type const* data,
34 | type const* derOutput,
35 | size_t height, size_t width, size_t depth, size_t size,
36 | size_t normDepth,
37 | type kappa, type alpha, type beta) ;
38 | } ;
39 |
40 | } }
41 |
42 | #endif /* __vl__normalize__ */
43 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/pooling.hpp:
--------------------------------------------------------------------------------
1 | // @file pooling.hpp
2 | // @brief Pooling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_POOLING_H
15 | #define VL_POOLING_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 | template<vl::DeviceType dev, typename type>
23 | struct pooling_max {
24 | typedef type data_type ;
25 |
26 | static vl::ErrorCode
27 | forward(data_type* output,
28 | data_type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t poolHeight, size_t poolWidth,
31 | size_t strideY, size_t strideX,
32 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
33 |
34 | static vl::ErrorCode
35 | backward(data_type* derData,
36 | data_type const* data,
37 | data_type const* derOutput,
38 | size_t height, size_t width, size_t depth,
39 | size_t poolHeight, size_t poolWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
42 | } ;
43 |
44 | template<vl::DeviceType dev, typename type>
45 | struct pooling_average {
46 | typedef type data_type ;
47 |
48 | static vl::ErrorCode
49 | forward(data_type* output,
50 | data_type const* data,
51 | size_t height, size_t width, size_t depth,
52 | size_t poolHeight, size_t poolWidth,
53 | size_t strideY, size_t strideX,
54 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
55 |
56 | static vl::ErrorCode
57 | backward(data_type* derData,
58 | data_type const* derOutput,
59 | size_t height, size_t width, size_t depth,
60 | size_t poolHeight, size_t poolWidth,
61 | size_t strideY, size_t strideX,
62 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
63 | } ;
64 |
65 | } }
66 |
67 | #endif /* defined(VL_POOLING_H) */
68 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/impl/subsample.hpp:
--------------------------------------------------------------------------------
1 | // @file subsample.hpp
2 | // @brief Subsampling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_NNSUBSAMPLE_H
15 | #define VL_NNSUBSAMPLE_H
16 |
17 | #include "../data.hpp"
17 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 | template<vl::DeviceType dev, typename type>
23 | struct subsample {
24 |
25 | static vl::ErrorCode
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
32 |
33 | static vl::ErrorCode
34 | backward(vl::Context& context,
35 | type* derData,
36 | type const* derOutput,
37 | size_t height, size_t width, size_t depth,
38 | size_t strideY, size_t strideX,
39 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
40 | } ;
41 |
42 | } }
43 |
44 | #endif /* defined(VL_NNSUBSAMPLE_H) */
45 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/imread.cpp:
--------------------------------------------------------------------------------
1 | // @file imread.cpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "imread.hpp"
14 | #include <string.h>
15 |
16 | vl::ImageShape::ImageShape()
17 | : height(0), width(0), depth(0)
18 | { }
19 |
20 | vl::ImageShape::ImageShape(size_t height, size_t width, size_t depth)
21 | : height(height), width(width), depth(depth)
22 | { }
23 |
24 | vl::ImageShape::ImageShape(ImageShape const & im)
25 | : height(im.height), width(im.width), depth(im.depth)
26 | { }
27 |
28 | vl::ImageShape & vl::ImageShape::operator =(vl::ImageShape const & im)
29 | {
30 | height = im.height ;
31 | width = im.width ;
32 | depth = im.depth ;
33 | return *this ;
34 | }
35 |
36 | bool vl::ImageShape::operator == (vl::ImageShape const & im)
37 | {
38 | return
39 | (height == im.height) &
40 | (width == im.width) &
41 | (depth == im.depth) ;
42 | }
43 |
44 | size_t vl::ImageShape::getNumElements() const
45 | {
46 | return height*width*depth ;
47 | }
48 |
49 | void vl::ImageShape::clear()
50 | {
51 | height = 0 ;
52 | width = 0 ;
53 | depth = 0 ;
54 | }
55 |
56 | vl::Image::Image()
57 | : shape(), memory(NULL)
58 | { }
59 |
60 | vl::Image::Image(Image const & im)
61 | : shape(im.shape), memory(im.memory)
62 | { }
63 |
64 | vl::Image::Image(vl::ImageShape const & shape, float * memory)
65 | : shape(shape), memory(memory)
66 | { }
67 |
68 | vl::ImageShape const & vl::Image::getShape() const { return shape ; }
69 | float * vl::Image::getMemory() const { return memory ; }
70 |
71 | void vl::Image::clear()
72 | {
73 | shape.clear() ;
74 | memory = 0 ;
75 | }
76 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/imread.hpp:
--------------------------------------------------------------------------------
1 | // @file imread.hpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__imread__
14 | #define __vl__imread__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | #define VL_IMAGE_ERROR_MSG_MAX_LENGTH 256
21 |
22 | struct ImageShape
23 | {
24 | size_t height ;
25 | size_t width ;
26 | size_t depth ;
27 |
28 | ImageShape() ;
29 | ImageShape(size_t height, size_t width, size_t depth) ;
30 | ImageShape(ImageShape const & im) ;
31 | ImageShape & operator = (ImageShape const & im) ;
32 | bool operator == (ImageShape const & im) ;
33 |
34 | size_t getNumElements() const ;
35 | void clear() ;
36 | } ;
37 |
38 | class Image
39 | {
40 | public:
41 | Image() ;
42 | Image(Image const & im) ;
43 | Image(ImageShape const & shape, float * memory = NULL) ;
44 | ImageShape const & getShape() const ;
45 | float * getMemory() const ;
46 | void clear() ;
47 |
48 | protected:
49 | ImageShape shape ;
50 | float * memory ;
51 | } ;
52 |
53 | class ImageReader
54 | {
55 | public:
56 | ImageReader() ;
57 | ~ImageReader() ;
58 | vl::ErrorCode readShape(ImageShape & image, char const * fileName) ;
59 | vl::ErrorCode readPixels(float * memory, char const * fileName) ;
60 | char const * getLastErrorMessage() const ;
61 |
62 | private:
63 | class Impl ;
64 | Impl * impl ;
65 | } ;
66 | }
67 |
68 | #endif
69 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnbias.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnbias.cu"
5 |
6 | /**
7 | @brief nnbias_forward
8 | @param context context.
9 | @param output output tensor $\by$ [output].
10 | @param outputMult output tensor multiplier $\alpha$.
11 | @param data data tensor $\bx$.
12 | @param dataMult data tensor multiplier $\beta$.
13 | @param biases biases tensor $\bb$.
14 | @param biasesMult biases tensor multiplier $\gamma$.
15 |
16 | The function computes
17 | @f[
18 | y_{ijkd} \leftarrow
19 | \alpha y_{ijkd} +
20 | \beta x_{ijkd} +
21 | \gamma b_k.
22 | @f]
23 |
24 | @a data can be the null tensor, in which case this tensor
25 | is dropped in the summation.
26 | */
27 |
28 | /**
29 | @brief nnbias_backward
30 | @param context context.
31 | @param derData data derivative tensor $d\bx$ [output].
32 | @param derDataMult data derivative tensor multiplier $\eta$.
33 | @param derBiases biases derivative tensor $d\bb$ [output].
34 | @param derBiasesMult biases derivative tensor multiplier $\tau$.
35 | @param data data tensor $\bx$.
36 | @param dataMult data tensor multiplier $\beta$.
37 | @param biases biases tensor $\bb$.
38 | @param biasesMult biases tensor multiplier $\gamma$.
39 |
40 | If @a derData is the null tensor, this derivative is not computed and
41 | @a biases can also be null.
42 |
43 | If @a derBiases is the null tensor, this derivative is not computed and
44 | @a data can also be null.
45 | */
46 |
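The documented update, restated as a MATLAB sketch with hypothetical names (x and y are H x W x K x N tensors, b a K-element bias vector):

    y = alpha * y + beta * x ;
    y = bsxfun(@plus, y, gamma * reshape(b, 1, 1, [])) ;   % adds gamma*b(k) to channel k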
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnbias.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias.hpp
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias__
14 | #define __vl__nnbias__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnbias_forward(vl::Context& context,
22 | vl::Tensor output, double outputMult,
23 | vl::Tensor data, double dataMult,
24 | vl::Tensor biases, double biasesMult) ;
25 |
26 | vl::ErrorCode
27 | nnbias_backward(vl::Context& context,
28 | vl::Tensor derData, double derDataMult,
29 | vl::Tensor derBiases, double derBiasesMult,
30 | vl::Tensor derOutput, double derOutputMult) ;
31 | }
32 |
33 | #endif /* defined(__vl__nnbias__) */
34 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnbilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler.hpp
2 | // @brief Bilinear sampler block
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbilinearsampler__
14 | #define __vl__nnbilinearsampler__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 | vl::ErrorCode
21 | nnbilinearsampler_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor grid) ;
25 |
26 | vl::ErrorCode
27 | nnbilinearsampler_backward(vl::Context& context,
28 | vl::Tensor derData,
29 | vl::Tensor derGrid,
30 | vl::Tensor data,
31 | vl::Tensor grid,
32 | vl::Tensor derOutput) ;
33 | }
34 |
35 | #endif /* defined(__vl__nnbilinearsampler__) */
36 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnconv.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnconv.cu should be compiled instead"
3 | #endif
4 | #include "nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnfullyconnected.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnfullyconnected.cu should be compiled instead"
3 | #endif
4 | #include "nnfullyconnected.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnfullyconnected.hpp:
--------------------------------------------------------------------------------
1 | // @file nnfullyconnected.hpp
2 | // @brief Fully-connected block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 |
14 | #ifndef __vl__nnfullyconnected__
15 | #define __vl__nnfullyconnected__
16 |
17 | #include "data.hpp"
18 |
19 | namespace vl {
20 |
21 | vl::ErrorCode
22 | nnfullyconnected_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | vl::Tensor filters,
26 | vl::Tensor biases) ;
27 |
28 | vl::ErrorCode
29 | nnfullyconnected_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor derFilters,
32 | vl::Tensor derBiases,
33 | vl::Tensor data,
34 | vl::Tensor filters,
35 | vl::Tensor derOutput) ;
36 | }
37 |
38 |
39 | #endif /* defined(__vl__nnfullyconnected__) */
40 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalize.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnnormalize.hpp:
--------------------------------------------------------------------------------
1 | // @file nnnormalize.hpp
2 | // @brief Normalization block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnnormalize__
14 | #define __vl__nnnormalize__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 |
21 | vl::ErrorCode
22 | nnlrn_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | size_t normDepth,
26 | double kappa, double alpha, double beta) ;
27 |
28 | vl::ErrorCode
29 | nnlrn_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor data,
32 | vl::Tensor derOutput,
33 | size_t normDepth,
34 | double kappa, double alpha, double beta) ;
35 | }
36 |
37 | #endif /* defined(__vl__nnnormalize__) */
38 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnpooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnpooling.cu should be compiled instead"
3 | #endif
4 | #include "nnpooling.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnpooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling.hpp
2 | // @brief Pooling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling__
14 | #define __vl__nnpooling__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 |
21 | enum PoolingMethod { vlPoolingMax, vlPoolingAverage } ;
22 |
23 | vl::ErrorCode
24 | nnpooling_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor data,
27 | PoolingMethod method,
28 | int poolHeight, int poolWidth,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::ErrorCode
34 | nnpooling_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor data,
37 | vl::Tensor derOutput,
38 | PoolingMethod method,
39 | int poolHeight, int poolWidth,
40 | int strideY, int strideX,
41 | int padTop, int padBottom,
42 | int padLeft, int padRight) ;
43 | }
44 |
45 | #endif /* defined(__vl__nnpooling__) */
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnroipooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnroipooling.cu should be compiled instead"
3 | #endif
4 | #include "nnroipooling.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnroipooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnroipooling.hpp
2 | // @brief Spatial Pyramid block
3 | // @author Hakan Bilen
4 | // @author Abishek Dutta
5 | // @author Andrea Vedaldi
6 | /*
7 | Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __vl__nnroipooling__
15 | #define __vl__nnroipooling__
16 |
17 | #include "data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl {
21 | enum ROIPoolingMethod { vlROIPoolingMax, vlROIPoolingAverage } ;
22 |
23 | vl::ErrorCode
24 | nnroipooling_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor data,
27 | vl::Tensor rois,
28 | ROIPoolingMethod method,
29 | int const subdivisions[2],
30 | double const transform[6]) ;
31 |
32 | vl::ErrorCode
33 | nnroipooling_backward(vl::Context& context,
34 | vl::Tensor derData,
35 | vl::Tensor data,
36 | vl::Tensor rois,
37 | vl::Tensor derOutput,
38 | ROIPoolingMethod method,
39 | int const subdivisions[2],
40 | double const transform[6]) ;
41 | }
42 |
43 | #endif /* defined(__vl__nnroipooling__) */
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnsubsample.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnsubsample.cu"
5 |
6 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/bits/nnsubsample.hpp:
--------------------------------------------------------------------------------
1 | // @file nnsubsample.hpp
2 | // @brief Subsampling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnsubsample__
14 | #define __vl__nnsubsample__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnsubsample_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor biases,
25 | int strideY, int strideX,
26 | int padTop, int padBottom,
27 | int padLeft, int padRight) ;
28 |
29 | vl::ErrorCode
30 | nnsubsample_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor derBiases,
33 | vl::Tensor derOutput,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 | }
38 |
39 | #endif /* defined(__vl__nnsubsample__) */
40 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_cudatool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_cudatool.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_imreadjpeg.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_imreadjpeg_old.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnconv.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnconvt.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconvt.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnpool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnpool.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_nnroipool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnroipool.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_taccummex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_taccummex.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/src/vl_tmove.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_tmove.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_euclideanloss.m:
--------------------------------------------------------------------------------
1 | function y = vl_euclideanloss(x, c, dzdy)
2 | %VL_EUCLIDEANLOSS CNN Euclidean (squared L2) loss.
3 | %   Y = VL_EUCLIDEANLOSS(X, C) computes half the mean squared difference between X and C; VL_EUCLIDEANLOSS(X, C, DZDY) computes the derivative of the block projected onto DZDY.
4 |
5 | assert(numel(x) == numel(c));
6 |
7 | d = size(x);
8 |
9 | assert(all(d == size(c)));
10 |
11 | if nargin == 2 || (nargin == 3 && isempty(dzdy))
12 |
13 | y = 1 / 2 / prod(d) * sum(subsref((x - c) .^ 2, substruct('()', {':'}))); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
14 | % Y = 1 / (2 * prod(d(1 : 3))) * sum(subsref((X - c) .^ 2, substruct('()', {':'}))); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
15 |
16 | elseif nargin == 3 && ~isempty(dzdy)
17 |
18 | assert(numel(dzdy) == 1);
19 |
20 | y = dzdy / prod(d) * (x - c); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
21 | % Y = dzdy / prod(d(1 : 3)) * (X - c); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
22 |
23 | end
24 |
25 | end
26 |
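A minimal usage sketch (hypothetical data; assumes the toolbox is on the MATLAB path):

    x = randn(4,4,3,2,'single') ;              % hypothetical prediction
    c = randn(4,4,3,2,'single') ;              % hypothetical regression target
    loss = vl_euclideanloss(x, c) ;            % forward: scalar loss
    dzdx = vl_euclideanloss(x, c, single(1)) ; % backward: derivative, same size as x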
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnconcat.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin)
2 | %VL_NNCONCAT CNN concatenate multiple inputs.
3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell
4 | % array INPUTS along dimension DIM generating an output Y.
5 | %
6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives
7 | % of the block projected onto DZDY. DZDINPUTS has one element for
8 | % each element of INPUTS, each of which is an array that has the same
9 | %  dimensions as the corresponding array in INPUTS.
10 |
11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
12 | % All rights reserved.
13 | %
14 | % This file is part of the VLFeat library and is made available under
15 | % the terms of the BSD license (see the COPYING file).
16 |
17 | opts.inputSizes = [] ;
18 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
19 |
20 | if nargin < 2, dim = 3; end;
21 | if nargin < 3, dzdy = []; end;
22 |
23 | if isempty(dzdy)
24 | y = cat(dim, inputs{:});
25 | else
26 | if isempty(opts.inputSizes)
27 | opts.inputSizes = cellfun(@(inp) [size(inp,1),size(inp,2),size(inp,3),size(inp,4)], inputs, 'UniformOutput', false) ;
28 | end
29 | start = 1 ;
30 | y = cell(1, numel(opts.inputSizes)) ;
31 | s.type = '()' ;
32 | s.subs = {':', ':', ':', ':'} ;
33 | for i = 1:numel(opts.inputSizes)
34 | stop = start + opts.inputSizes{i}(dim) ;
35 | s.subs{dim} = start:stop-1 ;
36 | y{i} = subsref(dzdy,s) ;
37 | start = stop ;
38 | end
39 | end
40 |
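A minimal usage sketch (hypothetical shapes) showing the forward and backward calls:

    x1 = randn(4,4,3,2,'single') ;
    x2 = randn(4,4,5,2,'single') ;
    y = vl_nnconcat({x1, x2}, 3) ;           % y is 4 x 4 x 8 x 2
    dzdy = randn(size(y), 'single') ;
    dzdx = vl_nnconcat({x1, x2}, 3, dzdy) ;  % {dzdx1, dzdx2}, matching x1 and x2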
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nncrop.m:
--------------------------------------------------------------------------------
1 | function y = vl_nncrop(x, crop, dzdy, inputSize)
2 | %VL_NNCROP CNN crop.
3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the
4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT].
5 | %
6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the
7 | % function projected on the output derivative DZDY. DZDX has the same
8 | % dimension as X and DZDY the same dimension as Y.
9 | %
10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to
11 | % the previous call in which X is omitted and its size is passed as
12 | % INPUTSIZE.
13 |
14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi.
15 | % All rights reserved.
16 | %
17 | % This file is part of the VLFeat library and is made available under
18 | % the terms of the BSD license (see the COPYING file).
19 |
20 | if nargin < 4
21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ;
22 | else
23 | sz = inputSize ;
24 | end
25 |
26 | sv = 1 + crop(1) : sz(1) - crop(2) ;
27 | su = 1 + crop(3) : sz(2) - crop(4) ;
28 |
29 | if nargin <= 2 || isempty(dzdy)
30 | y = x(sv, su, :, :) ;
31 | else
32 | if isa(dzdy, 'gpuArray')
33 | y = gpuArray.zeros(sz, classUnderlying(dzdy)) ;
34 | else
35 | y = zeros(sz, class(dzdy)) ;
36 | end
37 | y(sv, su, :, :) = dzdy ;
38 | end
39 |
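A minimal usage sketch (hypothetical data): with CROP = [1 2 3 4], rows 2:8 and columns 4:6 of a 10 x 10 input are kept:

    x = randn(10,10,3,1,'single') ;
    y = vl_nncrop(x, [1 2 3 4]) ;            % y is 7 x 3 x 3
    dzdy = randn(size(y), 'single') ;
    dzdx = vl_nncrop(x, [1 2 3 4], dzdy) ;   % dzdy inside the crop, zeros elsewhere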
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnnoffset.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnoffset(x, param, dzdy)
2 | %VL_NNNOFFSET CNN norm-dependent offset.
3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the
4 | % weighted norm of the feature channels:
5 | %
6 | %    Y(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2)
7 | %
8 | % where
9 | %
10 | %    L(i,j) = sum_k X(i,j,k)^2
11 | %
12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the
13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
14 | % as X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | L = sum(x.^2,3) ;
23 | L = max(L, 1e-8) ;
24 |
25 | if nargin <= 2
26 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ;
27 | else
28 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ;
29 | end
30 |
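A minimal usage sketch (hypothetical data), using the same PARAM values as the test suite:

    x = randn(8,8,16,2,'single') ;
    y = vl_nnnoffset(x, [.34 .5]) ;   % subtracts .34 * L(i,j)^.5 from every channel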
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnnormalize.m:
--------------------------------------------------------------------------------
1 | %VL_NNNORMALIZE CNN Local Response Normalization (LRN)
2 | %  Y = VL_NNNORMALIZE(X, PARAM) computes the so-called Local Response
3 | % Normalization (LRN) operator. This operator performs a
4 | % channel-wise sliding window normalization of each column of the
5 | % input array X. The normalized output is given by:
6 | %
7 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA
8 | %
9 | % where the normalization factor is given by
10 | %
11 | %    L(i,j,k) = KAPPA + ALPHA * sum_{q in Q(k)} X(i,j,q)^2,
12 | %
13 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The
14 | % window Q(k) is defined as:
15 | %
16 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))].
17 | %
18 | % where D is the number of feature channels in X. Note in particular
19 | % that, by setting N >= 2D, the function can be used to normalize
20 | % all the channels as a single group (useful to achieve L2
21 | % normalization).
22 | %
23 | %  DZDX = VL_NNNORMALIZE(X, PARAM, DZDY) computes the derivative of
24 | % the block projected onto DZDY. DZDX and DZDY have the same
25 | % dimensions as X and Y respectively.
26 | %
27 | % **Remark:** Some CNN libraries (e.g. Caffe) use a slightly
28 | % different convention for the parameters of the LRN. Caffe in
29 | % particular uses the convention:
30 | %
31 | % PARAM_CAFFE = [N KAPPA N*ALPHA BETA]
32 | %
33 | %  i.e. the ALPHA parameter is multiplied by N.
34 |
35 | % Copyright (C) 2014 Andrea Vedaldi.
36 | % All rights reserved.
37 | %
38 | % This file is part of the VLFeat library and is made available under
39 | % the terms of the BSD license (see the COPYING file).
40 |
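A sketch of the Caffe parameter conversion described in the remark above (the input and the LRN values are hypothetical):

    x = randn(7,7,32,1,'single') ;
    paramCaffe = [5, 2, 1e-4, 0.75] ;   % [N KAPPA N*ALPHA BETA], Caffe convention
    param = [paramCaffe(1), paramCaffe(2), paramCaffe(3)/paramCaffe(1), paramCaffe(4)] ;
    y = vl_nnnormalize(x, param) ;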
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnormalizelp(x,dzdy,varargin)
2 | %VL_NNNORMALIZELP CNN Lp normalization
3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial
4 | % location in the array X:
5 | %
6 | %   Y(i,j,k) = X(i,j,k) / (sum_q X(i,j,q)^p + epsilon)^(1/p)
7 | %
8 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the
9 | % function with respect to X projected onto DZDY.
10 | %
11 | %  VL_NNNORMALIZELP(___, 'opt', val, ...) takes the following options:
12 | %
13 | % `p`:: 2
14 | % The exponent of the Lp norm. Warning: currently only even
15 | % exponents are supported.
16 | %
17 | % `epsilon`:: 0.01
18 | %    The constant added to the sum of p-powers before taking the
19 | %    1/p power (see the formula above).
20 | %
21 | % `spatial`:: `false`
22 | % If `true`, sum along the two spatial dimensions instead of
23 | % along the feature channels.
24 | %
25 | % See also: VL_NNNORMALIZE().
26 |
27 | opts.epsilon = 1e-2 ;
28 | opts.p = 2 ;
29 | opts.spatial = false ;
30 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
31 |
32 | if ~opts.spatial
33 | massp = sum(x.^opts.p,3) + opts.epsilon ;
34 | else
35 | massp = sum(sum(x.^opts.p,1),2) + opts.epsilon ;
36 | end
37 | mass = massp.^(1/opts.p) ;
38 | y = bsxfun(@rdivide, x, mass) ;
39 |
40 | if nargin < 2 || isempty(dzdy)
41 | return ;
42 | else
43 | dzdy = bsxfun(@rdivide, dzdy, mass) ;
44 | if ~opts.spatial
45 | tmp = sum(dzdy .* x, 3) ;
46 | else
47 | tmp = sum(sum(dzdy .* x, 1),2);
48 | end
49 | y = dzdy - bsxfun(@times, tmp, bsxfun(@rdivide, x.^(opts.p-1), massp)) ;
50 | end
51 |
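A minimal sketch (hypothetical data) checking that, with EPSILON set to zero, each spatial location ends up with unit L2 norm across channels:

    x = randn(1,1,8,1,'single') ;
    y = vl_nnnormalizelp(x, [], 'epsilon', 0) ;
    norm(squeeze(y))   % ~1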
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnrelu.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnrelu(x,varargin)
2 | %VL_NNRELU CNN rectified linear unit.
3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data
4 | % X. X can have arbitrary size.
5 | %
6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block
7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
8 | % X and Y respectively.
9 | %
10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options:
11 | %
12 | % `Leak`:: 0
13 | % Set the leak factor, a non-negative number. Y is equal to X if
14 | % X is not smaller than zero; otherwise, Y is equal to X
15 | %    multiplied by the leak factor. By default, the leak factor is
16 | % zero; for values greater than that one obtains the leaky ReLU
17 | % unit.
18 | %
19 | % ADVANCED USAGE
20 | %
21 | % As a further optimization, in the backward computation it is
22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then
23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY).
24 | % This is useful because it means that the buffer X does not need to
25 | % be remembered in the backward pass.
26 |
27 | % Copyright (C) 2014-15 Andrea Vedaldi.
28 | % All rights reserved.
29 | %
30 | % This file is part of the VLFeat library and is made available under
31 | % the terms of the BSD license (see the COPYING file).
32 |
33 | if ~isempty(varargin) && ~ischar(varargin{1}) % passed in dzdy
34 | dzdy = varargin{1} ;
35 | varargin(1) = [] ;
36 | else
37 | dzdy = [] ;
38 | end
39 |
40 | opts.leak = 0 ;
41 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
42 |
43 | if opts.leak == 0
44 | if nargin <= 1 || isempty(dzdy)
45 | y = max(x, 0) ;
46 | else
47 | y = dzdy .* (x > 0) ;
48 | end
49 | else
50 | if nargin <= 1 || isempty(dzdy)
51 | y = x .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
52 | else
53 | y = dzdy .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
54 | end
55 | end
56 |
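A minimal sketch of the plain and leaky variants (hypothetical values):

    x = single([-2 -1 0 1 2]) ;
    vl_nnrelu(x)                    % [0 0 0 1 2]
    vl_nnrelu(x, [], 'leak', 0.1)   % [-0.2 -0.1 0 1 2]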
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnsigmoid.m:
--------------------------------------------------------------------------------
1 | function out = vl_nnsigmoid(x,dzdy)
2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit.
3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can
4 | % have an arbitrary size. The sigmoid is defined as follows:
5 | %
6 | % SIGMOID(X) = 1 / (1 + EXP(-X)).
7 | %
8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the
9 | % block projected onto DZDY. DZDX and DZDY have the same
10 | % dimensions as X and Y respectively.
11 |
12 | % Copyright (C) 2015 Karel Lenc.
13 | % All rights reserved.
14 | %
15 | % This file is part of the VLFeat library and is made available under
16 | % the terms of the BSD license (see the COPYING file).
17 |
18 | y = 1 ./ (1 + exp(-x));
19 |
20 | if nargin <= 1 || isempty(dzdy)
21 | out = y ;
22 | else
23 | out = dzdy .* (y .* (1 - y)) ;
24 | end
25 |
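A minimal sketch (hypothetical data); the backward pass applies the identity DZDX = DZDY .* Y .* (1 - Y):

    x = randn(5,5,2,1,'single') ;
    y = vl_nnsigmoid(x) ;
    dzdy = randn(size(y), 'single') ;
    dzdx = vl_nnsigmoid(x, dzdy) ;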
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnsoftmax.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmax(X,dzdY)
2 | %VL_NNSOFTMAX CNN softmax.
3 | %  Y = VL_NNSOFTMAX(X) applies the softmax operator to the data X. X
4 | % has dimension H x W x D x N, packing N arrays of W x H
5 | % D-dimensional vectors.
6 | %
7 | % D can be thought of as the number of possible classes and the
8 | % function computes the softmax along the D dimension. Often W=H=1,
9 | % but this is not a requirement, as the operator is applied
10 | % convolutionally at all spatial locations.
11 | %
12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block
13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
14 | % X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ;
23 | L = sum(E,3) ;
24 | Y = bsxfun(@rdivide, E, L) ;
25 |
26 | if nargin <= 1, return ; end
27 |
28 | % backward
29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
30 |
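A minimal sketch (hypothetical data) checking that the outputs sum to one along the class dimension:

    x = randn(1,1,5,1,'single') ;
    y = vl_nnsoftmax(x) ;
    sum(y,3)   % 1, up to numerical precision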
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_nnspnorm.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnspnorm(x, param, dzdy)
2 | %VL_NNSPNORM CNN spatial normalization.
3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of
4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and
5 | % PW define the size of the spatial neighbourhood used for
6 | %  normalization.
7 | %
8 | % For each feature channel, the function computes the sum of squares
9 | % of X inside each rectangle, N2(i,j). It then divides each element
10 | % of X as follows:
11 | %
12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA.
13 | %
14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the
15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
16 | % as X and Y respectively.
17 |
18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | pad = floor((param(1:2)-1)/2) ;
25 | pad = [pad ; param(1:2)-1-pad] ;
26 |
27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ;
28 | f = 1 + param(3) * n2 ;
29 |
30 | if nargin <= 2 || isempty(dzdy)
31 | y = f.^(-param(4)) .* x ;
32 | else
33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ;
34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ;
35 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_rootnn.m:
--------------------------------------------------------------------------------
1 | function root = vl_rootnn()
2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox.
3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox.
4 |
5 | % Copyright (C) 2014 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = fileparts(fileparts(mfilename('fullpath'))) ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_setupnn.m:
--------------------------------------------------------------------------------
1 | function vl_setupnn()
2 | %VL_SETUPNN Setup the MatConvNet toolbox.
3 | %   VL_SETUPNN() adds the MatConvNet toolbox to the MATLAB path.
4 |
5 | % Copyright (C) 2014-15 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = vl_rootnn() ;
12 | addpath(fullfile(root, 'matlab')) ;
13 | addpath(fullfile(root, 'matlab', 'mex')) ;
14 | addpath(fullfile(root, 'matlab', 'simplenn')) ;
15 | addpath(fullfile(root, 'matlab', 'xtest')) ;
16 | addpath(fullfile(root, 'examples')) ;
17 |
18 | if ~exist('gather')
19 | warning('The MATLAB Parallel Toolbox does not seem to be installed. Activating compatibility functions.') ;
20 | addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ;
21 | end
22 |
23 | if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0
24 | warning('MatConvNet is not compiled. Consider running `vl_compilenn`.');
25 | end
26 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_taccum.m:
--------------------------------------------------------------------------------
1 | function a = vl_taccum(alpha, a, beta, b)
2 | %VL_TACCUM Compute A = alpha A + beta B
3 | % A = VL_TACCUM(ALPHA, A, BETA, B) computes efficiently A = alpha A
4 | % + beta B. For GPU arrays, it performs its computation in place, by
5 | % modifying A without creating an additional copy.
6 |
7 | % Copyright (C) 2016 Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | if isscalar(a) || isscalar(b)
14 | a = alpha * a + beta * b ;
15 | return ;
16 | elseif isa(a, 'gpuArray')
17 | vl_taccummex(alpha, a, beta, b, 'inplace') ;
18 | else
19 | a = vl_taccummex(alpha, a, beta, b) ;
20 | end
21 |
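A minimal sketch (hypothetical data); note that the non-scalar CPU path requires the compiled vl_taccummex MEX file:

    a = single(ones(3)) ;
    b = single(2 * ones(3)) ;
    a = vl_taccum(0.5, a, 0.25, b) ;   % every element becomes 0.5*1 + 0.25*2 = 1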
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/vl_tshow.m:
--------------------------------------------------------------------------------
1 | function vl_tshow(T, varargin)
2 | %VL_TSHOW Visualize a 4D tensor.
3 | % VL_TSHOW(T) shows the 4D tensor T in the current figure.
4 | %
5 | % The tensor is shown as a montage of 2D slices (e.g. filters), with the
6 | % 3rd dimension stacked along the rows and the 4th dimension along the
7 | % columns.
8 | %
9 | % VL_TSHOW(T, 'option', value, ...) accepts the following options:
10 | %
11 | % `labels`:: true
12 | % If true, labels the x/y axis of the montage.
13 | %
14 | % Any additional options are passed to IMAGESC (e.g. to set the parent
15 | % axes, or other properties).
16 |
17 | % Copyright (C) 2017 Joao F. Henriques.
18 | % All rights reserved.
19 | %
20 | % This file is part of the VLFeat library and is made available under
21 | % the terms of the BSD license (see the COPYING file).
22 |
23 | opts.labels = true ;
24 | [opts, varargin] = vl_argparse(opts, varargin, 'nonrecursive') ;
25 |
26 | assert((isnumeric(T) || islogical(T)) && ndims(T) <= 4, ...
27 | 'T must be a 4D numeric or logical tensor.') ;
28 |
29 | % Stack input channels along rows (merge 1st dim. with 3rd), and output
30 | % channels along columns (merge 2nd dim. with 4th), to form a 2D image
31 | sz = size(T) ;
32 | sz(end+1:4) = 1 ;
33 | T = reshape(permute(T, [1 3 2 4]), sz(1) * sz(3), sz(2) * sz(4)) ;
34 |
35 | % Display it
36 | h = imagesc(T, varargin{:}) ;
37 |
38 | ax = get(h, 'Parent') ;
39 | axis(ax, 'image') ;
40 |
41 | % Display grid between filters
42 | set(ax, 'XGrid', 'on', 'YGrid', 'on', 'GridAlpha', 1, ...
43 | 'TickLength', [0 0], 'XTickLabel', {}, 'YTickLabel', {}, ...
44 | 'YTick', sz(1) + 0.5 : sz(1) : sz(1) * sz(3) - 0.5, ...
45 | 'XTick', sz(2) + 0.5 : sz(2) : sz(2) * sz(4) - 0.5) ;
46 |
47 | if opts.labels
48 | xlabel(sprintf('Output channels (%i)', sz(4)), 'Parent', ax) ;
49 | ylabel(sprintf('Input channels (%i)', sz(3)), 'Parent', ax) ;
50 | end
51 |
52 |
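A minimal sketch (hypothetical tensor): eight 5 x 5 x 3 filters shown as a montage, with input channels stacked along rows and output channels along columns:

    T = randn(5,5,3,8,'single') ;
    figure ; vl_tshow(T) ;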
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/cmyk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/matlab/xtest/cmyk.jpg
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < nntest
2 | properties
3 | x
4 | a
5 | b
6 | end
7 |
8 | properties (TestParameter)
9 | dim = {1 2 3 4}
10 | end
11 |
12 | methods (TestClassSetup)
13 | function data(test,device)
14 | test.x = test.randn(15,14,3,2) ;
15 | test.a = test.randn(15,14,3,2) ;
16 | test.b = test.randn(15,14,3,2) ;
17 | end
18 | end
19 |
20 | methods (Test)
21 | function data_and_parameters(test, dim)
22 | x = test.x ;
23 | a = test.a ;
24 | b = test.b ;
25 |
26 | a = sum(a, dim) ;
27 | b = sum(b, dim) ;
28 |
29 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
30 |
31 | output = scale.forward({x}, {a,b}) ;
32 | dzdy = test.randn(size(output{1})) ;
33 | [derInputs, derParams] = scale.backward({x}, {a,b}, {dzdy}) ;
34 |
35 | pick = @(x) x{1} ;
36 | dzdx = derInputs{1} ;
37 | dzda = derParams{1} ;
38 | dzdb = derParams{2} ;
39 |
40 | test.der(@(x) pick(scale.forward({x},{a,b})), x, dzdy, dzdx, 1e-2 * test.range) ;
41 | test.der(@(a) pick(scale.forward({x},{a,b})), a, dzdy, dzda, 1e-2 * test.range) ;
42 | test.der(@(b) pick(scale.forward({x},{a,b})), b, dzdy, dzdb, 1e-2 * test.range) ;
43 | end
44 |
45 | function data_only(test, dim)
46 | x = test.x ;
47 | a = test.a ;
48 | b = test.b ;
49 |
50 | a = sum(a, dim) ;
51 | b = sum(b, dim) ;
52 |
53 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
54 |
55 | output = scale.forward({x,a,b}, {}) ;
56 | dzdy = test.randn(size(output{1})) ;
57 | [derInputs, derParams] = scale.backward({x,a,b}, {}, {dzdy}) ;
58 |
59 | pick = @(x) x{1} ;
60 | dzdx = derInputs{1} ;
61 | dzda = derInputs{2} ;
62 | dzdb = derInputs{3} ;
63 |
64 | test.der(@(x) pick(scale.forward({x,a,b},{})), x, dzdy, dzdx, 1e-2 * test.range) ;
65 | test.der(@(a) pick(scale.forward({x,a,b},{})), a, dzdy, dzda, 1e-2 * test.range) ;
66 | test.der(@(b) pick(scale.forward({x,a,b},{})), b, dzdy, dzdb, 1e-2 * test.range) ;
67 | end
68 | end
69 | end
70 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnbnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnbnorm < nntest
2 | properties (TestParameter)
3 | rows = {2 8 13}
4 | cols = {2 8 17}
5 | numDims = {1 3 4}
6 | batchSize = {2 7}
7 | end
8 | methods (Test)
9 | function basic(test, rows, cols, numDims, batchSize)
10 | r = rows ;
11 | c = cols ;
12 | nd = numDims ;
13 | bs = batchSize ;
14 | x = test.randn(r, c, nd, bs) ;
15 | %g = test.randn(1, 1, nd, 1) ;
16 | %b = test.randn(1, 1, nd, 1) ;
17 | g = test.randn(nd, 1) / test.range ;
18 | b = test.randn(nd, 1) / test.range ;
19 |
20 | y = vl_nnbnorm(x,g,b) ;
21 | dzdy = test.randn(size(y)) ;
22 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ;
23 |
24 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-3) ;
25 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, 1e-2) ;
26 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, 1e-3) ;
27 | end
28 | end
29 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnconcat.m:
--------------------------------------------------------------------------------
1 | classdef nnconcat < nntest
2 | methods (Test)
3 | function basic(test)
4 | pick = @(i,x) x{i} ;
5 | sz = [4,5,10,3] ;
6 | for dim = 1:3
7 | sz1 = sz ; sz1(dim) = 3 ;
8 | sz2 = sz ; sz2(dim) = 7 ;
9 | sz3 = sz ; sz3(dim) = 2 ;
10 | x1 = test.randn(sz1) ;
11 | x2 = test.randn(sz2) ;
12 | x3 = test.randn(sz3) ;
13 |
14 | y = vl_nnconcat({x1, x2, x3}, dim) ;
15 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
16 | dzdy = test.randn(size(y)) ;
17 | dzdx = vl_nnconcat({x1, x2, x3} ,dim, dzdy) ;
18 |
19 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
20 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
21 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
22 | end
23 | end
24 |
25 | function by_size(test)
26 | pick = @(i,x) x{i} ;
27 | sz = [4,5,10,3] ;
28 | for dim = 1:3
29 | sz1 = sz ; sz1(dim) = 3 ;
30 | sz2 = sz ; sz2(dim) = 7 ;
31 | sz3 = sz ; sz3(dim) = 2 ;
32 | x1 = test.randn(sz1) ;
33 | x2 = test.randn(sz2) ;
34 | x3 = test.randn(sz3) ;
35 |
36 | y = vl_nnconcat({x1, x2, x3}, dim) ;
37 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
38 | dzdy = test.randn(size(y)) ;
39 | dzdx = vl_nnconcat({}, dim, dzdy, 'inputSizes', {sz1, sz2, sz3}) ;
40 |
41 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
42 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
43 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nndropout.m:
--------------------------------------------------------------------------------
1 | classdef nndropout < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(4,5,10,3) ;
5 | [y,mask] = vl_nndropout(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ;
8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ;
9 | end
10 | end
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnmnist.m:
--------------------------------------------------------------------------------
1 | classdef nnmnist < nntest
2 | properties (TestParameter)
3 | networkType = {'simplenn', 'dagnn'}
4 | end
5 |
6 | methods (TestClassSetup)
7 | function init(test)
8 | addpath(fullfile(vl_rootnn, 'examples', 'mnist'));
9 | end
10 | end
11 |
12 | methods (Test)
13 | function valErrorRate(test, networkType)
14 | clear mex ; % will reset GPU, remove MCN to avoid crashing
15 | % MATLAB on exit (BLAS issues?)
16 | if strcmp(test.currentDataType, 'double'), return ; end
17 | rng(0); % fix random seed, for reproducible tests
18 | switch test.currentDevice
19 | case 'cpu'
20 | gpus = [];
21 | case 'gpu'
22 | gpus = 1;
23 | end
24 | trainOpts = struct('numEpochs', 1, 'continue', false, 'gpus', gpus, ...
25 | 'plotStatistics', false);
26 | if strcmp(networkType, 'simplenn')
27 | trainOpts.errorLabels = {'error', 'top5err'} ;
28 | end
29 | [~, info] = cnn_mnist('train', trainOpts, 'networkType', networkType);
30 | test.verifyLessThan(info.train.error, 0.08);
31 | test.verifyLessThan(info.val.error, 0.025);
32 | end
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnnormalize.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalize < nntest
2 | properties (TestParameter)
3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17}
4 | sgroup = {2 3 4 5 6 7}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, group)
9 | param = [group, .1, .5, .75] ;
10 | x = test.randn(3,2,10,4) ;
11 | y = vl_nnnormalize(x,param) ;
12 | dzdy = test.rand(size(y))-0.5 ;
13 | dzdx = vl_nnnormalize(x,param,dzdy) ;
14 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ;
15 | end
16 |
17 | function compare_to_naive(test, sgroup)
18 | param = [sgroup, .1, .5, .75] ;
19 | x = test.randn(3,2,10,4) ;
20 | y = vl_nnnormalize(gather(x),param) ;
21 | y_ = test.zeros(size(y)) ;
22 | x_ = gather(x) ;
23 | for i=1:size(x,1)
24 | for j=1:size(x,2)
25 | for n=1:size(x,4)
26 | t = test.zeros(1,1,size(x,3),1) ;
27 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ...
28 | ones(param(1),1), 'same')).^(-param(4)) ;
29 | y_(i,j,:,n) = x_(i,j,:,n) .* t ;
30 | end
31 | end
32 | end
33 | test.eq(y,y_) ;
34 | end
35 |
36 | function l2(test)
37 | x = test.randn(1,1,10,1) ;
38 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ;
39 | test.eq(sum(y(:).^2), test.toDataType(1), 1e-2) ;
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalizelp < nntest
2 | properties (TestParameter)
3 | h = {1 2 3 4}
4 | w = {1 2 3 4}
5 | d = {2 3 4}
6 | p = {2 4}
7 | end
8 |
9 | methods (Test)
10 | function basicl2(test, h,w,d)
11 | x = test.randn(h,w,d,3) ;
12 | y = vl_nnnormalizelp(x) ;
13 | dzdy = test.rand(size(y))-0.5 ;
14 | dzdx = vl_nnnormalizelp(x,dzdy) ;
15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, 1e-4, 0.3) ;
16 | end
17 |
18 | function lp(test, p)
19 | x = test.randn(2,3,5,3) / test.range ;
20 | y = vl_nnnormalizelp(x, [], 'p', p) ;
21 | dzdy = test.rand(size(y))-0.5 ;
22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ;
23 | test.der(@(x) vl_nnnormalizelp(x,[],'p',p), x, dzdy, dzdx, 1e-4, 0.3) ;
24 | end
25 |
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnoffset.m:
--------------------------------------------------------------------------------
1 | classdef nnoffset < nntest
2 | methods (Test)
3 | function basic(test)
4 | param = [.34, .5] ;
5 | x = test.randn(4,5,10,3) ;
6 | y = vl_nnnoffset(x,param) ;
7 | dzdy = test.randn(size(y)) ;
8 | dzdx = vl_nnnoffset(x,param,dzdy) ;
9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ;
10 | end
11 | end
12 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnpdist.m:
--------------------------------------------------------------------------------
1 | classdef nnpdist < nntest
2 | properties (TestParameter)
3 | oneToOne = {false, true}
4 | noRoot = {false, true}
5 | p = {.5 1 2 3}
6 | aggregate = {false, true}
7 | end
8 | methods (Test)
9 | function basic(test,oneToOne, noRoot, p, aggregate)
10 | if aggregate
11 | % make it smaller to avoid numerical derivative issues with
12 | % float
13 | h = 2 ;
14 | w = 2 ;
15 | else
16 | h = 13 ;
17 | w = 17 ;
18 | end
19 | d = 4 ;
20 | n = 5 ;
21 | x = test.randn(h,w,d,n) ;
22 | if oneToOne
23 | x0 = test.randn(h,w,d,n) ;
24 | else
25 | x0 = test.randn(1,1,d,n) ;
26 | end
27 | opts = {'noRoot', noRoot, 'aggregate', aggregate} ;
28 |
29 | y = vl_nnpdist(x, x0, p, opts{:}) ;
30 |
31 |       % make sure x and x0 are not too close in any dimension, as this
32 |       % may be a problem for the finite-difference derivatives: a
33 |       % difference could approach 0, which is not differentiable for some p-norms
34 |
35 | s = abs(bsxfun(@minus, x, x0)) < test.range*1e-1 ;
36 | x(s) = x(s) + 5*test.range ;
37 |
38 | dzdy = test.rand(size(y)) ;
39 | [dzdx, dzdx0] = vl_nnpdist(x,x0,p,dzdy,opts{:}) ;
40 | test.der(@(x) vl_nnpdist(x,x0,p,opts{:}), x, dzdy, dzdx, test.range * 1e-3) ;
41 | if oneToOne
42 | % Pdist does not implement backprop of the bsxfun
43 | test.der(@(x0) vl_nnpdist(x,x0,p,opts{:}), x0, dzdy, dzdx0, test.range * 1e-3) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnrelu.m:
--------------------------------------------------------------------------------
1 | classdef nnrelu < nntest
2 | properties
3 | x
4 | end
5 |
6 | methods (TestClassSetup)
7 | function data(test,device)
8 | % make sure that all elements in x are different. in this way,
9 | % we can compute numerical derivatives reliably by adding a delta < .5.
10 | x = test.randn(15,14,3,2) ;
11 | x(:) = randperm(numel(x))' ;
12 | % avoid non-diff value for test
13 | x(x==0)=1 ;
14 | test.x = x ;
15 | test.range = 10 ;
16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
17 | end
18 | end
19 |
20 | methods (Test)
21 | function basic(test)
22 | x = test.x ;
23 | y = vl_nnrelu(x) ;
24 | dzdy = test.randn(size(y)) ;
25 | dzdx = vl_nnrelu(x,dzdy) ;
26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ;
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnroipool.m:
--------------------------------------------------------------------------------
1 | classdef nnroipool < nntest
2 | properties
3 | x
4 | end
5 |
6 | properties (TestParameter)
7 | method = {'avg', 'max'}
8 | subdivisions = {[1 1], [2 1], [1 2], [3 7], [16 16]}
9 | end
10 |
11 | methods (TestClassSetup)
12 | function data(test,device)
13 | % make sure that all elements in x are different. in this way,
14 | % we can compute numerical derivatives reliably by adding a delta < .5.
15 | x = test.randn(15,14,3,2) ;
16 | x(:) = randperm(numel(x))' ;
17 | test.x = x ;
18 | test.range = 10 ;
19 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
20 | end
21 | end
22 |
23 | methods (Test)
24 | function basic(test,method,subdivisions)
25 | R = [1 1 1 2 2 2 1 1 ;
26 | 0 1 2 0 1 2 1 1 ;
27 | 0 4 3 0 1 2 1 1 ;
28 | 15 5 6 15 4 2 9 0 ;
29 | 14 7 9 14 4 8 1 0] ;
30 | R = test.toDevice(test.toDataType(R)) ;
31 | x = test.x ;
32 | args = {'method', method, 'subdivisions', subdivisions} ;
33 | y = vl_nnroipool(x,R,args{:}) ;
34 | dzdy = test.randn(size(y)) ;
35 | dzdx = vl_nnroipool(x,R,dzdy,args{:}) ;
36 | test.der(@(x) vl_nnroipool(x,R,args{:}), ...
37 | x, dzdy, dzdx, test.range * 1e-2) ;
38 | end
39 |
40 | function identity(test,method)
41 | x = test.toDevice(test.toDataType((2:10)'*(1:10))) ;
42 | R = test.toDevice(test.toDataType([1, 1, 1, 9, 10])) ;
43 | T = [0 1 0 ; 1 0 0] ;
44 | opts = {'method', method, ...
45 | 'subdivisions', [9,10], ...
46 | 'transform', T} ;
47 | y = vl_nnroipool(x,R,opts{:}) ;
48 | test.eq(x,y) ;
49 | end
50 | end
51 | end
52 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnsigmoid.m:
--------------------------------------------------------------------------------
1 | classdef nnsigmoid < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(5,5,1,1)/test.range ;
5 | y = vl_nnsigmoid(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nnsigmoid(x,dzdy) ;
8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ;
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnsoftmax.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmax < nntest
2 | properties (TestParameter)
3 | h = {1 2 3}
4 | w = {1 2}
5 | end
6 | methods (Test)
7 | function basic(test,h,w)
8 | d = 10 ;
9 | n = 3 ;
10 | x = test.randn(h,w,d,n)/test.range ;
11 | y = vl_nnsoftmax(x) ;
12 | dzdy = test.randn(size(y)) ;
13 | dzdx = vl_nnsoftmax(x, dzdy) ;
14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ;
15 | end
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmaxloss < nntest
2 | properties (TestParameter)
3 | weighed = {false true}
4 | multilab = {false true}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, multilab, weighed)
9 | C = 10 ;
10 | n = 3 ;
11 | if multilab
12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ;
13 | else
14 | c = reshape([7 2 1],1,1,1,[]) ;
15 | end
16 | if weighed
17 | c = cat(3, c, test.rand(size(c))) ;
18 | end
19 |
20 | % compare direct and indirect composition; this cannot
21 | % take large test.ranges
22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative
23 | y = vl_nnsoftmaxloss(x,c) ;
24 | if size(c,3) == 1
25 | opts = {'loss','log'} ;
26 | else
27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ;
28 | end
29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ;
30 | dzdy = test.randn(size(y)) ;
31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ;
33 | test.eq(y,y_) ;
34 | test.eq(dzdx,dzdx_) ;
35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, dzdy, dzdx, 0.001, -5e1) ;
36 |
37 | % now larger input range
38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative
39 | y = vl_nnsoftmaxloss(x,c) ;
40 | dzdy = test.randn(size(y)) ;
41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ...
43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ;
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/suite/nnspnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnspnorm < nntest
2 | methods (Test)
3 | function basic(test)
4 | h = 13 ;
5 | w = 17 ;
6 | d = 4 ;
7 | n = 5 ;
8 | param = [3, 3, 0.1, 0.75] ;
9 | x = test.randn(h,w,d,n) ;
10 | y = vl_nnspnorm(x, param) ;
11 | dzdy = test.rand(h, w, d, n) ;
12 | dzdx = vl_nnspnorm(x, param, dzdy) ;
13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_bench_bnorm.m:
--------------------------------------------------------------------------------
1 | function vl_bench_bnorm(gpu)
2 | if nargin < 1
3 | gpu = false ;
4 | end
5 |
6 | T = 100 ;
7 | x = randn(64,64,32,32,'single') ;
8 | g = randn(32,1,'single') ;
9 | b = randn(32,1,'single') ;
10 |
11 | if gpu
12 | x = gpuArray(x) ;
13 | g = gpuArray(g) ;
14 | b = gpuArray(b) ;
15 | end
16 |
17 | tic
18 | for t=1:T
19 | y = vl_nnbnorm(x,g,b) ;
20 | end
21 | if gpu, wait(gpuDevice) ; end
22 | fprintf('new: %f\n',toc);
23 |
24 | tic
25 | for t=1:T
26 | y_ = vl_nnbnorm_old(x,g,b) ;
27 | end
28 | if gpu, wait(gpuDevice) ; end
29 | fprintf('old: %f\n',toc);
30 |
31 | dzdy = randn(size(y),'single') ;
32 | if gpu
33 | dzdy = gpuArray(dzdy) ;
34 | end
35 |
36 | tic
37 | for t=1:T
38 | [a,b,c] = vl_nnbnorm(x,g,b,dzdy) ;
39 | end
40 | if gpu, wait(gpuDevice) ; end
41 | fprintf('new deriv: %f\n',toc);
42 |
43 | tic
44 | for t=1:T
45 | [a_,b_,c_] = vl_nnbnorm_old(x,g,b,dzdy) ;
46 | end
47 | if gpu, wait(gpuDevice) ; end
48 | fprintf('old deriv: %f\n',toc);
49 |
50 | vl_testsim(y,y_);
51 | vl_testsim(a,a_);
52 | vl_testsim(b,b_);
53 | vl_testsim(c,c_);
54 | end
55 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_bench_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | % VL_BENCH_IMREADJPEG Evaluates the speed of imreadjpeg
2 |
3 | numThreads = 4 ;
4 | base = 'data/bench-imreadjpeg' ;
5 |
6 | files = {} ;
7 | files = dir(fullfile(base,'*.jpg')) ;
8 | files = fullfile(base, {files.name}) ;
9 | if numel(files) > 256, files = files(1:256) ; end
10 |
11 | for preallocate = [true, false]
12 | opts={'verbose','verbose', 'preallocate', preallocate} ;
13 | for t=1:4
14 | % simple read
15 | fprintf('direct read single thread\n') ;
16 | clear ims ;
17 | tic ;
18 | ims = vl_imreadjpeg(files, 'numThreads', 1, opts{:}) ;
19 | directSingle(t) = toc ;
20 | fprintf(' done\n') ;
21 | pause(1) ;
22 |
23 | % simple read
24 | fprintf('direct read multi thread\n') ;
25 | clear ims ;
26 | tic ;
27 | ims = vl_imreadjpeg(files, 'numThreads', numThreads, opts{:}) ;
28 | direct(t) = toc ;
29 | fprintf(' done\n') ;
30 | pause(1) ;
31 |
32 | % threaded read
33 | fprintf('issue prefetch\n') ;
34 | tic ;
35 | vl_imreadjpeg(files, 'prefetch', opts{:}) ;
36 | prefetch(t) = toc ;
37 | fprintf(' done [pause 6]\n') ;
38 | pause(6)
39 |
40 | fprintf('prefetched read\n') ;
41 |     clear ims_ ; % do not account for the time required to delete this
42 | tic ;
43 | ims_ = vl_imreadjpeg(files, opts{:}) ;
44 | indirect(t) = toc ;
45 | pause(1) ;
46 | end
47 |
48 | n = numel(ims) ;
49 |   fprintf('** test results preallocate %d\n', preallocate) ;
50 |   fprintf('\tsingle thread: %.1f pm %.1f\n', mean(n./directSingle), std(n./directSingle)) ;
51 | fprintf('\t%d threads: %.1f pm %.1f\n', numThreads, mean(n./direct), std(n./direct)) ;
52 | fprintf('\tissue prefetch: %.1f pm %.1f\n', mean(n./prefetch), std(n./prefetch)) ;
53 | fprintf('\tretrieve prefetched: %.1f pm %.1f\n', mean(n./indirect), std(n./indirect)) ;
54 | fprintf('\n\n') ;
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_test_bnorm.m:
--------------------------------------------------------------------------------
1 | %%
2 | % Test script comparing vl_nnbnorm (CPU and GPU implementations) against
3 | % the old VLFeat-based vl_nnbnorm_old
4 | %%
5 |
6 | gpu = true ;  % set to false to test the CPU implementation
7 |
8 |
9 | T = 1 ;
10 | x = randn(64,64,32,32,'single') ;
11 | g = randn(32,1,'single') ;
12 | b = randn(32,1,'single') ;
13 |
14 | if gpu
15 | x = gpuArray(x) ;
16 | g = gpuArray(g) ;
17 | b = gpuArray(b) ;
18 | end
19 |
20 | a=vl_nnbnorm(x,g,b);
21 | a_=vl_nnbnorm_old(x,g,b);
22 |
23 | vl_testsim(a,a_)
24 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_test_economic_relu.m:
--------------------------------------------------------------------------------
1 | % VL_TEST_ECONOMIC_RELU
2 | function vl_test_economic_relu()
3 |
4 | x = randn(11,12,8,'single');
5 | w = randn(5,6,8,9,'single');
6 | b = randn(1,9,'single') ;
7 |
8 | net.layers{1} = struct('type', 'conv', ...
9 | 'filters', w, ...
10 | 'biases', b, ...
11 | 'stride', 1, ...
12 | 'pad', 0);
13 | net.layers{2} = struct('type', 'relu') ;
14 |
15 | res = vl_simplenn(net, x) ;
16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ;
17 | clear res ;
18 |
19 | res_ = vl_simplenn(net, x, dzdy) ;
20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ;
21 |
22 | a=whos('res_') ;
23 | b=whos('res__') ;
24 | assert(a.bytes > b.bytes) ;
25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ;
26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ;
27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ;
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_test_gpureset.m:
--------------------------------------------------------------------------------
1 | for explictMexReset = [false]
2 |
3 | % reset the same GPU device
4 | for t = 1:6
5 | if explictMexReset, clear mex ; end
6 | if mod(t-1,2) == 0
7 | disp('vl_test_gpureset: resetting GPU') ;
8 | gpuDevice(1) ;
9 | else
10 | disp('vl_test_gpureset: not resetting GPU') ;
11 | end
12 | if t > 1, disp(a) ; end
13 | a = gpuArray(single(ones(10))) ;
14 | b = gpuArray(single(ones(5))) ;
15 | c = vl_nnconv(a,b,[],'nocudnn') ;
16 | end
17 |
18 | % resetting GPU arguments to a MEX file should fail properly
19 | a = gpuArray(single(ones(10))) ;
20 | b = gpuArray(single(ones(5))) ;
21 | c = vl_nnconv(a,b,[],'nocudnn') ;
22 |
23 | gpuDevice(1) ;
24 | disp(a) ;
25 | try
26 | c = vl_nnconv(a,b,[],'nocudnn') ;
27 | catch e
28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ;
29 | end
30 |
31 | % switch GPU devices
32 | if gpuDeviceCount > 1
33 |     disp('vl_test_gpureset: test switching GPU device') ;
34 | for t = 1:gpuDeviceCount
35 | if explictMexReset, clear mex ; end
36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ;
37 | gpuDevice(t) ;
38 | a = gpuArray(single(ones(10))) ;
39 | b = gpuArray(single(ones(5))) ;
40 | c = vl_nnconv(a,b,[],'nocudnn') ;
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_test_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | function vl_test_imreadjpeg
2 | % VL_TEST_IMREADJPEG
3 |
4 | % Test basic file reading capability
5 | for t=1:6
6 | files{t} = which(sprintf('office_%d.jpg', t)) ;
7 | end
8 | ims = vl_imreadjpeg(files) ;
9 |
10 | % Test reading a CMYK image
11 | ims_cmyk = vl_imreadjpeg({which('cmyk.jpg')}) ;
12 |
13 | ims = vl_imreadjpeg(files) ;
14 | assert(all(~cellfun(@isempty, ims)), 'Image files not loaded.');
15 |
16 | % Test inserting a non-image file
17 | files_ = files ;
18 | files_{3} = [mfilename('fullpath') '.m'];
19 | ims_ = vl_imreadjpeg(files_) ;
20 | for t=setdiff(1:6,3)
21 | assert(isequal(ims{t},ims_{t})) ;
22 | end
23 |
24 | % Test inserting a non-existing file
25 | files__ = files_ ;
26 | files__{4} = 'idontexist.jpg' ;
27 | ims__ = vl_imreadjpeg(files__) ;
28 | for t=setdiff(1:6,[3 4])
29 | assert(isequal(ims{t},ims__{t})) ;
30 | end
31 |
32 | for n = 1:4
33 | % Test prefetching
34 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ;
35 | ims___ = vl_imreadjpeg(files) ;
36 | assert(isequal(ims,ims___)) ;
37 |
38 | % Hardening: test prefetching, clearing mex, fetching
39 | vl_imreadjpeg(files,'prefetch') ;
40 | clear mex ;
41 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ;
42 | assert(isequal(ims,ims___)) ;
43 | end
44 |
45 | ims = vl_imreadjpeg(files) ;
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/matlab/xtest/vl_test_print.m:
--------------------------------------------------------------------------------
1 | function vl_test_print(varargin)
2 |
3 | addpath(fullfile(vl_rootnn(), 'examples', 'mnist'));
4 |
5 | net = cnn_mnist_init('networkType', 'dagnn');
6 | net.print(varargin{:});
7 |
8 | end
9 |
10 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/get-file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | local_dir="$1"
4 | url="$2"
5 |
6 | function get_filename_from_url() {
7 | regexp='^([^\/]*\/)+'
8 | echo -n "$1" | sed -r "s/$regexp//g"
9 | }
10 |
11 | function get_remote_file_size() {
12 | curl -sI "$1" | grep Content-Length | grep -o '[0-9][0-9]*'
13 | }
14 |
15 | filename=$(get_filename_from_url "$url")
16 | local_path="$local_dir/$filename"
17 | remote_size=$(get_remote_file_size "$url")
18 |
19 | echo "Getting: $url"
20 | echo " File: $filename"
21 | echo " Local file path: $local_path"
22 | echo " Remote file size: $remote_size"
23 |
24 | if [ -e "$local_path" ]
25 | then
26 | local_size=$(stat -c%s "$local_path")
27 | echo " Local file size: $local_size"
28 | if [[ "$local_size" -eq "$remote_size" ]]
29 | then
30 | echo " Local and remote file sizes match: not downloading"
31 | exit 0
32 | else
33 | echo " Trying to resume partial download"
34 | if curl -f -C - -o "$local_path" "$url"
35 | then
36 | echo " Download completed successfully"
37 | exit 0
38 | else
39 | echo " Could not resume"
40 | fi
41 | fi
42 | fi
43 |
44 | echo " Downloading the whole file"
45 | curl -f -o "$local_path" "$url"
46 | exit $?
47 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/import-googlenet.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # brief: Import various CNN models from the web
3 | # author: Karel Lenc and Andrea Vedaldi
4 |
5 | # Models are written to /data/models
6 | # You can delete /data/tmp after conversion
7 |
8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?)
9 |
10 | overwrite=yes
11 |
12 | CAFFE_URL=http://dl.caffe.berkeleyvision.org/
13 | GOOGLENET_PROTO_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/train_val_googlenet.prototxt
14 | GOOGLENET_MODEL_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_googlenet.caffemodel
15 | GOOGLENET_MEAN_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_mean.binaryproto
16 |
17 | # Obtain the path of this script
18 | pushd `dirname $0` > /dev/null
19 | SCRIPTPATH=`pwd`
20 | popd > /dev/null
21 |
22 | #converter="python -m pdb $SCRIPTPATH/import-caffe.py"
23 | converter="python $SCRIPTPATH/import-caffe.py"
24 | data="$SCRIPTPATH/../data/models-import"
25 |
26 | mkdir -pv "$data/tmp/googlenet"
27 |
28 | function get()
29 | {
30 | "$SCRIPTPATH/get-file.sh" "$data/tmp/googlenet" "$1"
31 | }
32 |
33 | # --------------------------------------------------------------------
34 | # GoogLeNet
35 | # --------------------------------------------------------------------
36 |
37 | get "$CAFFE_URL/caffe_ilsvrc12.tar.gz"
38 | (cd "$data/tmp/googlenet" ; tar xzvf caffe_ilsvrc12.tar.gz)
39 |
40 | get "$GOOGLENET_PROTO_URL"
41 | get "$GOOGLENET_MODEL_URL"
42 | get "$GOOGLENET_MEAN_URL"
43 |
44 | (
45 | cd "$data/tmp/googlenet" ;
46 | cp -v train_val_googlenet.prototxt train_val_googlenet_patched.prototxt
47 | patch -Np0 < "$SCRIPTPATH/proto/googlenet_prototxt_patch.diff"
48 | )
49 |
50 | base="$data/tmp/googlenet"
51 | out="$data/imagenet-googlenet-dag.mat"
52 |
53 | if test -f "$out" -a -z "$overwrite"
54 | then
55 | echo "$out exists; skipping."
56 | else
57 | $converter \
58 | --caffe-variant=caffe_0115 \
59 | --preproc=vgg-caffe \
60 | --remove-dropout \
61 | --remove-loss \
62 | --append-softmax="cls3_fc" \
63 | --average-image="$base/imagenet_mean.binaryproto" \
64 | --synsets="$base/synset_words.txt" \
65 | --caffe-data="$base/imagenet_googlenet.caffemodel" \
66 | "$base/train_val_googlenet_patched.prototxt" \
67 | "$out"
68 | fi
69 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/import-resnet.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # brief: Import various CNN models from the web
3 | # author: Karel Lenc and Andrea Vedaldi
4 |
5 | # Models are written to /data/models
6 | # You can delete /data/tmp after conversion
7 |
8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?)
9 |
10 | overwrite=yes
11 |
12 | CAFFE_URL=http://dl.caffe.berkeleyvision.org/
13 | RESNET_URL=http://research.microsoft.com/en-us/um/people/kahe/resnet/models.zip
14 |
15 | # Obtain the path of this script
16 | pushd `dirname $0` > /dev/null
17 | SCRIPTPATH=`pwd`
18 | popd > /dev/null
19 |
20 | converter="python $SCRIPTPATH/import-caffe.py"
21 | data="$SCRIPTPATH/../data/models-import"
22 |
23 | mkdir -pv "$data/tmp/resnet"
24 |
25 | function get()
26 | {
27 | "$SCRIPTPATH/get-file.sh" "$data/tmp/resnet" "$1"
28 | }
29 |
30 | # --------------------------------------------------------------------
31 | # Resnet
32 | # --------------------------------------------------------------------
33 |
34 | get "$CAFFE_URL/caffe_ilsvrc12.tar.gz"
35 | (cd "$data/tmp/resnet" ; tar xzvf caffe_ilsvrc12.tar.gz)
36 |
37 | get "$RESNET_URL"
38 | (cd "$data/tmp/resnet" ; unzip -n models.zip)
39 |
40 | for t in 50 101 152
41 | do
42 | base="$data/tmp/resnet"
43 | out="$data/imagenet-resnet-$t-dag.mat"
44 | cdata=--caffe-data="$base/ResNet-$t-model.caffemodel"
45 |
46 | if test -f "$out" -a -z "$overwrite"
47 | then
48 | echo "$out exists; skipping."
49 | else
50 | $converter \
51 | --caffe-variant=caffe_b590f1d \
52 | --preproc=vgg-caffe \
53 | --remove-dropout \
54 | --remove-loss \
55 | --average-image="$base/ResNet_mean.binaryproto" \
56 | --synsets="$base/synset_words.txt" \
57 | $cdata \
58 | "$base/ResNet-$t-deploy.prototxt" \
59 | "$out"
60 | fi
61 | done
62 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hanyoseob/framing-u-net/69bb7788fe4d9c9582c1aea8107b669b5c93ad5a/matconvnet-1.0-beta24/utils/proto/__init__.py
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/proto/get-protos.sh:
--------------------------------------------------------------------------------
1 | # FCN
2 | wget -nc "https://raw.githubusercontent.com/longjon/caffe/6e3916766c6b63bff07e2cfadf210ee5e46af807/src/caffe/proto/caffe.proto" --output-document=./caffe_6e3916.proto
3 | protoc ./caffe_6e3916.proto --python_out=./
4 |
5 | # b590f1d (ResNet)
6 | wget -nc "https://raw.githubusercontent.com/BVLC/caffe/b590f1d27eb5cbd9bc7b9157d447706407c68682/src/caffe/proto/caffe.proto" --output-document=./caffe_b590f1d.proto
7 | protoc ./caffe_b590f1d.proto --python_out=./
8 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/simplenn_caffe_testdeploy.m:
--------------------------------------------------------------------------------
1 | % Compare basic MatConvNet and Caffe blocks numerically
2 | rootpath = fileparts(fileparts(mfilename('fullpath')));
3 | run(fullfile(rootpath, 'matlab', 'vl_setupnn.m'));
4 |
5 | inputScale = 100;
6 |
7 | caffemodel = fullfile('data', 'tmp_caffemodels', 'test_model');
8 | [~,~,~] = mkdir('data');
9 | [~,~,~] = mkdir(fileparts(caffemodel));
10 |
11 | %%
12 | layers = {};
13 | layers{end+1} = struct(...
14 | 'name', 'conv', ...
15 | 'type', 'conv', ...
16 | 'stride', [2, 2], ...
17 | 'pad', [1, 1, 1, 1], ...
18 | 'weights', {{rand(3, 3, 10, 5, 'single'), rand(5, 1, 'single')}});
19 |
20 | layers{end+1} = struct(...
21 | 'name', 'relu', ...
22 | 'type', 'relu');
23 |
24 | layers{end+1} = struct(...
25 | 'name', 'norm', ...
26 | 'type', 'normalize', ...
27 | 'param', [5, 1, 2e-5, 0.75]);
28 |
29 | layers{end+1} = struct(...
30 | 'name', 'softmax', ...
31 | 'type', 'softmax');
32 |
33 | %%
34 | net_ = struct();
35 | net_.meta.normalization.imageSize = [20, 20, 10];
36 | net_.meta.normalization.averageImage = rand(1, 1, 10);
37 |
38 | diffStats = zeros(numel(layers), 3);
39 | for li = 1:numel(layers)
40 | net_.layers = layers(li);
41 | layerName = layers{li}.name;
42 | simplenn_caffe_deploy(net_, caffemodel, 'doTest', false, ...
43 | 'outputBlobName', layerName, 'silent', true);
44 | res = simplenn_caffe_compare(net_, caffemodel, [], ...
45 | 'randScale', inputScale, 'silent', true);
46 | diffStats(li, :) = res.(layerName);
47 | end
48 |
49 | fprintf('Results:\n');
50 | layerNames = cellfun(@(l) l.name, layers, 'UniformOutput', false);
51 | fprintf('Layer %s\n', sprintf('% 10s', layerNames{:}));
52 | fprintf('MeanErr %s\n', sprintf('% 10.2e', diffStats(:, 2)));
53 | fprintf('MaxErr %s\n', sprintf('% 10.2e', diffStats(:, 3)));
54 |
55 | rmdir(fileparts(caffemodel), 's');
56 |
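
The harness above can be pointed at other block types by appending to the layers list before the comparison loop. A hedged sketch for a max-pooling block, mirroring the field layout of the conv layer (whether simplenn_caffe_deploy exports every block type is not verified here):

    % Hypothetical extra test case: 2x2 max pooling with stride 2.
    layers{end+1} = struct(...
        'name', 'pool', ...
        'type', 'pool', ...
        'method', 'max', ...
        'pool', [2, 2], ...
        'stride', [2, 2], ...
        'pad', [0, 0, 0, 0]);
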
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/test_examples.m:
--------------------------------------------------------------------------------
1 | function test_examples()
2 | %TEST_EXAMPLES Test some of the examples in the `examples/` directory
3 |
4 | addpath examples/mnist ;
5 | addpath examples/cifar ;
6 |
7 | trainOpts.gpus = [] ;
8 | trainOpts.continue = true ;
9 | num = 1 ;
10 |
11 | exps = {} ;
12 | for networkType = {'dagnn', 'simplenn'}
13 | for index = 1:4
14 | clear ex ;
15 | ex.trainOpts = trainOpts ;
16 | ex.networkType = char(networkType) ;
17 | ex.index = index ;
18 | exps{end+1} = ex ;
19 | end
20 | end
21 |
22 | if num > 1
23 | if isempty(gcp('nocreate'))
24 | parpool('local', num) ;
25 | end
26 | parfor e = 1:numel(exps)
27 | test_one(exps{e}) ;
28 | end
29 | else
30 | for e = 1:numel(exps)
31 | test_one(exps{e}) ;
32 | end
33 | end
34 |
35 | % -------------------------------------------------------------------------
36 | function test_one(ex)
37 | % -------------------------------------------------------------------------
38 |
39 | suffix = ['-' ex.networkType] ;
40 | switch ex.index
41 | case 1
42 | cnn_mnist(...
43 | 'expDir', ['data/test-mnist' suffix], ...
44 | 'batchNormalization', false, ...
45 | 'networkType', ex.networkType, ...
46 | 'train', ex.trainOpts) ;
47 |
48 | case 2
49 | cnn_mnist(...
50 | 'expDir', ['data/test-mnist-bnorm' suffix], ...
51 | 'batchNormalization', true, ...
52 | 'networkType', ex.networkType, ...
53 | 'train', ex.trainOpts) ;
54 |
55 | case 3
56 | cnn_cifar(...
57 | 'expDir', ['data/test-cifar-lenet' suffix], ...
58 | 'modelType', 'lenet', ...
59 | 'networkType', ex.networkType, ...
60 | 'train', ex.trainOpts) ;
61 |
62 | case 4
63 | cnn_cifar(...
64 | 'expDir', ['data/test-cifar-nin' suffix], ...
65 | 'modelType', 'nin', ...
66 | 'networkType', ex.networkType, ...
67 | 'train', ex.trainOpts) ;
68 | end
69 |
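
As written, the matrix of eight runs (two network types times four configurations) executes serially on the CPU, since num = 1 and trainOpts.gpus is empty. A sketch of the edits one might make at the top of the function to train on a GPU instead (the device index is a machine-specific assumption):

    % Hypothetical settings: use the first CUDA device; keep serial
    % execution, since parfor workers would contend for the single GPU.
    trainOpts.gpus = [1] ;
    trainOpts.continue = true ;
    num = 1 ;
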
--------------------------------------------------------------------------------
/matconvnet-1.0-beta24/utils/tidy_ref_models.m:
--------------------------------------------------------------------------------
1 | function tidy_ref_models()
2 | % Update reference models to latest MatConvNet version
3 |
4 | run(fullfile(fileparts(mfilename('fullpath')), '..', 'matlab', 'vl_setupnn.m')) ;
5 |
6 | models = {...
7 | 'imagenet-resnet-152-dag', ...
8 | 'imagenet-resnet-101-dag', ...
9 | 'imagenet-resnet-50-dag', ...
10 | 'imagenet-matconvnet-alex', ...
11 | 'imagenet-matconvnet-vgg-f', ...
12 | 'imagenet-matconvnet-vgg-m', ...
13 | 'imagenet-matconvnet-vgg-s', ...
14 | 'imagenet-matconvnet-vgg-verydeep-16', ...
15 | 'imagenet-caffe-ref', ...
16 | 'imagenet-caffe-alex', ...
17 | 'imagenet-vgg-s', ...
18 | 'imagenet-vgg-m', ...
19 | 'imagenet-vgg-f', ...
20 | 'imagenet-vgg-m-128', ...
21 | 'imagenet-vgg-m-1024', ...
22 | 'imagenet-vgg-m-2048', ...
23 | 'imagenet-vgg-verydeep-19', ...
24 | 'imagenet-vgg-verydeep-16', ...
25 | 'imagenet-googlenet-dag', ...
26 | 'pascal-fcn16s-dag', ...
27 | 'pascal-fcn32s-dag', ...
28 | 'pascal-fcn8s-dag', ...
29 | 'pascal-fcn8s-tvg-dag', ...
30 | 'vgg-face', ...
31 | 'fast-rcnn-caffenet-pascal07-dagnn', ...
32 | 'fast-rcnn-vggm1k-pascal07-dagnn', ...
33 | 'fast-rcnn-vgg16-pascal07-dagnn', ...
34 | } ;
35 | 
36 | mkdir(fullfile('data', 'models')) ;
37 | 
38 | for i = 1:numel(models)
39 | inPath = fullfile('data', 'models-import', [models{i} '.mat']) ;
40 | outPath = fullfile('data', 'models', [models{i} '.mat']) ;
41 | if exist(outPath, 'file'), continue ; end
42 | 
43 | fprintf('%s: loading ''%s''\n', mfilename, inPath) ;
44 | net = load(inPath) ;
45 | % Cannot use isa(net, 'dagnn.DagNN') because the loaded data is still a plain struct
46 | isDag = isfield(net, 'params') ;
47 | 
48 | if isDag
49 | net = dagnn.DagNN.loadobj(net) ;
50 | net = net.saveobj() ;
51 | else
52 | net = vl_simplenn_tidy(net) ;
53 | end
54 | 
55 | fprintf('%s: saving ''%s''\n', mfilename, outPath) ;
56 | save(outPath, '-struct', 'net') ;
57 | end
58 | 
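
Once tidied, the two formats are consumed differently: a DagNN file must be rebuilt with dagnn.DagNN.loadobj, while a SimpleNN file is a plain struct ready for vl_simplenn. A short sketch reusing the same isfield test as the loop above (the file name is one entry from the list; im stands for a suitably preprocessed input image, and the 'data' input name is assumed from the Caffe deploy files):

    % Sketch: load a tidied model, dispatching on the saved format.
    net = load(fullfile('data', 'models', 'imagenet-googlenet-dag.mat')) ;
    if isfield(net, 'params')
      net = dagnn.DagNN.loadobj(net) ;  % DagNN: rebuild the object
      net.eval({'data', im}) ;
    else
      res = vl_simplenn(net, im) ;      % SimpleNN: usable directly
    end
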
--------------------------------------------------------------------------------