may not be used to endorse or promote products derived
11 | from this software without specific prior written permission.
12 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 | IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
--------------------------------------------------------------------------------
/matconvnet/Makefile.mex:
--------------------------------------------------------------------------------
1 | # Compile using only MEX. The CUDA version must match MATLAB's.
2 |
3 | # Prefer .cu over .cpp and .c when GPU is enabled; this rule must come
4 | # before the following ones.
5 |
6 | ifneq ($(ENABLE_GPU),)
7 |
8 | matlab/mex/.build/%.o : matlab/src/bits/%.cu matlab/mex/.build/.stamp
9 | MW_NVCC_PATH='$(NVCC)' \
10 | $(MEX) -c $(MEXFLAGS_GPU) "$(<)" $(nvcc_filter)
11 | mv -f "$(notdir $(@))" "$(@)"
12 |
13 | matlab/mex/%.mex$(MEXARCH) : matlab/src/%.cu $(cpp_tgt) $(cu_tgt)
14 | MW_NVCC_PATH='$(NVCC)' \
15 | $(MEX) $(MEXFLAGS_GPU) "$(<)" -output "$(@)" $(cpp_tgt) $(cu_tgt) $(nvcc_filter)
16 |
17 | endif
18 |
19 | matlab/mex/.build/%.o : matlab/src/bits/%.cpp matlab/mex/.build/.stamp
20 | $(MEX) -c $(MEXFLAGS) "$(<)"
21 | mv -f "$(notdir $(@))" "$(@)"
22 |
23 | matlab/mex/%.mex$(MEXARCH) : matlab/src/%.cpp $(cpp_tgt)
24 | $(MEX) $(MEXFLAGS) "$(<)" -output "$(@)" $(cpp_tgt)
25 |
26 |
--------------------------------------------------------------------------------
/matconvnet/Makefile.nvcc:
--------------------------------------------------------------------------------
1 | # Compile using a mix of NVCC and MEX. In particular, it compiles MEX
2 | # files using NVCC and then MEX; in this manner the CUDA Devkit need
3 | # not match MATLAB's version.
4 |
5 | # Prefer .cu over .cpp and .c when GPU is enabled; this rule must come
6 | # before the following ones.
7 |
8 | ifneq ($(ENABLE_GPU),)
9 |
10 | matlab/mex/.build/%.o : matlab/src/bits/%.cu matlab/mex/.build/.stamp
11 | $(NVCC) $(NVCCFLAGS) \
12 | "$(<)" -c -o "$(@)" $(nvcc_filter)
13 |
14 | matlab/mex/%.mex$(MEXARCH) : matlab/src/%.cu $(cpp_tgt) $(cu_tgt)
15 | $(NVCC) $(NVCCFLAGS) -Xcompiler -fPIC \
16 | "$(<)" -c -o "matlab/mex/.build/$(*).o" $(nvcc_filter)
17 | $(MEX) $(MEXFLAGS_NVCC) "matlab/mex/.build/$(*).o" -output "$(@)" $(cpp_tgt) $(cu_tgt)
18 |
19 | endif
20 |
21 | matlab/mex/.build/%.o : matlab/src/bits/%.cpp matlab/mex/.build/.stamp
22 | $(MEX) -c $(MEXFLAGS) "$(<)"
23 | mv -f "$(notdir $(@))" "$(@)"
24 |
25 | matlab/mex/%.mex$(MEXARCH) : matlab/src/%.cpp $(cpp_tgt)
26 | $(MEX) $(MEXFLAGS) "$(<)" -output "$(@)" $(cpp_tgt)
27 |
--------------------------------------------------------------------------------
/matconvnet/README.md:
--------------------------------------------------------------------------------
1 | # MatConvNet: CNNs for MATLAB
2 |
3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural
4 | Networks* (CNNs) for computer vision applications. It is simple,
5 | efficient, and can run and learn state-of-the-art CNNs. Several
6 | example CNNs are included to classify and encode images. Please visit
7 | the [homepage](http://www.vlfeat.org/matconvnet) to learn more.
8 |
--------------------------------------------------------------------------------
/matconvnet/doc/Makefile:
--------------------------------------------------------------------------------
1 | PYTHON = python
2 | MARKDOWN = markdown2
3 | PDFLATEX = pdflatex
4 | BIBTEX = bibtex
5 | MKDOCS = mkdocs
6 | SVG2PDF = svg2pdf
7 |
8 | site=doc/site/docs
9 |
10 | mfiles=\
11 | vl_argparse.m \
12 | vl_compilenn.m \
13 | vl_nnbnorm.m \
14 | vl_nnconcat.m \
15 | vl_nnconv.m \
16 | vl_nnconvt.m \
17 | vl_nndropout.m \
18 | vl_nnloss.m \
19 | vl_nnnoffset.m \
20 | vl_nnnormalize.m \
21 | vl_nnpdist.m \
22 | vl_nnpool.m \
23 | vl_nnrelu.m \
24 | vl_nnsigmoid.m \
25 | vl_nnsoftmax.m \
26 | vl_nnsoftmaxloss.m \
27 | vl_nnspnorm.m \
28 | vl_rootnn.m \
29 | vl_setupnn.m \
30 | simplenn/vl_simplenn.m \
31 | simplenn/vl_simplenn_diagnose.m \
32 | simplenn/vl_simplenn_display.m \
33 | simplenn/vl_simplenn_move.m \
34 | +dagnn/@DagNN/DagNN.m
35 |
36 | mfile_src=$(addprefix matlab/, $(mfiles))
37 | mfile_doc=$(patsubst %.m,%.md,$(addprefix $(site)/mfiles/, $(mfiles)))
38 | mds=$(wildcard doc/site/docs/*.md)
39 | svg_src=$(wildcard doc/figures/svg/*.svg)
40 | svg_tgt=$(patsubst %.svg, %.pdf, $(svg_src))
41 |
42 | doc: doc-site doc-manual
43 | doc-site: doc/site/site/index.html
44 | doc-manual: doc/matconvnet-manual.pdf
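# A usage note (assuming this Makefile is invoked from the repository
# root, as the root-relative paths above suggest): build the documentation
# with `make doc`, `make doc-site`, or `make doc-manual`.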
45 |
46 | doc/matconvnet-manual.pdf : doc/matconvnet-manual.tex $(svg_tgt)
47 | mkdir -p doc/.build
48 | ln -sf ../references.bib doc/.build/references.bib
49 | cd "$(dir $(<))" ;\
50 | $(PDFLATEX) -file-line-error -output-directory=../doc/.build/ "$(notdir $(<))" ;
51 | cd doc/.build ; $(BIBTEX) matconvnet-manual || true ;
52 | cd "$(dir $(<))" ;\
53 | $(PDFLATEX) -file-line-error -output-directory=../doc/.build/ "$(notdir $(<))" ;\
54 | $(PDFLATEX) -file-line-error -output-directory=../doc/.build/ "$(notdir $(<))" ;
55 | cp -f doc/.build/matconvnet-manual.pdf doc/
56 |
57 | doc/figures/svg/%.pdf : doc/figures/svg/%.svg
58 | $(SVG2PDF) "$(<)" "$(@)"
59 |
60 | $(site)/mfiles/%.md : matlab/%.m $(site)/mfiles/.stamp doc/matdoc.py doc/matdocparser.py
61 | mkdir -p $(dir $(@))
62 | $(PYTHON) doc/matdoc.py "$(<)" > "$(@)"
63 |
64 | doc/site/site/index.html : doc/site/mkdocs.yml $(mfile_doc) $(mds)
65 | cd doc/site ; $(MKDOCS) build --clean
66 |
67 | doc-clean:
68 | rm -f doc/matdocparser.pyc doc/matdoc.pyc
69 | rm -f $(svg_tgt)
70 |
71 | doc-distclean:
72 | rm -f doc/matconvnet-manual.pdf
73 | rm -rf doc/site/site
74 | rm -f $(mfile_doc)
75 |
76 | doc-info:
77 | @echo "mds=$(mds)"
78 | @echo "mfile_src=$(mfile_src)"
79 | @echo "mfile_doc=$(mfile_doc)"
80 | @echo "svg_tgt=$(svg_tgt)"
81 |
--------------------------------------------------------------------------------
/matconvnet/doc/figures/imnet.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/matconvnet/doc/figures/imnet.pdf
--------------------------------------------------------------------------------
/matconvnet/doc/figures/pepper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/matconvnet/doc/figures/pepper.pdf
--------------------------------------------------------------------------------
/matconvnet/doc/references.bib:
--------------------------------------------------------------------------------
1 | %% This BibTeX bibliography file was created using BibDesk.
2 | %% http://bibdesk.sourceforge.net/
3 |
4 |
5 | %% Created for Andrea Vedaldi at 2014-04-16 10:10:12 +0100
6 |
7 |
8 | %% Saved with string encoding Unicode (UTF-8)
9 |
10 | @string{aistats = {Proc. {AISTATS}}}
11 |
12 | @string{bmvc = {Proc. {BMVC}}}
13 |
14 | @string{cvpr = {Proc. {CVPR}}}
15 |
16 | @string{eccv = {Proc. {ECCV}}}
17 |
18 | @string{iccv = {Proc. {ICCV}}}
19 |
20 | @string{icip = {Proc. {ICIP}}}
21 |
22 | @string{icml = {Proc. {ICML}}}
23 |
24 | @string{icpr = {Proc. {ICPR}}}
25 |
26 | @string{ijcv = {{IJCV}}}
27 |
28 | @string{inria = {{INRIA}}}
29 |
30 | @string{mit = {{MIT}}}
31 |
32 | @string{nips = {Proc. {NIPS}}}
33 |
34 | @string{pami = {{PAMI}}}
35 |
36 | @string{siggraph = {Proc. {SIGGRAPH}}}
37 |
38 | @inproceedings{krizhevsky12imagenet,
39 | Author = {A. Krizhevsky and I. Sutskever and G. E. Hinton},
40 | Booktitle = nips,
41 | Title = {ImageNet Classification with Deep Convolutional Neural Networks},
42 | Year = {2012}}
43 |
44 | @article{kinghorn96integrals,
45 | Author = {Kinghorn, D. B.},
46 | 	Journal = {International Journal of Quantum Chemistry},
47 | Pages = {141-155},
48 | 	Title = {Integrals and Derivatives for Correlated Gaussian Functions Using Matrix Differential Calculus},
49 | Volume = {57},
50 | Year = {1996}}
51 |
52 | @misc{jia13caffe,
53 | Author = {Yangqing Jia},
54 | Howpublished = {\url{http://caffe.berkeleyvision.org/}},
55 | Title = {{Caffe}: An Open Source Convolutional Architecture for Fast Feature Embedding},
56 | Year = {2013}}
57 |
58 | @inproceedings{chatfield14return,
59 | Author = {K. Chatfield and K. Simonyan and A. Vedaldi and A. Zisserman},
60 | Booktitle = bmvc,
61 | Title = {Return of the Devil in the Details: Delving Deep into Convolutional Nets},
62 | Year = {2014}}
63 |
64 | @MASTERSTHESIS{deepltbx12,
65 | author = "R. B. Palm",
66 | title = "Prediction as a candidate for learning deep hierarchical models of data",
67 | year = "2012",
68 | }
69 |
70 | @inproceedings{collobert2011torch7,
71 | title={Torch7: A matlab-like environment for machine learning},
72 | author={Collobert, Ronan and Kavukcuoglu, Koray and Farabet, Cl{\'e}ment},
73 | booktitle={BigLearn, NIPS Workshop},
74 | number={EPFL-CONF-192376},
75 | year={2011}
76 | }
77 |
78 |
79 | @INPROCEEDINGS{bergstra2010,
80 | author = {Bergstra, James and Breuleux, Olivier and Bastien, Fr{\'{e}}d{\'{e}}ric and Lamblin, Pascal and Pascanu, Razvan and Desjardins, Guillaume and Turian, Joseph and Warde-Farley, David and Bengio, Yoshua},
81 | title = {Theano: a {CPU} and {GPU} Math Expression Compiler},
82 | booktitle = {Proceedings of the Python for Scientific Computing Conference ({SciPy})},
83 | year = {2010},
84 | location = {Austin, TX},
85 | }
86 |
87 | @ARTICLE{ioffe2015,
88 | author = {{Ioffe}, S. and {Szegedy}, C.},
89 | title = "{Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift}",
90 | journal = {ArXiv e-prints},
91 | eprint = {1502.03167},
92 | keywords = {Computer Science - Learning},
93 | year = 2015,
94 | }
95 |
96 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/css/fixes.css:
--------------------------------------------------------------------------------
1 | /* Prevent blockquotes from being giant-sized; restyle */
2 | blockquote p {
3 | font-size: inherit;
4 | font-style: italic;
5 | }
6 | blockquote {
7 | border: 1px solid gray;
8 | }
9 |
10 | /* Have a space after ul */
11 | ul ul, ol ul, ul ol, ol ol {
12 | margin-bottom: 10px ;
13 | }
14 |
15 | /* Nicer font for code samples */
16 | code, kbd, pre, samp {
17 | font-family: "Lucida Console","Courier New",monospace;
18 | }
19 |
20 | /* Solves the problem of jumping to section and being hidden by fixed nav-bar */
21 | /* https://github.com/twbs/bootstrap/issues/1768 */
22 | *[id]:before {
23 | display: block;
24 | content: " ";
25 | margin-top: -70px;
26 | height: 70px;
27 | visibility: hidden;
28 | }
29 |
30 | /* Hides the prev, next and github links when browser width is between 768 and 1200px */
31 | @media (min-width: 768px) and (max-width: 1199px) {
32 | .nav.navbar-nav.navbar-right
33 | {
34 | display: none;
35 | }
36 | }
37 |
38 | canvas {
39 | background-color: transparent;
40 | }
41 |
42 | .shy {
43 | display: none ;
44 | }
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/css/tables.css:
--------------------------------------------------------------------------------
1 | table {
2 | max-width: 100%;
3 | background-color: transparent;
4 | width: 100%;
5 | margin-bottom: 20px;
6 | border-collapse: collapse;
7 | border-spacing: 0;
8 | background-color: #fff;
9 | border: 1px solid #ddd;
10 | font-size: 12px;
11 | }
12 | th {
13 | text-align: left;
14 | }
15 |
16 | table > thead > tr > th,
17 | table > tbody > tr > th,
18 | table > tfoot > tr > th,
19 | table > thead > tr > td,
20 | table > tbody > tr > td,
21 | table > tfoot > tr > td {
22 | padding: 5px;
23 | line-height: 1.42857143;
24 | vertical-align: top;
25 | border: 1px solid #ddd;
26 |
27 | }
28 | table > thead > tr > th {
29 | vertical-align: bottom;
30 | border-bottom: 2px solid #ddd;
31 | }
32 | table > caption + thead > tr:first-child > th,
33 | table > colgroup + thead > tr:first-child > th,
34 | table > thead:first-child > tr:first-child > th,
35 | table > caption + thead > tr:first-child > td,
36 | table > colgroup + thead > tr:first-child > td,
37 | table > thead:first-child > tr:first-child > td {
38 | border-top: 0;
39 | }
40 | table > tbody + tbody {
41 | border-top: 2px solid #ddd;
42 | }
43 |
44 | table > thead > tr > th,
45 | table > thead > tr > td {
46 | border-bottom-width: 2px;
47 | }
48 |
49 | table > tbody > tr:nth-child(odd) > td,
50 | table > tbody > tr:nth-child(odd) > th {
51 | background-color: #f9f9f9;
52 | }
53 | table > tbody > tr:hover > td,
54 | table > tbody > tr:hover > th {
55 | background-color: #f5f5f5;
56 | }
57 |
58 |
59 | div.rst-content ul,
60 | div.rst-content ol {
61 | margin-top: 0;
62 | margin-bottom: 10px;
63 | }
64 | div.rst-content ul ul,
65 | div.rst-content ol ul,
66 | div.rst-content ul ol,
67 | div.rst-content ol ol {
68 | margin-bottom: 0;
69 | }
70 | div.rst-content dl {
71 | margin-top: 0;
72 | margin-bottom: 20px;
73 | }
74 | div.rst-content dt,
75 | div.rst-content dd {
76 | line-height: 1.42857143;
77 | }
78 | div.rst-content dt {
79 | font-weight: bold;
80 | }
81 | div.rst-content dd {
82 | margin-left: 0;
83 | }
84 | div.rst-content li{
85 | list-style: circle;
86 | margin-left: 20px;
87 | }
88 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/faq.md:
--------------------------------------------------------------------------------
1 | # Frequently-asked questions (FAQ)
2 |
3 | ## Running MatConvNet
4 |
5 | ### Can I use MatConvNet with CuDNN?
6 |
7 | Yes, but CuDNN must be installed and linked to MatConvNet. See the
8 | [installation instructions](install.md).
9 |
10 | ### How do I fix the error `Attempt to execute SCRIPT vl_nnconv as a function`?
11 |
12 | Before the toolbox can be used, the
13 | [MEX files](http://www.mathworks.com/support/tech-notes/1600/1605.html
14 | ) must be compiled. Make sure to follow the
15 | [installation instructions](install.md). If you have done so and the
16 | MEX files are still not recognized, check that the directory
17 | `matlab/mex` contains the compiled files. If the files are
18 | there, there may be a problem with the way MEX files have been
19 | compiled.
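
A minimal sketch of the compile-and-check sequence, run from the
MatConvNet root directory (CPU-only compilation; see the installation
instructions for GPU options):

```matlab
run matlab/vl_setupnn                      % add MatConvNet to the MATLAB path
vl_compilenn ;                             % compile the MEX files (needed once)
dir(fullfile(vl_rootnn, 'matlab', 'mex'))  % the compiled files appear here
```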
20 |
21 | ### Why do files such as `vl_nnconv.m` not contain any code?
22 |
23 | Functions such as `vl_nnconv`, `vl_nnpool`, `vl_nnbnorm` and many
24 | others are implemented as MEX files. In this case, M files such as
25 | `vl_nnconv.m` contain only the function documentation. The code of the
26 | function is actually found in `matlab/src/vl_nnconv.cu` (a CUDA/C++
27 | source file) or similar.
28 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/functions.md:
--------------------------------------------------------------------------------
1 | # Function index
2 |
3 | MatConvNet includes several MATLAB functions organized as follows:
4 |
5 | - [Building blocks](#core). These functions implement the CNN
6 | computational blocks that can be combined either manually or using
7 | one of the provided wrappers to construct CNNs.
8 | - [SimpleNN wrapper](#simplenn). SimpleNN is a lightweight wrapper
9 | implementing CNNs that are linear chains of computational blocks.
10 | - [DagNN wrapper](#dagnn). DagNN is an object-oriented wrapper
11 | supporting more complex network topologies.
12 | - [Other functions](#utility). These helper functions are used to
13 | initialize and compile MatConvNet.
14 |
15 | There is no general training function as training depends on the
16 | dataset and problem. Look at the `examples` subdirectory for code
17 | showing how to train CNNs.
18 |
19 |
20 |
21 | ## Building blocks
22 |
23 | - [`vl_nnbnorm`](mfiles/vl_nnbnorm.md) Batch normalization.
24 | - [`vl_nnconv`](mfiles/vl_nnconv.md) Linear convolution by a filter.
25 | - [`vl_nnconvt`](mfiles/vl_nnconvt.md) Convolution transpose.
26 | - [`vl_nndropout`](mfiles/vl_nndropout.md) Dropout.
27 | - [`vl_nnloss`](mfiles/vl_nnloss.md) Classification log-loss.
28 | - [`vl_nnnoffset`](mfiles/vl_nnnoffset.md) Norm-dependent offset.
29 | - [`vl_nnnormalize`](mfiles/vl_nnnormalize.md) Local Response Normalization (LRN).
30 | - [`vl_nnpdist`](mfiles/vl_nnpdist.md) Pairwise distances.
31 | - [`vl_nnpool`](mfiles/vl_nnpool.md) Max and average pooling.
32 | - [`vl_nnrelu`](mfiles/vl_nnrelu.md) Rectified Linear Unit.
33 | - [`vl_nnsigmoid`](mfiles/vl_nnsigmoid.md) Sigmoid.
34 | - [`vl_nnsoftmax`](mfiles/vl_nnsoftmax.md) Channel soft-max.
35 | - [`vl_nnsoftmaxloss`](mfiles/vl_nnsoftmaxloss.md) *Deprecated*
36 |
37 |
38 |
39 | ## SimpleNN wrapper
40 |
41 | - [`vl_simplenn`](mfiles/vl_simplenn.md) A lightweight wrapper for
42 | CNNs with a linear topology.
43 | - [`vl_simplenn_diagnose`](mfiles/vl_simplenn_diagnose.md) Print
44 | diagnostics about the CNN.
45 | - [`vl_simplenn_display`](mfiles/vl_simplenn_display.md) Print
46 | information about the CNN architecture.
47 | - [`vl_simplenn_move`](mfiles/vl_simplenn_move.md) Move the CNN
48 | between CPU and GPU.
49 |
50 |
51 |
52 | ## DagNN wrapper
53 |
54 | - [`DagNN`](mfiles/+dagnn/@DagNN/DagNN.md) An object-oriented wrapper
55 |   for CNNs with complex topologies.
56 |
57 |
58 |
59 | ## Other functions
60 |
61 | - [`vl_argparse`](mfiles/vl_argparse.md) A helper function to parse
62 |   optional arguments.
63 | - [`vl_compilenn`](mfiles/vl_compilenn.md) Compile the MEX files in the toolbox.
64 | - [`vl_rootnn`](mfiles/vl_rootnn.md) Return the path to the MatConvNet toolbox installation.
65 | - [`vl_setupnn`](mfiles/vl_setupnn.md) Setup MatConvNet for use in MATLAB.
66 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/gpu.md:
--------------------------------------------------------------------------------
1 | # Using GPU acceleration
2 |
3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the
4 | Parallel Computing Toolbox. This toolbox requires CUDA-compatible
5 | cards, and you will need a copy of the corresponding
6 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to
7 | compile GPU support in MatConvNet (see [compiling](install.md#compiling)).
8 |
9 | All the core computational functions (e.g. `vl_nnconv`) in the toolbox
10 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore,
11 | switching to a GPU is as simple as converting the input CPU
12 | arrays into GPU arrays.
13 |
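For example, a SimpleNN model can be moved to the GPU as follows (a
minimal sketch; `im_` is a preprocessed input image as in the
[quick start](quick.md)):

```matlab
net = load('imagenet-vgg-f.mat') ;    % load a model on the CPU
net = vl_simplenn_move(net, 'gpu') ;  % move its parameters to the GPU
im_ = gpuArray(im_) ;                 % move the input to the GPU
res = vl_simplenn(net, im_) ;         % the CNN now runs on the GPU
scores = gather(res(end).x) ;         % fetch the result back to the CPU
```
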
14 | In order to make the best use of powerful GPUs, it is important to
15 | balance the load between CPU and GPU in order to avoid starving the
16 | latter. In training on a problem like ImageNet, the CPU(s) in your
17 | system will be busy loading data from disk and streaming it to the GPU
18 | to evaluate the CNN and its derivative. MatConvNet includes the
19 | utility `vl_imreadjpeg` to accelerate and parallelize loading images
20 | into memory (this function is currently a bottleneck and will be made
21 | more powerful in future releases).
22 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/index.md:
--------------------------------------------------------------------------------
1 | # MatConvNet: CNNs for MATLAB
2 |
3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural
4 | Networks* (CNNs) for computer vision applications. It is simple,
5 | efficient, and can run and learn state-of-the-art CNNs. Several
6 | example CNNs are included to classify and encode images.
7 |
8 | **Citing.** If you use MatConvNet in your work, please cite:
9 | "MatConvNet - Convolutional Neural Networks for MATLAB", A. Vedaldi
10 | and K. Lenc, *Proc. of the ACM Int. Conf. on Multimedia*, 2015. [BibTeX]
13 |
14 |
15 | @inproceedings{vedaldi15matconvnet,
16 | author = {A. Vedaldi and K. Lenc},
17 | title = {MatConvNet -- Convolutional Neural Networks for MATLAB},
18 |     booktitle = {Proceedings of the {ACM} Int. Conf. on Multimedia},
19 | year = {2015},
20 | }
21 |
22 |
23 | > **New:** 1.0-beta16 adds VGG-Face as a [pretrained model](pretrained.md).
24 | >
25 | > **New:** Fully-Convolutional Networks (FCN) training and evaluation
26 | > code is available
27 | > [here](https://github.com/vlfeat/matconvnet-fcn).
28 | >
29 | > **New:** 1.0-beta15 adds a few new layers to DagNN to support the
30 | > **Fully-Convolutional Networks** (FCN) for image
31 | > segmentation. Pretrained models are
32 | > [also available here](pretrained.md). Batch normalization
33 | > ([`vl_nnbnorm`](mfiles/vl_nnbnorm.md)) has also been improved, adding
34 | > features that will make it easier to remove the layer after training
35 | > a model.
36 | >
37 | > **New:** 1.0-beta14 adds a new object-oriented
38 | > [network wrapper `DagNN`](wrappers.md) supporting arbitrary network
39 | > topologies. This release also adds GoogLeNet as a pre-trained model,
40 | > new building blocks such as [`vl_nnconcat`](mfiles/vl_nnconcat.md),
41 | > a rewritten loss function block [`vl_nnloss`](mfiles/vl_nnloss.md),
42 | > better documentation, and bugfixes. There is also a new **realtime demo** (see
43 | > `examples/cnn_imagenet_camdemo.m`) using GoogLeNet, VGG-VD, or any
44 | > other similar network.
45 | >
46 | > **New:** MatConvNet used in planetary science research by the
47 | > University of Arizona (see the
48 | > [NVIDIA blog post](http://devblogs.nvidia.com/parallelforall/deep-learning-image-understanding-planetary-science/)).
49 |
50 | * **Obtaining MatConvNet**
51 | - Tarball for [version 1.0-beta16](download/matconvnet-1.0-beta16.tar.gz)
52 | - [GIT repository](http://www.github.com/vlfeat/matconvnet.git)
53 |
54 | * **Documentation**
55 | - [PDF manual](matconvnet-manual.pdf)
56 | - [MATLAB functions](functions.md)
57 | - [FAQ](faq.md)
58 |
59 | * **Getting started**
60 | - [Quick start guide](quick.md)
61 | - [Installation instructions](install.md)
62 | - [Using pre-trained models](pretrained.md): VGG-VD, GoogLeNet, FCN, ...
63 | - [Training your own models](training.md)
64 | - [CNN wrappers: linear chains or DAGs](wrappers.md)
65 | - [Working with GPU accelerated code](gpu.md)
66 | - [Tutorial](http://www.robots.ox.ac.uk/~vgg/practicals/cnn/index.html),
67 | [slides](http://www.robots.ox.ac.uk/~vedaldi/assets/teach/2015/vedaldi15aims-bigdata-lecture-4-deep-learning-handout.pdf)
68 |
69 | * **Other information**
70 | - [Changes](about/#changes)
71 | - [Developing the library](developers.md)
72 |
73 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/js/mathjaxhelper.js:
--------------------------------------------------------------------------------
1 | /*
2 | #if false
3 | Prevent Unity from trying to compile this js
4 | */
5 | MathJax.Hub.Config({
6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] }
7 | });
8 | /*
9 | #endif
10 | */
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/js/toggle.js:
--------------------------------------------------------------------------------
1 | function toggle_visibility(id) {
2 | var e = document.getElementById(id);
3 | if(e.style.display == 'block')
4 | e.style.display = 'none';
5 | else
6 | e.style.display = 'block';
7 | }
8 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/quick.md:
--------------------------------------------------------------------------------
1 | # Quick start
2 |
3 | If you are new to MatConvNet, cut & paste the following code in a
4 | MATLAB window to try out MatConvNet. The code downloads and compiles
5 | MatConvNet, downloads a pre-trained CNN, and uses the latter to
6 | classify one of MATLAB's stock images.
7 |
8 | This example requires MATLAB to be interfaced to a C/C++ compiler (try
9 | `mex -setup` if you are unsure). Depending on your Internet connection
10 | speed, downloading the CNN model may require some time.
11 |
12 | ```matlab
13 | % install and compile MatConvNet (needed once)
14 | untar('http://www.vlfeat.org/matconvnet/download/matconvnet-1.0-beta16.tar.gz') ;
15 | cd matconvnet-1.0-beta16
16 | run matlab/vl_compilenn
17 |
18 | % download a pre-trained CNN from the web (needed once)
19 | urlwrite(...
20 | 'http://www.vlfeat.org/matconvnet/models/imagenet-vgg-f.mat', ...
21 | 'imagenet-vgg-f.mat') ;
22 |
23 | % setup MatConvNet
24 | run matlab/vl_setupnn
25 |
26 | % load the pre-trained CNN
27 | net = load('imagenet-vgg-f.mat') ;
28 |
29 | % load and preprocess an image
30 | im = imread('peppers.png') ;
31 | im_ = single(im) ; % note: 0-255 range
32 | im_ = imresize(im_, net.normalization.imageSize(1:2)) ;
33 | im_ = im_ - net.normalization.averageImage ;
34 |
35 | % run the CNN
36 | res = vl_simplenn(net, im_) ;
37 |
38 | % show the classification result
39 | scores = squeeze(gather(res(end).x)) ;
40 | [bestScore, best] = max(scores) ;
41 | figure(1) ; clf ; imagesc(im) ;
42 | title(sprintf('%s (%d), score %.3f',...
43 | net.classes.description{best}, best, bestScore)) ;
44 | ```
45 |
46 | In order to compile the GPU support and other advanced features, see
47 | the [installation instructions](install.md).
48 |
49 |
50 |
51 | ## Using DAG models
52 |
53 | The example above uses a model based on the SimpleNN
54 | wrapper. More complex models instead use the DagNN wrapper. For
55 | example, to run GoogLeNet use:
56 |
57 | ```matlab
58 | % download a pre-trained CNN from the web (needed once)
59 | urlwrite(...
60 | 'http://www.vlfeat.org/matconvnet/models/imagenet-googlenet-dag.mat', ...
61 | 'imagenet-googlenet-dag.mat') ;
62 |
63 | % load the pre-trained CNN
64 | net = dagnn.DagNN.loadobj(load('imagenet-googlenet-dag.mat')) ;
65 |
66 | % load and preprocess an image
67 | im = imread('peppers.png') ;
68 | im_ = single(im) ; % note: 0-255 range
69 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
70 | im_ = im_ - net.meta.normalization.averageImage ;
71 |
72 | % run the CNN
73 | net.eval({'data', im_}) ;
74 |
75 | % obtain the CNN output
76 | scores = net.vars(net.getVarIndex('prob')).value ;
77 | scores = squeeze(gather(scores)) ;
78 |
79 | % show the classification results
80 | [bestScore, best] = max(scores) ;
81 | figure(1) ; clf ; imagesc(im) ;
82 | title(sprintf('%s (%d), score %.3f',...
83 | net.meta.classes.description{best}, best, bestScore)) ;
84 | ```
85 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/docs/training.md:
--------------------------------------------------------------------------------
1 | ## Training your own models
2 |
3 | MatConvNet can be used to train models, typically by using stochastic
4 | gradient descent (SGD) and back-propagation.
5 |
6 | The following learning demos are provided in the MatConvNet package:
7 |
8 | - **MNIST**. See `examples/cnn_mnist.m`.
9 | - **CIFAR**. See `examples/cnn_cifar.m`.
10 | - **ImageNet**. See `examples/cnn_imagenet.m`.
11 |
12 | These demos are self-contained; MNIST and CIFAR, in particular,
13 | automatically download and unpack the required data, so that they
14 | should work out-of-the-box.
15 |
16 | While MNIST and CIFAR are small datasets (by today's standards) and
17 | training is feasible on a CPU, ImageNet requires a powerful GPU to
18 | complete in a reasonable time (a few days!). It also requires the
19 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to
20 | accelerate reading large batches of JPEG images and avoid starving the
21 | GPU.
22 |
23 | All these demos use the `examples/cnn_train.m` SGD driver, a simple
24 | implementation of SGD with momentum, written directly in MATLAB code. Do
25 | not be shy: experiment with your own learning algorithms!
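
As an illustration, the driver is invoked along these lines (a hedged
sketch: `imdb` is your image database structure, `getBatch` a function
handle returning a batch of images and labels, and the option names
follow `examples/cnn_train.m`):

```matlab
[net, info] = cnn_train(net, imdb, @getBatch, ...
                        'batchSize', 100, ...
                        'numEpochs', 20, ...
                        'learningRate', 0.001) ;
```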
26 |
--------------------------------------------------------------------------------
/matconvnet/doc/site/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: MatConvNet
2 | markdown_extensions: ['extra', 'mathjax', 'toc']
3 | theme: bootstrap
4 |
5 | extra_css: ['css/tables.css', 'css/fixes.css']
6 | extra_javascript: ['https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML', 'js/mathjaxhelper.js', 'js/toggle.js']
7 |
8 | repo_url: https://github.com/vlfeat/matconvnet
9 |
10 | pages:
11 | - Home: 'index.md'
12 | - Getting Started:
13 | - Quick Start: 'quick.md'
14 | - Installing: 'install.md'
15 | - CNN Wrappers: 'wrappers.md'
16 | - Pretrained CNNs: 'pretrained.md'
17 | - Training new CNNs: 'training.md'
18 | - Using a GPU: 'gpu.md'
19 | - About: 'about.md'
20 | - Frequently-asked questions: 'faq.md'
21 |
22 | - Functions:
23 | - Index: 'functions.md'
24 | - vl_nnbnorm: 'mfiles/vl_nnbnorm.md'
25 | - vl_nnconcat: 'mfiles/vl_nnconcat.md'
26 | - vl_nnspnorm: 'mfiles/vl_nnspnorm.md'
27 | - vl_nnpdist: 'mfiles/vl_nnpdist.md'
28 | - vl_nnconv: 'mfiles/vl_nnconv.md'
29 | - vl_nnconvt: 'mfiles/vl_nnconvt.md'
30 | - vl_nndropout: 'mfiles/vl_nndropout.md'
31 | - vl_nnloss: 'mfiles/vl_nnloss.md'
32 | - vl_nnnoffset: 'mfiles/vl_nnnoffset.md'
33 | - vl_nnnormalize: 'mfiles/vl_nnnormalize.md'
34 | - vl_nnpool: 'mfiles/vl_nnpool.md'
35 | - vl_nnrelu: 'mfiles/vl_nnrelu.md'
36 | - vl_nnsigmoid: 'mfiles/vl_nnsigmoid.md'
37 | - vl_nnsoftmax: 'mfiles/vl_nnsoftmax.md'
38 | - vl_nnsoftmaxloss: 'mfiles/vl_nnsoftmaxloss.md'
39 | - DagNN wrapper: 'mfiles/+dagnn/@DagNN/DagNN.md'
40 | - vl_simplenn: 'mfiles/simplenn/vl_simplenn.md'
41 | - vl_simplenn_diagnose: 'mfiles/simplenn/vl_simplenn_diagnose.md'
42 | - vl_simplenn_display: 'mfiles/simplenn/vl_simplenn_display.md'
43 | - vl_simplenn_move: 'mfiles/simplenn/vl_simplenn_move.md'
44 | - vl_argparse: 'mfiles/vl_argparse.md'
45 | - vl_compilenn: 'mfiles/vl_compilenn.md'
46 | - vl_rootnn: 'mfiles/vl_rootnn.md'
47 | - vl_setupnn: 'mfiles/vl_setupnn.md'
48 |
49 | - Contributing:
50 | - Developers notes: 'developers.md'
51 | - Compiling from the command line: 'install-alt.md'
--------------------------------------------------------------------------------
/matconvnet/matconvnet.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 11.00
3 | # Visual Studio 2010
4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "matconvnet", "matconvnet.vcxproj", "{B0BD9132-1D90-4267-A07A-B44DE497A9C7}"
5 | EndProject
6 | Global
7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
8 | Debug|Win32 = Debug|Win32
9 | Release|Win32 = Release|Win32
10 | EndGlobalSection
11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
12 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.ActiveCfg = Debug|Win32
13 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.Build.0 = Debug|Win32
14 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.ActiveCfg = Release|Win32
15 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.Build.0 = Release|Win32
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | EndGlobal
21 |
--------------------------------------------------------------------------------
/matconvnet/matconvnet.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/matconvnet/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv CPU.xcscheme:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/matconvnet/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv GPU.xcscheme:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/matconvnet/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv cuDNN.xcscheme:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/addLayer.m:
--------------------------------------------------------------------------------
1 | function addLayer(obj, name, block, inputs, outputs, params)
2 | %ADDLAYER Adds a layer to a DagNN
3 | %   ADDLAYER(NAME, BLOCK, INPUTS, OUTPUTS, PARAMS) adds the
4 | % specified layer to the network. NAME is a string with the layer
5 | %   name, used as a unique identifier. BLOCK is the object
6 | %   implementing the layer, which should be a subclass of
7 | %   dagnn.Layer. INPUTS, OUTPUTS are cell arrays of variable names, and
8 | % PARAMS of parameter names.
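%
%   Example (hypothetical layer, variable and parameter names):
%     net.addLayer('conv1', dagnn.Conv('size', [5 5 1 20]), ...
%                  {'input'}, {'x1'}, {'filters1', 'biases1'}) ;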
9 | %
10 | % See Also REMOVELAYER().
11 |
12 | f = find(strcmp(name, {obj.layers.name})) ;
13 | if ~isempty(f), error('There is already a layer with name ''%s''.', name), end
14 | f = numel(obj.layers) + 1 ;
15 |
16 | if nargin < 6, params = {} ; end
17 | if isstr(inputs), inputs = {inputs} ; end
18 | if isstr(outputs), outputs = {outputs} ; end
19 | if isstr(params), params = {params} ; end
20 |
21 | obj.layers(f) = struct(...
22 | 'name', {name}, ...
23 | 'inputs', {inputs}, ...
24 | 'outputs', {outputs}, ...
25 | 'params', {params}, ...
26 | 'inputIndexes', {[]}, ...
27 | 'outputIndexes', {[]}, ...
28 | 'paramIndexes', {[]}, ...
29 | 'forwardTime', {[]}, ...
30 | 'backwardTime', {[]}, ...
31 | 'block', {block}) ;
32 |
33 | block.net = obj ;
34 | block.layerIndex = f ;
35 |
36 | for input = inputs
37 | obj.addVar(char(input)) ;
38 | end
39 |
40 | for output = outputs
41 | obj.addVar(char(output)) ;
42 | end
43 |
44 | for param = params
45 | obj.addParam(char(param)) ;
46 | end
47 |
48 | obj.rebuild() ;
49 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/getVarSizes.m:
--------------------------------------------------------------------------------
1 | function sizes = getVarSizes(obj, inputSizes)
2 | %GETVARSIZES Get the size of the variables
3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the
4 | %   DagNN variables given the input sizes in INPUTSIZES (name-size pairs).
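%
%   Example (hypothetical input name and size):
%     sizes = net.getVarSizes({'input', [224 224 3 1]}) ;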
5 |
6 | nv = numel(obj.vars) ;
7 | sizes = num2cell(NaN(nv, 4),2)' ;
8 |
9 | for i = 1:2:numel(inputSizes)
10 | v = obj.getVarIndex(inputSizes{i}) ;
11 | sizes{v} = inputSizes{i+1}(:)' ;
12 | end
13 |
14 | for layer = obj.layers
15 | in = layer.inputIndexes ;
16 | out = layer.outputIndexes ;
17 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ;
18 | end
19 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/initParams.m:
--------------------------------------------------------------------------------
1 | function initParams(obj)
2 | % INITPARAM Initialize the parameters of the DagNN
3 | % OBJ.INITPARAM() uses the INIT() method of each layer to initialize
4 | % the corresponding parameters (usually randomly).
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | for l = 1:numel(obj.layers)
13 | p = obj.getParamIndex(obj.layers(l).params) ;
14 | params = obj.layers(l).block.initParams() ;
15 | switch obj.device
16 | case 'cpu'
17 | params = cellfun(@gather, params, 'UniformOutput', false) ;
18 | case 'gpu'
19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ;
20 | end
21 | [obj.params(p).value] = deal(params{:}) ;
22 | end
23 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/loadobj.m:
--------------------------------------------------------------------------------
1 | function obj = loadobj(s)
2 | % LOADOBJ Initialize a DagNN object from a structure.
3 | %   OBJ = LOADOBJ(S) initializes a DagNN object from the structure
4 | % S. It is the opposite of S = OBJ.SAVEOBJ().
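%
%   Example (model file name as in the quick-start guide):
%     net = dagnn.DagNN.loadobj(load('imagenet-googlenet-dag.mat')) ;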
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | if isstruct(s)
13 | obj = dagnn.DagNN() ;
14 | for l = 1:numel(s.layers)
15 | constr = str2func(s.layers(l).type) ;
16 | block = constr() ;
17 | block.load(struct(s.layers(l).block)) ;
18 | obj.addLayer(...
19 | s.layers(l).name, ...
20 | block, ...
21 | s.layers(l).inputs, ...
22 | s.layers(l).outputs, ...
23 | s.layers(l).params) ;
24 | end
25 | if isfield(s, 'params')
26 | for f = setdiff(fieldnames(s.params)','name')
27 | f = char(f) ;
28 | for i = 1:numel(s.params)
29 | p = obj.getParamIndex(s.params(i).name) ;
30 | obj.params(p).(f) = s.params(i).(f) ;
31 | end
32 | end
33 | end
34 | if isfield(s, 'vars')
35 | for f = setdiff(fieldnames(s.vars)','name')
36 | f = char(f) ;
37 | for i = 1:numel(s.vars)
38 | p = obj.getVarIndex(s.vars(i).name) ;
39 | obj.vars(p).(f) = s.vars(i).(f) ;
40 | end
41 | end
42 | end
43 | for f = setdiff(fieldnames(s)', {'vars','params','layers'})
44 | f = char(f) ;
45 | obj.(f) = s.(f) ;
46 | end
47 | else
48 | obj = s ;
49 | end
50 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/move.m:
--------------------------------------------------------------------------------
1 | function move(obj, device)
2 | %MOVE Move the DagNN to either CPU or GPU
3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU.
4 | %
5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU.
6 |
7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | obj.reset() ;
14 | obj.device = device ;
15 | switch device
16 | case 'gpu'
17 | for i=1:numel(obj.params)
18 | obj.params(i).value = gpuArray(obj.params(i).value) ;
19 | end
20 | case 'cpu'
21 | for i=1:numel(obj.params)
22 | obj.params(i).value = gather(obj.params(i).value) ;
23 | end
24 | otherwise
25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ;
26 | end
27 | for l = 1:numel(obj.layers)
28 | obj.layers(l).block.move(device) ;
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/rebuild.m:
--------------------------------------------------------------------------------
1 | function rebuild(obj)
2 | %REBUILD Rebuild the internal data structures of a DagNN object
3 | % REBUILD(obj) rebuilds the internal data structures
4 | %   of the DagNN obj. It is a helper function used internally
5 | % to update the network when layers are added or removed.
6 |
7 | varFanIn = zeros(1, numel(obj.vars)) ;
8 | varFanOut = zeros(1, numel(obj.vars)) ;
9 | parFanOut = zeros(1, numel(obj.params)) ;
10 |
11 | for l = 1:numel(obj.layers)
12 | ii = obj.getVarIndex(obj.layers(l).inputs) ;
13 | oi = obj.getVarIndex(obj.layers(l).outputs) ;
14 | pi = obj.getParamIndex(obj.layers(l).params) ;
15 | obj.layers(l).inputIndexes = ii ;
16 | obj.layers(l).outputIndexes = oi ;
17 | obj.layers(l).paramIndexes = pi ;
18 | varFanOut(ii) = varFanOut(ii) + 1 ;
19 | varFanIn(oi) = varFanIn(oi) + 1 ;
20 | parFanOut(pi) = parFanOut(pi) + 1 ;
21 | end
22 |
23 | [obj.vars.fanin] = tolist(num2cell(varFanIn)) ;
24 | [obj.vars.fanout] = tolist(num2cell(varFanOut)) ;
25 | if ~isempty(parFanOut),
26 | [obj.params.fanout] = tolist(num2cell(parFanOut)) ;
27 | end
28 |
29 | % dump unused variables
30 | keep = (varFanIn + varFanOut) > 0 ;
31 | obj.vars = obj.vars(keep) ;
32 | varRemap = cumsum(keep) ;
33 |
34 | % dump unused parameters
35 | keep = parFanOut > 0 ;
36 | obj.params = obj.params(keep) ;
37 | parRemap = cumsum(keep) ;
38 |
39 | % update the indexes to account for removed layers, variables and parameters
40 | for l = 1:numel(obj.layers)
41 | obj.layers(l).inputIndexes = varRemap(obj.layers(l).inputIndexes) ;
42 | obj.layers(l).outputIndexes = varRemap(obj.layers(l).outputIndexes) ;
43 | obj.layers(l).paramIndexes = parRemap(obj.layers(l).paramIndexes) ;
44 | obj.layers(l).block.layerIndex = l ;
45 | end
46 |
47 | % update the variable and parameter names hash maps
48 | obj.varNames = cell2struct(num2cell(1:numel(obj.vars)), {obj.vars.name}, 2) ;
49 | obj.paramNames = cell2struct(num2cell(1:numel(obj.params)), {obj.params.name}, 2) ;
50 | obj.layerNames = cell2struct(num2cell(1:numel(obj.layers)), {obj.layers.name}, 2) ;
51 |
52 | % determine the execution order again (and check for consistency)
53 | obj.executionOrder = getOrder(obj) ;
54 |
55 | % --------------------------------------------------------------------
56 | function order = getOrder(obj)
57 | % --------------------------------------------------------------------
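% A note on the algorithm: this is a depth-first topological sort.
% hops{v} lists the layers that consume variable v; dagSort marks a layer
% with -1 while it is on the recursion stack, so reaching a layer already
% marked -1 signals a cycle.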
58 | hops = cell(1, numel(obj.vars)) ;
59 | for l = 1:numel(obj.layers)
60 | for v = obj.layers(l).inputIndexes
61 | hops{v}(end+1) = l ;
62 | end
63 | end
64 | order = zeros(1, numel(obj.layers)) ;
65 | for l = 1:numel(obj.layers)
66 | if order(l) == 0
67 | order = dagSort(obj, hops, order, l) ;
68 | end
69 | end
70 | if any(order == -1)
71 |     warning('The network graph contains a cycle') ;
72 | end
73 | [~,order] = sort(order, 'descend') ;
74 |
75 | % --------------------------------------------------------------------
76 | function order = dagSort(obj, hops, order, layer)
77 | % --------------------------------------------------------------------
78 | if order(layer) > 0, return ; end
79 | order(layer) = -1 ; % mark as open
80 | n = 0 ;
81 | for o = obj.layers(layer).outputIndexes ;
82 | for child = hops{o}
83 | if order(child) == -1
84 | return ;
85 | end
86 | if order(child) == 0
87 | order = dagSort(obj, hops, order, child) ;
88 | end
89 | n = max(n, order(child)) ;
90 | end
91 | end
92 | order(layer) = n + 1 ;
93 |
94 | function varargout = tolist(x)
95 | [varargout{1:numel(x)}] = x{:} ;
96 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/removeLayer.m:
--------------------------------------------------------------------------------
1 | function removeLayer(obj, name)
2 | %REMOVELAYER Remove a layer from the network
3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object
4 | % OBJ.
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | f = find(strcmp(name, {obj.layers.name})) ;
13 | if isempty(f), error('There is no layer ''%s''.', name), end
14 | layer = obj.layers(f) ;
15 | obj.layers(f) = [] ;
16 | obj.rebuild() ;
17 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/renameVar.m:
--------------------------------------------------------------------------------
1 | function renameVar(obj, oldName, newName)
2 | %RENAMEVAR Rename a variable
3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing variable.
6 |
7 | if any(strcmp(newName, {obj.vars.name}))
8 | error('%s is the name of an existing variable', newName) ;
9 | end
10 |
11 | v = obj.getVarIndex(oldName) ;
12 | if isnan(v)
13 | error('%s is not an existing variable', oldName) ;
14 | end
15 |
16 | for l = 1:numel(obj.layers)
17 | for f = {'inputs', 'outputs'}
18 | f = char(f) ;
19 | sel = find(strcmp(oldName, obj.layers(l).(f))) ;
20 | [obj.layers(l).(f){sel}] = deal(newName) ;
21 | end
22 | end
23 | obj.vars(v).name = newName ;
24 |
25 | % update the variable name hash, otherwise rebuild() won't find this var correctly
26 | obj.varNames.(newName) = v ;
27 |
28 | obj.rebuild() ;
29 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/reset.m:
--------------------------------------------------------------------------------
1 | function reset(obj)
2 | % RESET Reset the DagNN
3 | %   RESET(obj) resets the DagNN obj. The function clears any
4 | %   intermediate value stored in the DagNN object, including parameter
5 | %   gradients. It also calls the reset function of every layer.
6 |
7 | [obj.vars.value] = deal([]) ;
8 | [obj.vars.der] = deal([]) ;
9 | [obj.params.der] = deal([]) ;
10 | for l = 1:numel(obj.layers)
11 | obj.layers(l).block.reset() ;
12 | end
13 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/saveobj.m:
--------------------------------------------------------------------------------
1 | function s = saveobj(obj)
2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure
3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB
4 | % structure S. This is particularly convenient to preserve future
5 | % compatibility and to ship networks that are pure structures,
6 | %   instead of embedding dependencies on code.
7 | %
8 | %   The object can be reconstructed by `obj = DagNN.loadobj(s)`.
9 |
10 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
11 | % All rights reserved.
12 | %
13 | % This file is part of the VLFeat library and is made available under
14 | % the terms of the BSD license (see the COPYING file).
15 |
16 | device = obj.device ;
17 | obj.move('cpu') ;
18 | s.vars = struct(...
19 | 'name', {obj.vars.name}, ...
20 | 'precious', {obj.vars.precious}) ;
21 | s.params = struct(...
22 | 'name', {obj.params.name}, ...
23 | 'value', {obj.params.value}, ...
24 | 'learningRate', {obj.params.learningRate}, ...
25 | 'weightDecay', {obj.params.weightDecay}) ;
26 | s.layers = struct(...
27 | 'name', {obj.layers.name}, ...
28 | 'type', {[]}, ...
29 | 'inputs', {obj.layers.inputs}, ...
30 | 'outputs', {obj.layers.outputs}, ...
31 | 'params', {obj.layers.params}, ...
32 | 'block', {[]}) ;
33 | s.meta = obj.meta ;
34 |
35 | for l = 1:numel(obj.layers)
36 | block = obj.layers(l).block ;
37 | slayer = block.save() ;
38 | s.layers(l).type = class(block) ;
39 | s.layers(l).block = slayer ;
40 | end
41 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/setLayerInputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerInputs(obj, layer, inputs)
2 | %SETLAYERINPUTS Set or change the inputs to a layer
3 | % Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for input = inputs
8 | v(end+1) = obj.addVar(char(input)) ;
9 | end
10 | obj.layers(l).inputs = inputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/setLayerOutputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerOutputs(obj, layer, outputs)
2 | %SETLAYEROUTPUTS Set or change the outputs of a layer
3 | % Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for output = outputs
8 | v(end+1) = obj.addVar(char(output)) ;
9 | end
10 | obj.layers(l).outputs = outputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/@DagNN/setLayerParams.m:
--------------------------------------------------------------------------------
1 | function v = setLayerParams(obj, layer, params)
2 | %SETLAYERPARAMS Set or change the parameters of a layer
3 | % Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for param = params
8 | v(end+1) = obj.addParam(char(param)) ;
9 | end
10 | obj.layers(l).params = params ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/BatchNorm.m:
--------------------------------------------------------------------------------
1 | classdef BatchNorm < dagnn.ElementWise
2 | methods
3 | function outputs = forward(obj, inputs, params)
4 | outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
9 | [derInputs{1}, derParams{1}, derParams{2}] = ...
10 | vl_nnbnorm(inputs{1}, params{1}, params{2}, derOutputs{1}) ;
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Concat.m:
--------------------------------------------------------------------------------
1 | classdef Concat < dagnn.ElementWise
2 | properties
3 | dim = 3
4 | numInputs = 2
5 | end
6 |
7 | properties (Transient)
8 | inputSizes = {}
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | outputs{1} = vl_nnconcat(inputs, obj.dim) ;
14 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
18 | derInputs = vl_nnconcat(inputs, obj.dim, derOutputs{1}, 'inputSizes', obj.inputSizes) ;
19 | derParams = {} ;
20 | end
21 |
22 | function reset(obj)
23 | obj.inputSizes = {} ;
24 | end
25 |
26 | function outputSizes = getOutputSizes(obj, inputSizes)
27 | sz = inputSizes{1} ;
28 | for k = 2:numel(inputSizes)
29 | sz(obj.dim) = sz(obj.dim) + inputSizes{k}(obj.dim) ;
30 | end
31 | outputSizes{1} = sz ;
32 | end
33 |
34 | function rfs = getReceptiveFields(obj)
35 | if obj.dim == 3 || obj.dim == 4
36 | rfs = getReceptiveFields@dagnn.ElementWise(obj) ;
37 | rfs = repmat(rfs, obj.numInputs, 1) ;
38 | else
39 | for i = 1:obj.numInputs
40 | rfs(i,1).size = [NaN NaN] ;
41 | rfs(i,1).stride = [NaN NaN] ;
42 | rfs(i,1).offset = [NaN NaN] ;
43 | end
44 | end
45 | end
46 |
47 | function obj = Concat(varargin)
48 | obj.load(varargin) ;
49 | end
50 | end
51 | end
52 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Conv.m:
--------------------------------------------------------------------------------
1 | classdef Conv < dagnn.Filter
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | if ~obj.hasBias, params{2} = [] ; end
11 | outputs{1} = vl_nnconv(...
12 | inputs{1}, params{1}, params{2}, ...
13 | 'pad', obj.pad, ...
14 | 'stride', obj.stride, ...
15 | obj.opts{:}) ;
16 | end
17 |
18 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
19 | if ~obj.hasBias, params{2} = [] ; end
20 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconv(...
21 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
22 | 'pad', obj.pad, ...
23 | 'stride', obj.stride, ...
24 | obj.opts{:}) ;
25 | end
26 |
27 | function kernelSize = getKernelSize(obj)
28 | kernelSize = obj.size(1:2) ;
29 | end
30 |
31 | function outputSizes = getOutputSizes(obj, inputSizes)
32 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
33 | outputSizes{1}(3) = obj.size(4) ;
34 | end
35 |
36 | function params = initParams(obj)
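      % The weights are scaled by sqrt(2 / fan-in), i.e. the ReLU-aware
      % initialization of He et al., "Delving Deep into Rectifiers".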
37 | sc = sqrt(2 / prod(obj.size(1:3))) ;
38 | params{1} = randn(obj.size,'single') * sc ;
39 | if obj.hasBias
40 | params{2} = zeros(obj.size(4),1,'single') * sc ;
41 | end
42 | end
43 |
44 | function obj = Conv(varargin)
45 | obj.load(varargin) ;
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/ConvTranspose.m:
--------------------------------------------------------------------------------
1 | classdef ConvTranspose < dagnn.Layer
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | upsample = [1 1]
6 | crop = [0 0 0 0]
7 | numGroups = 1
8 | opts = {'cuDNN'}
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if ~obj.hasBias, params{2} = [] ; end
14 | outputs{1} = vl_nnconvt(...
15 | inputs{1}, params{1}, params{2}, ...
16 | 'upsample', obj.upsample, ...
17 | 'crop', obj.crop, ...
18 | 'numGroups', obj.numGroups, ...
19 | obj.opts{:}) ;
20 | end
21 |
22 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
23 | if ~obj.hasBias, params{2} = [] ; end
24 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconvt(...
25 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
26 | 'upsample', obj.upsample, ...
27 | 'crop', obj.crop, ...
28 | 'numGroups', obj.numGroups, ...
29 | obj.opts{:}) ;
30 | end
31 |
32 | function outputSizes = getOutputSizes(obj, inputSizes)
33 | outputSizes{1} = [...
34 | obj.upsample(1) * (inputSizes{1}(1) - 1) + obj.size(1) - obj.crop(1) - obj.crop(2), ...
35 | obj.upsample(2) * (inputSizes{1}(2) - 1) + obj.size(2) - obj.crop(3) - obj.crop(4), ...
36 | obj.size(4), ...
37 | inputSizes{1}(4)] ;
38 | end
39 |
40 | function rfs = getReceptiveFields(obj)
41 | rfs.size = (obj.size(1:2) - 1) ./ obj.upsample + 1 ;
42 | rfs.stride = 1 ./ [obj.upsample] ;
43 | rfs.offset = (2*obj.crop([1 3]) - obj.size(1:2) + 1) ...
44 | ./ (2*obj.upsample) + 1 ;
45 | end
46 |
47 | function params = initParams(obj)
48 | % todo: test this initialization method
49 | sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
50 | params{1} = randn(obj.size,'single') * sc ;
51 | if obj.hasBias
52 | params{2} = zeros(obj.size(3),1,'single') * sc ;
53 | end
54 | end
55 |
56 | function set.crop(obj, crop)
57 | if numel(crop) == 1
58 | obj.crop = [crop crop crop crop] ;
59 | elseif numel(crop) == 2
60 | obj.crop = crop([1 1 2 2]) ;
61 | else
62 | obj.crop = crop ;
63 | end
64 | end
65 |
66 | function set.upsample(obj, upsample)
67 | if numel(upsample) == 1
68 | obj.upsample = [upsample upsample] ;
69 | else
70 | obj.upsample = upsample ;
71 | end
72 | end
73 |
74 | function obj = ConvTranspose(varargin)
75 | obj.load(varargin) ;
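      % re-assigning upsample invokes set.upsample, normalizing a scalar
      % value loaded above into the [y x] pair form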
76 | obj.upsample = obj.upsample ;
77 | end
78 | end
79 | end
80 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Crop.m:
--------------------------------------------------------------------------------
1 | classdef Crop < dagnn.ElementWise
2 | %CROP DagNN cropping layer.
3 | %   This is a peculiar layer from FCN. It crops inputs{1} to
4 | % match the size of inputs{2} (starting with a base crop amount).
5 | % A future version
6 |
7 | properties
8 | crop = [0 0]
9 | end
10 |
11 | properties (Transient)
12 | inputSizes = {}
13 | end
14 |
15 | methods
16 | function crop = getAdaptedCrops(obj)
17 | cropv = obj.inputSizes{1}(1) - obj.inputSizes{2}(1) ;
18 | cropu = obj.inputSizes{1}(2) - obj.inputSizes{2}(2) ;
19 | cropv1 = max(0, cropv - obj.crop(1)) ;
20 | cropu1 = max(0, cropu - obj.crop(2)) ;
21 | crop = [cropv - cropv1, cropv1, cropu - cropu1, cropu1] ;
22 | end
23 |
24 | function outputs = forward(obj, inputs, params)
25 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
26 | adjCrop = obj.getAdaptedCrops() ;
27 | outputs{1} = vl_nncrop(inputs{1}, adjCrop) ;
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | adjCrop = obj.getAdaptedCrops() ;
32 | derInputs{1} = vl_nncrop(inputs{1}, adjCrop, derOutputs{1}, obj.inputSizes{1}) ;
33 | derInputs{2} = [] ;
34 | derParams = {} ;
35 | end
36 |
37 | function reset(obj)
38 | obj.inputSizes = {} ;
39 | end
40 |
41 | function outputSizes = getOutputSizes(obj, inputSizes)
42 | obj.inputSizes = inputSizes ;
43 | crop = obj.getAdaptedCrops() ;
44 | outputSizes{1} = inputSizes{1} - [crop(1)+crop(2), crop(3)+crop(4), 0, 0] ;
45 | end
46 |
47 | function rfs = getReceptiveFields(obj)
48 | rfs(1,1).size = [1 1] ;
49 | rfs(1,1).stride = [1 1] ;
50 | rfs(1,1).offset = 1 + obj.crop ;
51 | rfs(2,1).size = [] ;
52 | rfs(2,1).stride = [] ;
53 | rfs(2,1).offset = [] ;
54 | end
55 |
56 | function obj = Crop(varargin)
57 | obj.load(varargin) ;
58 | end
59 | end
60 | end
61 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/DropOut.m:
--------------------------------------------------------------------------------
1 | classdef DropOut < dagnn.ElementWise
2 | properties
3 | rate = 0.5
4 | frozen = false
5 | end
6 |
7 | properties (Transient)
8 | mask
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if strcmp(obj.net.mode, 'test')
14 | outputs = inputs ;
15 | return ;
16 | end
17 |       if obj.frozen && ~isempty(obj.mask)
18 |         outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ;
19 | else
20 | [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ;
21 | end
22 | end
23 |
24 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
25 | if strcmp(obj.net.mode, 'test')
26 | derInputs = derOutputs ;
27 | derParams = {} ;
28 | return ;
29 | end
30 | derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ;
31 | derParams = {} ;
32 | end
33 |
34 | % ---------------------------------------------------------------------
35 | function obj = DropOut(varargin)
36 | obj.load(varargin{:}) ;
37 | end
38 |
39 | function obj = reset(obj)
40 | reset@dagnn.ElementWise(obj) ;
41 | obj.mask = [] ;
42 | obj.frozen = false ;
43 | end
44 | end
45 | end
46 |
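The frozen flag above makes forward passes reuse the stored mask instead of drawing a new one, which makes the layer deterministic (useful, e.g., for numerical gradient checks). A minimal standalone sketch of the underlying calls (the 'mask' option is the same one the layer's forward method passes):

    x = randn(4,4,2,1,'single') ;
    [y1, mask] = vl_nndropout(x, 'rate', 0.5) ;   % draws a fresh mask
    y2 = vl_nndropout(x, 'mask', mask) ;          % replays the stored mask
    % isequal(y1, y2) is true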
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/ElementWise.m:
--------------------------------------------------------------------------------
1 | classdef ElementWise < dagnn.Layer
2 | %ELEMENTWISE DagNN layers that operate at individual spatial locations
3 | methods
4 | function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes)
5 | outputSizes = inputSizes ;
6 | transforms = {eye(6)} ;
7 | end
8 |
9 | function rfs = getReceptiveFields(obj)
10 | rfs.size = [1 1] ;
11 | rfs.stride = [1 1] ;
12 | rfs.offset = [1 1] ;
13 | end
14 |
15 | function outputSizes = getOutputSizes(obj, inputSizes)
16 | outputSizes = inputSizes ;
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Filter.m:
--------------------------------------------------------------------------------
1 | classdef Filter < dagnn.Layer
2 | properties
3 | pad = [0 0 0 0]
4 | stride = [1 1]
5 | end
6 | methods
7 | function set.pad(obj, pad)
8 | if numel(pad) == 1
9 | obj.pad = [pad pad pad pad] ;
10 | elseif numel(pad) == 2
11 | obj.pad = pad([1 1 2 2]) ;
12 | else
13 | obj.pad = pad ;
14 | end
15 | end
16 |
17 | function set.stride(obj, stride)
18 | if numel(stride) == 1
19 | obj.stride = [stride stride] ;
20 | else
21 | obj.stride = stride ;
22 | end
23 | end
24 |
25 | function kernelSize = getKernelSize(obj)
26 | kernelSize = [1 1] ;
27 | end
28 |
29 | function outputSizes = getOutputSizes(obj, inputSizes)
30 | ks = obj.getKernelSize() ;
31 | outputSizes{1} = [...
32 | fix((inputSizes{1}(1) + obj.pad(1) + obj.pad(2) - ks(1)) / obj.stride(1)) + 1, ...
33 | fix((inputSizes{1}(2) + obj.pad(3) + obj.pad(4) - ks(2)) / obj.stride(2)) + 1, ...
34 | 1, ...
35 | inputSizes{1}(4)] ;
36 | end
37 |
38 | function rfs = getReceptiveFields(obj)
39 | ks = obj.getKernelSize() ;
40 | y1 = 1 - obj.pad(1) ;
41 | y2 = 1 - obj.pad(1) + ks(1) - 1 ;
42 | x1 = 1 - obj.pad(3) ;
43 | x2 = 1 - obj.pad(3) + ks(2) - 1 ;
44 | h = y2 - y1 + 1 ;
45 | w = x2 - x1 + 1 ;
46 | rfs.size = [h, w] ;
47 | rfs.stride = obj.stride ;
48 | rfs.offset = [y1+y2, x1+x2]/2 ;
49 | end
50 | end
51 | end
52 |
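The receptive-field computation above can be sanity-checked with any concrete Filter subclass. A minimal sketch using Pooling (hypothetical parameters):

    % a 3x3 window, stride 1, no padding: receptive field is 3x3,
    % centred at offset (1+3)/2 = 2 in both dimensions.
    p = dagnn.Pooling('poolSize', [3 3], 'stride', 1, 'pad', 0) ;
    rf = p.getReceptiveFields() ;
    % rf.size = [3 3], rf.stride = [1 1], rf.offset = [2 2]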
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/LRN.m:
--------------------------------------------------------------------------------
1 | classdef LRN < dagnn.ElementWise
2 | properties
3 | param = [5 1 0.0001/5 0.75]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = LRN(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Loss.m:
--------------------------------------------------------------------------------
1 | classdef Loss < dagnn.ElementWise
2 | properties
3 | loss = 'softmaxlog'
4 | end
5 |
6 | properties (Transient)
7 | average = 0
8 | numAveraged = 0
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss) ;
14 | n = obj.numAveraged ;
15 | m = n + size(inputs{1},4) ;
16 | obj.average = (n * obj.average + gather(outputs{1})) / m ;
17 | obj.numAveraged = m ;
18 | end
19 |
20 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
21 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss) ;
22 | derInputs{2} = [] ;
23 | derParams = {} ;
24 | end
25 |
26 | function reset(obj)
27 | obj.average = 0 ;
28 | obj.numAveraged = 0 ;
29 | end
30 |
31 | function outputSizes = getOutputSizes(obj, inputSizes, paramSizes)
32 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ;
33 | end
34 |
35 | function rfs = getReceptiveFields(obj)
36 | % the receptive field depends on the dimension of the variables
37 | % which is not known until the network is run
38 | rfs(1,1).size = [NaN NaN] ;
39 | rfs(1,1).stride = [NaN NaN] ;
40 | rfs(1,1).offset = [NaN NaN] ;
41 | rfs(2,1) = rfs(1,1) ;
42 | end
43 |
44 | function obj = Loss(varargin)
45 | obj.load(varargin) ;
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/NormOffset.m:
--------------------------------------------------------------------------------
1 | classdef NormOffset < dagnn.ElementWise
2 | properties
3 | param = [1 0.5]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = NormOffset(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Pooling.m:
--------------------------------------------------------------------------------
1 | classdef Pooling < dagnn.Filter
2 | properties
3 | method = 'max'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = Pooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/ReLU.m:
--------------------------------------------------------------------------------
1 | classdef ReLU < dagnn.ElementWise
2 | properties
3 | useShortCircuit = true
4 | opts = {}
5 | end
6 |
7 | methods
8 | function outputs = forward(obj, inputs, params)
9 | outputs{1} = vl_nnrelu(inputs{1}, [], obj.opts{:}) ;
10 | end
11 |
12 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
13 | derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}, obj.opts{:}) ;
14 | derParams = {} ;
15 | end
16 |
17 | function forwardAdvanced(obj, layer)
18 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
19 | forwardAdvanced@dagnn.Layer(obj, layer) ;
20 | return ;
21 | end
22 | net = obj.net ;
23 | in = layer.inputIndexes ;
24 | out = layer.outputIndexes ;
25 | net.vars(out).value = vl_nnrelu(net.vars(in).value, [], obj.opts{:}) ;
26 | if ~net.vars(in).precious
27 | net.vars(in).value = [] ;
28 | end
29 | end
30 |
31 | function backwardAdvanced(obj, layer)
32 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
33 | backwardAdvanced@dagnn.Layer(obj, layer) ;
34 | return ;
35 | end
36 | net = obj.net ;
37 | in = layer.inputIndexes ;
38 | out = layer.outputIndexes ;
39 |
40 | if isempty(net.vars(out).der), return ; end
41 |
42 | derInput = vl_nnrelu(net.vars(out).value, net.vars(out).der, obj.opts{:}) ;
43 |
44 | if ~net.vars(out).precious
45 | net.vars(out).der = [] ;
46 | net.vars(out).value = [] ;
47 | end
48 |
49 | if net.numPendingVarRefs(in) == 0
50 | net.vars(in).der = derInput ;
51 | else
52 |         net.vars(in).der = net.vars(in).der + derInput ;
53 | end
54 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ;
55 | end
56 |
57 | function obj = ReLU(varargin)
58 | obj.load(varargin) ;
59 | end
60 | end
61 | end
62 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Sigmoid.m:
--------------------------------------------------------------------------------
1 | classdef Sigmoid < dagnn.ElementWise
2 | methods
3 | function outputs = forward(obj, inputs, params)
4 | outputs{1} = vl_nnsigmoid(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/SoftMax.m:
--------------------------------------------------------------------------------
1 | classdef SoftMax < dagnn.ElementWise
2 | methods
3 | function outputs = forward(self, inputs, params)
4 | outputs{1} = vl_nnsoftmax(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 |
12 | function obj = SoftMax(varargin)
13 | obj.load(varargin) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/SpatialNorm.m:
--------------------------------------------------------------------------------
1 | classdef SpatialNorm < dagnn.ElementWise
2 | properties
3 | param = [2 2 10 2]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = SpatialNorm(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet/matlab/+dagnn/Sum.m:
--------------------------------------------------------------------------------
1 | classdef Sum < dagnn.ElementWise
2 | %SUM DagNN sum layer
 3 |   %   The SUM layer takes the sum of all its inputs and stores the result
4 | % as its only output.
5 |
6 | properties (Transient)
7 | numInputs
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | obj.numInputs = numel(inputs) ;
13 | outputs{1} = inputs{1} ;
14 | for k = 2:obj.numInputs
15 | outputs{1} = outputs{1} + inputs{k} ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | for k = 1:obj.numInputs
21 | derInputs{k} = derOutputs{1} ;
22 | end
23 | derParams = {} ;
24 | end
25 |
26 | function outputSizes = getOutputSizes(obj, inputSizes)
27 | outputSizes{1} = inputSizes{1} ;
28 | for k = 2:numel(inputSizes)
29 | if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1}))
30 | if ~isequal(inputSizes{k}, outputSizes{1})
31 |           warning('Sum layer: the dimensions of the input variables are not all the same.') ;
32 | end
33 | end
34 | end
35 | end
36 |
37 | function rfs = getReceptiveFields(obj)
38 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
39 | rfs.size = [1 1] ;
40 | rfs.stride = [1 1] ;
41 | rfs.offset = [1 1] ;
42 | rfs = repmat(rfs, numInputs, 1) ;
43 | end
44 |
45 | function obj = Sum(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/matconvnet/matlab/simplenn/vl_simplenn_diagnose.m:
--------------------------------------------------------------------------------
1 | function vl_simplenn_diagnose(net, res)
2 | % VL_SIMPLENN_DIAGNOSE Plot diagnostic information
3 | % VL_SIMPLENN_DIAGNOSE(NET, RES) plots in the current window
 4 | %    the average, maximum, and minimum element for all the filters
5 | % and biases in the network NET. If RES is also provided, it will
6 | % plot the average, minimum, and maximum element for all the
 7 | %    intermediate responses and derivatives stored in RES as well.
8 | %
9 | % This function can be used to rapidly glance at the evolution
10 | %    of the parameters during training.
11 |
12 | n = numel(net.layers) ;
13 | fmu = NaN + zeros(1, n) ;
14 | fmi = fmu ;
15 | fmx = fmu ;
16 | bmu = fmu ;
17 | bmi = fmu ;
18 | bmx = fmu ;
19 | xmu = fmu ;
20 | xmi = fmi ;
21 | xmx = fmx ;
22 | dxmu = fmu ;
23 | dxmi = fmi ;
24 | dxmx = fmx ;
25 | dfmu = fmu ;
26 | dfmi = fmu ;
27 | dfmx = fmu ;
28 | dbmu = fmu ;
29 | dbmi = fmu ;
30 | dbmx = fmu ;
31 |
32 | for i=1:numel(net.layers)
33 | ly = net.layers{i} ;
34 | if ismember(ly.type, {'conv', 'bnorm'}) && numel(ly.filters) > 0
35 | x = gather(ly.filters) ;
36 | fmu(i) = mean(x(:)) ;
37 | fmi(i) = min(x(:)) ;
38 | fmx(i) = max(x(:)) ;
39 | end
40 | if ismember(ly.type, {'conv', 'bnorm'}) && numel(ly.biases) > 0
41 | x = gather(ly.biases) ;
42 | bmu(i) = mean(x(:)) ;
43 | bmi(i) = min(x(:)) ;
44 | bmx(i) = max(x(:)) ;
45 | end
46 | if nargin > 1
47 | if numel(res(i).x) > 1
48 | x = gather(res(i).x) ;
49 | xmu(i) = mean(x(:)) ;
50 | xmi(i) = min(x(:)) ;
51 | xmx(i) = max(x(:)) ;
52 | end
53 | if numel(res(i).dzdx) > 1
54 | x = gather(res(i).dzdx);
55 | dxmu(i) = mean(x(:)) ;
56 | dxmi(i) = min(x(:)) ;
57 | dxmx(i) = max(x(:)) ;
58 | end
59 | if ismember(ly.type, {'conv', 'bnorm'}) && numel(res(i).dzdw{1}) > 0
60 | x = gather(res(i).dzdw{1}) ;
61 | dfmu(i) = mean(x(:)) ;
62 | dfmi(i) = min(x(:)) ;
63 | dfmx(i) = max(x(:)) ;
64 | end
65 | if ismember(ly.type, {'conv', 'bnorm'}) && numel(res(i).dzdw{2}) > 0
66 | x = gather(res(i).dzdw{2}) ;
67 | dbmu(i) = mean(x(:)) ;
68 | dbmi(i) = min(x(:)) ;
69 | dbmx(i) = max(x(:)) ;
70 | end
71 | end
72 | end
73 |
74 | if nargin > 1
75 | np = 6 ;
76 | else
77 | np = 2 ;
78 | end
79 |
80 | clf ; subplot(np,1,1) ;
81 | errorbar(1:n, fmu, fmi, fmx, 'bo') ;
82 | grid on ;
83 | xlabel('layer') ;
84 | ylabel('filters') ;
85 | title('coefficient ranges') ;
86 |
87 | subplot(np,1,2) ;
88 | errorbar(1:n, bmu, bmi, bmx, 'bo') ;
89 | grid on ;
90 | xlabel('layer') ;
91 | ylabel('biases') ;
92 |
93 | if nargin > 1
94 | subplot(np,1,3) ;
95 | errorbar(1:n, xmu, xmi, xmx, 'bo') ;
96 | grid on ;
97 | xlabel('layer') ;
98 | ylabel('x') ;
99 |
100 | subplot(np,1,4) ;
101 | errorbar(1:n, dxmu, dxmi, dxmx, 'bo') ;
102 | grid on ;
103 | xlabel('layer') ;
104 | ylabel('dzdx') ;
105 |
106 | subplot(np,1,5) ;
107 | errorbar(1:n, dfmu, dfmi, dfmx, 'bo') ;
108 | grid on ;
109 | xlabel('layer') ;
110 | ylabel('dfilters') ;
111 |
112 | subplot(np,1,6) ;
113 | errorbar(1:n, dbmu, dbmi, dbmx, 'bo') ;
114 | grid on ;
115 | xlabel('layer') ;
116 | ylabel('dbiases') ;
117 | end
118 |
119 |
120 | drawnow ;
121 |
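A typical call pattern, as a hedged sketch (im and one stand for a preprocessed input image and the scalar output derivative, as in the MatConvNet examples):

    vl_simplenn_diagnose(net) ;          % parameters only
    res = vl_simplenn(net, im, one) ;    % forward + backward pass
    vl_simplenn_diagnose(net, res) ;     % parameters and responses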
--------------------------------------------------------------------------------
/matconvnet/matlab/simplenn/vl_simplenn_move.m:
--------------------------------------------------------------------------------
1 | function net = vl_simplenn_move(net, destination)
2 | % VL_SIMPLENN_MOVE Move a simple CNN between CPU and GPU
3 | % NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network
 4 | %   onto the current GPU device.
5 | %
6 | % NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the network
 7 | %   back to the CPU.
8 |
9 | % Copyright (C) 2014 Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | switch destination
16 | case 'gpu', moveop = @(x) gpuArray(x) ;
17 | case 'cpu', moveop = @(x) gather(x) ;
18 | otherwise, error('Unknown destination ''%s''.', destination) ;
19 | end
20 | for l=1:numel(net.layers)
21 | switch net.layers{l}.type
22 | case {'conv', 'convt', 'bnorm'}
23 | for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'}
24 | f = char(f) ;
25 | if isfield(net.layers{l}, f)
26 | net.layers{l}.(f) = moveop(net.layers{l}.(f)) ;
27 | end
28 | end
29 | for f = {'weights', 'momentum'}
30 | f = char(f) ;
31 | if isfield(net.layers{l}, f)
32 | for j=1:numel(net.layers{l}.(f))
33 | net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ;
34 | end
35 | end
36 | end
37 | otherwise
38 |       % no parameters to move for this layer type
39 | end
40 | end
41 |
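A common usage sketch: move the network to the GPU for training and back to the CPU before saving (selecting the GPU with gpuDevice is assumed to happen elsewhere):

    net = vl_simplenn_move(net, 'gpu') ;
    % ... train ...
    net = vl_simplenn_move(net, 'cpu') ;
    save('net.mat', '-struct', 'net') ;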
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/data.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "data.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/datacu.hpp:
--------------------------------------------------------------------------------
 1 | // @file datacu.hpp
2 | // @brief Basic data structures (CUDA support)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__datacu__
14 | #define __vl__datacu__
15 |
16 | #ifndef ENABLE_GPU
17 | #error "datacu.hpp cannot be compiled without GPU support"
18 | #endif
19 |
20 | #include "data.hpp"
21 | #include <string>
22 | #include <cuda.h>
23 | #include <cublas_v2.h>
24 | #if __CUDA_ARCH__ >= 200
25 | #define VL_CUDA_NUM_THREADS 1024
26 | #else
27 | #define VL_CUDA_NUM_THREADS 512
28 | #endif
29 |
30 | #ifdef ENABLE_CUDNN
31 | #include <cudnn.h>
32 | #endif
33 |
34 | namespace vl {
35 | class CudaHelper {
36 | public:
37 | // Cuda errors
38 | cudaError_t getLastCudaError() const ;
39 | std::string const& getLastCudaErrorMessage() const ;
40 | vl::Error catchCudaError(char const* description = NULL) ;
41 |
42 | // CuBLAS support
43 | cublasStatus_t getCublasHandle(cublasHandle_t* handle) ;
44 | void clearCublas() ;
45 | cublasStatus_t getLastCublasError() const ;
46 | std::string const& getLastCublasErrorMessage() const ;
47 | vl::Error catchCublasError(cublasStatus_t status,
48 | char const* description = NULL) ;
49 |
50 | #if ENABLE_CUDNN
51 | // CuDNN support
52 | cudnnStatus_t getCudnnHandle(cudnnHandle_t* handle) ;
53 | void clearCudnn() ;
54 | bool getCudnnEnabled() const ;
55 | void setCudnnEnabled(bool active) ;
56 | cudnnStatus_t getLastCudnnError() const ;
57 | std::string const& getLastCudnnErrorMessage() const ;
58 | vl::Error catchCudnnError(cudnnStatus_t status,
59 | char const* description = NULL) ;
60 | #endif
61 |
62 | protected:
63 | CudaHelper() ;
64 | ~CudaHelper() ;
65 | void clear() ;
66 | void invalidateGpu() ;
67 | friend class Context ;
68 |
69 | private:
70 | cudaError_t lastCudaError ;
71 | std::string lastCudaErrorMessage ;
72 |
73 | // CuBLAS
74 | cublasHandle_t cublasHandle ;
75 | bool isCublasInitialized ;
76 | cublasStatus_t lastCublasError ;
77 | std::string lastCublasErrorMessage ;
78 |
79 | #if ENABLE_CUDNN
80 | // CuDNN
81 | cudnnStatus_t lastCudnnError ;
82 | std::string lastCudnnErrorMessage ;
83 | cudnnHandle_t cudnnHandle ;
84 | bool isCudnnInitialized ;
85 | bool cudnnEnabled ;
86 | #endif
87 | } ;
88 | }
89 | #endif /* defined(__vl__datacu__) */
90 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/datamex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "datamex.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/datamex.hpp:
--------------------------------------------------------------------------------
1 | // @file datamex.hpp
2 | // @brief Basic data structures (MEX support)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__datamex__
14 | #define __vl__datamex__
15 |
16 | #include "mex.h"
17 |
18 | #if ENABLE_GPU
19 | #include "gpu/mxGPUArray.h"
20 | #endif
21 |
22 | #include "data.hpp"
23 |
24 | namespace vl {
25 |
26 | class MexTensor ;
27 |
28 | class MexContext : public Context
29 | {
30 | public:
31 | MexContext() ;
32 | ~MexContext() ;
33 |
34 | protected:
35 | #if ENABLE_GPU
36 | vl::Error initGpu() ;
37 | vl::Error validateGpu() ;
38 | mxArray * canary ; // if it breathes, the GPU state is valid
39 | bool gpuIsInitialized ;
40 | #endif
41 |
42 | friend class MexTensor ;
43 | } ;
44 |
45 | class MexTensor : public Tensor
46 | {
47 | public:
48 | MexTensor(MexContext & context) ;
49 | vl::Error init(Device dev, TensorGeometry const & geom) ;
50 | vl::Error init(Device dev, TensorGeometry const & geom, float value) ;
51 | vl::Error initWithZeros(Device dev, TensorGeometry const & geom) ;
52 | vl::Error init(mxArray const * array) ;
53 |
54 | mxArray * relinquish() ;
55 | void clear() ;
56 | ~MexTensor() ;
57 |
58 | protected:
59 | MexContext & context ;
60 | mxArray const * array ;
61 | #ifdef ENABLE_GPU
62 | mxGPUArray const * gpuArray ;
63 | #endif
64 | bool isArrayOwner ;
65 |
66 | private: // prevention
67 | MexTensor(MexTensor const &) ;
68 | MexTensor & operator= (MexTensor & tensor) ;
69 | } ;
70 |
71 | void print(char const * str, Tensor const & tensor) ;
72 |
73 | void mexThrowError(Context const& context, vl::Error error) ;
74 | }
75 |
76 |
77 | #endif /* defined(__vl__datamex__) */
78 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/bnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file bnorm.hpp
2 | // @brief Batch Normalization block implementation
3 | // @author Sebastien Ehrhardt
4 |
5 | /*
6 | Copyright (C) 2015 Sebastien Ehrhardt.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm__
14 | #define __vl__bnorm__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<typename type>
22 | struct bnorm
23 | {
24 | static vl::Error
25 | forward(Context& context,
26 | type* output,
27 | type* moments, // can be null and it will be allocated internally
28 | type const* data,
29 | type const* multipliers,
30 | type const* biases,
31 | int height, int width, int depth, int size,
32 | type epsilon) ;
33 |
34 | static vl::Error
35 | forward_given_moments(Context& context,
36 | type* output,
37 | type const* moments,
38 | type const* data,
39 | type const* multipliers,
40 | type const* biases,
41 | int height, int width, int depth, int size) ;
42 |
43 | static vl::Error
44 | backward(Context& context,
45 | type* derData,
46 | type* derMultipliers,
47 | type* derBiases,
48 | type* moments,
49 | type const* data,
50 | type const* multipliers,
51 | type const* biases,
52 | type const* derOutput,
53 | int height, int width, int depth, int size,
54 | type epsilon) ;
55 |
56 | static vl::Error
57 | backward_given_moments(Context& context,
58 | type* derData,
59 | type* derMultipliers,
60 | type* derBiases,
61 | type const* moments,
62 | type const* data,
63 | type const* multipliers,
64 | type const* biases,
65 | type const* derOutput,
66 | int height, int width, int depth, int size,
67 | type epsilon) ;
68 | } ;
69 |
70 | } }
71 | #endif /* __vl__bnorm__ */
72 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/copy.hpp:
--------------------------------------------------------------------------------
1 | // @file copy.hpp
2 | // @brief Copy data
3 | // @author Andrea Vedaldi
4 |
5 | #ifndef __vl__copy__
6 | #define __vl__copy__
7 |
8 | #include "../data.hpp"
9 |
10 | namespace vl { namespace impl {
11 |
12 |   template<vl::Device dev, typename type> vl::Error
13 | copy(type * dest,
14 | type const * src,
15 | size_t numElements) ;
16 |
17 | template<> vl::Error
18 |   copy<vl::CPU, float> (float * dest,
19 | float const * src,
20 | size_t numElements) ;
21 |
22 | #if ENABLE_GPU
23 | template<> vl::Error
24 |   copy<vl::GPU, float> (float * dest,
25 | float const * src,
26 | size_t numElements) ;
27 | #endif
28 |
29 | } }
30 |
31 | #endif /* defined(__vl__copy__) */
32 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/copy_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file copy_cpu.cpp
2 | // @brief Copy data (CPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | using namespace vl ;
17 | using namespace vl::impl ;
18 |
19 | template <> vl::Error
20 | vl::impl::copy<vl::CPU, float>(float * dest,
21 | float const * src,
22 | size_t numElements)
23 | {
24 | memcpy(dest, src, numElements * sizeof(float)) ;
25 | return vlSuccess ;
26 | }
27 |
28 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/copy_gpu.cu:
--------------------------------------------------------------------------------
1 | // @file copy_gpu.cu
2 | // @brief Copy data (GPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | using namespace vl ;
17 | using namespace vl::impl ;
18 |
19 | template <> vl::Error
20 | vl::impl::copy<vl::GPU, float>(float * dest,
21 | float const * src,
22 | size_t numElements)
23 | {
24 | cudaMemcpy(dest, src, numElements * sizeof(float), cudaMemcpyDeviceToDevice) ;
25 | return vlSuccess ;
26 | }
27 |
28 |
29 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/im2row.hpp:
--------------------------------------------------------------------------------
1 | // @file im2row.hpp
2 | // @brief Stack image patches as matrix rows
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__im2row__
14 | #define __vl__im2row__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::Device dev, typename type> vl::Error
22 | im2row(vl::Context& context,
23 | type* stacked,
24 | type const* data,
25 | size_t height, size_t width, size_t depth,
26 | size_t windowHeight, size_t windowWidth,
27 | size_t strideY, size_t strideX,
28 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
29 |
30 |   template<vl::Device dev, typename type> vl::Error
31 | row2im(vl::Context& context,
32 | type* data,
33 | type const* stacked,
34 | size_t height, size_t width, size_t depth,
35 | size_t windowHeight, size_t windowWidth,
36 | size_t strideY, size_t strideX,
37 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
38 |
39 |
40 | /* Specializations */
41 |
42 | template<> vl::Error
43 |   im2row<vl::CPU, float>(vl::Context& context,
44 | float* stacked,
45 | float const* data,
46 | size_t height, size_t width, size_t depth,
47 | size_t windowHeight, size_t windowWidth,
48 | size_t strideY, size_t strideX,
49 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
50 |
51 | template<> vl::Error
52 |   row2im<vl::CPU, float>(vl::Context& context,
53 | float* data,
54 | float const* stacked,
55 | size_t height, size_t width, size_t depth,
56 | size_t windowHeight, size_t windowWidth,
57 | size_t strideY, size_t strideX,
58 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
59 |
60 | #if ENABLE_GPU
61 | template<> vl::Error
62 |   im2row<vl::GPU, float>(vl::Context& context,
63 | float* stacked,
64 | float const* data,
65 | size_t height, size_t width, size_t depth,
66 | size_t windowHeight, size_t windowWidth,
67 | size_t strideY, size_t strideX,
68 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
69 |
70 | template<> vl::Error
71 |   row2im<vl::GPU, float>(vl::Context& context,
72 | float* data,
73 | float const* stacked,
74 | size_t height, size_t width, size_t depth,
75 | size_t windowHeight, size_t windowWidth,
76 | size_t strideY, size_t strideX,
77 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
78 | #endif
79 |
80 | } }
81 |
82 | #endif /* defined(__vl__im2row__) */
83 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/nnbias_cudnn.hpp:
--------------------------------------------------------------------------------
 1 | // @file nnbias_cudnn.hpp
 2 | // @brief Bias block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias_cudnn__
14 | #define __vl__nnbias_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::Device dev> vl::Error
22 | nnbias_forward_cudnn(vl::Context& context,
23 | vl::Tensor output, double outputMult,
24 | vl::Tensor data, double dataMult,
25 | vl::Tensor biases, double biasesMult) ;
26 |
27 |   template<vl::Device dev> vl::Error
28 | nnbias_backward_cudnn(vl::Context& context,
29 | vl::Tensor derData, double derDataMult,
30 | vl::Tensor derBiases, double derBiasesMult,
31 | vl::Tensor derOutput, double derOutputMult) ;
32 |
33 | /* specializations */
34 |
35 | template<> vl::Error
36 |   nnbias_forward_cudnn<vl::GPU>(vl::Context& context,
37 | vl::Tensor output, double outputMult,
38 | vl::Tensor data, double dataMult,
39 | vl::Tensor biases, double biasesMult) ;
40 |
41 | template<> vl::Error
42 |   nnbias_backward_cudnn<vl::GPU>(vl::Context& context,
43 | vl::Tensor derData, double derDataMult,
44 | vl::Tensor derBiases, double derBiasesMult,
45 | vl::Tensor derOutput, double derOutputMult) ;
46 | } }
47 |
48 | #endif /* defined(__vl__nnbias_cudnn__) */
49 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/nnconv_cudnn.hpp:
--------------------------------------------------------------------------------
 1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::Device dev> vl::Error
22 | nnconv_forward_cudnn(Context& context,
23 | Tensor output, double outputMult,
24 | Tensor data, double dataMult,
25 | Tensor filters,
26 | Tensor biases,
27 | int strideX, int strideY,
28 | int padLeft, int padRight,
29 | int padTop, int padBottom) ;
30 |
31 |   template<vl::Device dev> vl::Error
32 | nnconv_backward_cudnn(Context& context,
33 | Tensor derData,
34 | Tensor derFilters,
35 | Tensor derBiases,
36 | Tensor data,
37 | Tensor filters,
38 | Tensor derOutput,
39 | int strideX, int strideY,
40 | int padLeft, int padRight,
41 | int padTop, int padBottom) ;
42 |
43 | /* specializations */
44 |
45 | template<> vl::Error
46 |   nnconv_forward_cudnn<vl::GPU>(Context& context,
47 | Tensor output, double outputMult,
48 | Tensor data, double dataMult,
49 | Tensor filters,
50 | Tensor biases,
51 | int strideX, int strideY,
52 | int padLeft, int padRight,
53 | int padTop, int padBottom) ;
54 |
55 | template<> vl::Error
56 |   nnconv_backward_cudnn<vl::GPU>(Context& context,
57 | Tensor derData,
58 | Tensor derFilters,
59 | Tensor derBiases,
60 | Tensor data,
61 | Tensor filters,
62 | Tensor derOutput,
63 | int strideX, int strideY,
64 | int padLeft, int padRight,
65 | int padTop, int padBottom) ;
66 | } }
67 |
68 | #endif /* defined(__vl__nnconv_cudnn__) */
69 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/nnpooling_cudnn.hpp:
--------------------------------------------------------------------------------
 1 | // @file nnpooling_cudnn.hpp
2 | // @brief Pooling block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling_cudnn__
14 | #define __vl__nnpooling_cudnn__
15 |
16 | #include "../nnpooling.hpp"
17 | #include "../data.hpp"
18 | #include "cudnn.h"
19 |
20 |
21 | namespace vl { namespace impl {
22 |
23 |   template<vl::Device dev> vl::Error
24 | nnpooling_forward_cudnn(Context& context,
25 | Tensor output,
26 | Tensor data,
27 | vl::PoolingMethod method,
28 | int poolHeight, int poolWidth,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 |   template<vl::Device dev> vl::Error
34 | nnpooling_backward_cudnn(Context& context,
35 | Tensor derData,
36 | Tensor data,
37 | Tensor output,
38 | Tensor derOutput,
39 | vl::PoolingMethod method,
40 | int poolHeight, int poolWidth,
41 | int strideY, int strideX,
42 | int padTop, int padBottom,
43 | int padLeft, int padRight) ;
44 |
45 | /* specialisations */
46 |
47 | template<> vl::Error
48 |   nnpooling_forward_cudnn<vl::GPU>(Context& context,
49 | Tensor output,
50 | Tensor data,
51 | vl::PoolingMethod method,
52 | int poolHeight, int poolWidth,
53 | int strideY, int strideX,
54 | int padTop, int padBottom,
55 | int padLeft, int padRight) ;
56 |
57 | template<> vl::Error
58 |   nnpooling_backward_cudnn<vl::GPU>(Context& context,
59 | Tensor derData,
60 | Tensor data,
61 | Tensor output,
62 | Tensor derOutput,
63 | vl::PoolingMethod method,
64 | int poolHeight, int poolWidth,
65 | int strideY, int strideX,
66 | int padTop, int padBottom,
67 | int padLeft, int padRight) ;
68 | } }
69 |
70 | #endif /* defined(__vl__nnpooling_cudnn__) */
71 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/normalize.hpp:
--------------------------------------------------------------------------------
1 | // @file normalize.hpp
2 | // @brief Normalize block implementation
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__normalize__
14 | #define __vl__normalize__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::Device dev, typename type> vl::Error
22 |   normalize_forward(type* normalized,
23 |                     type const* data,
24 |                     size_t height, size_t width, size_t depth, size_t size,
25 |                     size_t normDepth,
26 |                     double kappa, double alpha, double beta) ;
27 |
28 |   template<vl::Device dev, typename type> vl::Error
29 |   normalize_backward(type* derData,
30 |                      type const* data,
31 |                      type const* derNormalized,
32 |                      size_t height, size_t width, size_t depth, size_t size,
33 |                      size_t normDepth,
34 |                      double kappa, double alpha, double beta) ;
35 |
36 | /* Specializations: CPU, float */
37 |
38 | template<> vl::Error
39 |   normalize_forward<vl::CPU, float>(float* normalized,
40 |                                     float const* data,
41 |                                     size_t height, size_t width, size_t depth, size_t size,
42 |                                     size_t normDepth,
43 |                                     double kappa, double alpha, double beta) ;
44 |
45 | template<> vl::Error
46 |   normalize_backward<vl::CPU, float>(float* derData,
47 |                                      float const* data,
48 |                                      float const* derNormalized,
49 |                                      size_t height, size_t width, size_t depth, size_t size,
50 |                                      size_t normDepth,
51 |                                      double kappa, double alpha, double beta) ;
52 |
53 |
54 | /* Specializations: GPU, float */
55 |
56 | #if ENABLE_GPU
57 | template<> vl::Error
58 |   normalize_forward<vl::GPU, float>(float* normalized,
59 |                                     float const* data,
60 |                                     size_t height, size_t width, size_t depth, size_t size,
61 |                                     size_t normDepth,
62 |                                     double kappa, double alpha, double beta) ;
63 |
64 | template<> vl::Error
65 |   normalize_backward<vl::GPU, float>(float* derData,
66 |                                      float const* data,
67 |                                      float const* derNormalized,
68 |                                      size_t height, size_t width, size_t depth, size_t size,
69 |                                      size_t normDepth,
70 |                                      double kappa, double alpha, double beta) ;
71 | #endif
72 |
73 | } }
74 | #endif /* __vl__normalize__ */
75 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/impl/subsample.hpp:
--------------------------------------------------------------------------------
 1 | // @file subsample.hpp
2 | // @brief Subsampling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_NNSUBSAMPLE_H
15 | #define VL_NNSUBSAMPLE_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 |   template<vl::Device dev, typename type> vl::Error
23 | subsample_forward(vl::Context& context,
24 | type* subsampled,
25 | type const* data,
26 | size_t height, size_t width, size_t depth,
27 | size_t strideY, size_t strideX,
28 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
29 |
30 |   template<vl::Device dev, typename type> vl::Error
31 | subsample_backward(vl::Context& context,
32 | type* derData,
33 | type const* derSubsampled,
34 | size_t height, size_t width, size_t depth,
35 | size_t strideY, size_t strideX,
36 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
37 |
38 | /* Specializations */
39 |
40 | template<> vl::Error
41 |   subsample_forward<vl::CPU, float>(vl::Context& context,
42 | float* subsampled,
43 | float const* data,
44 | size_t height, size_t width, size_t depth,
45 | size_t strideY, size_t strideX,
46 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
47 |
48 | template<> vl::Error
49 |   subsample_backward<vl::CPU, float>(vl::Context& context,
50 | float* derData,
51 | float const* derSubsampled,
52 | size_t height, size_t width, size_t depth,
53 | size_t strideY, size_t strideX,
54 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
55 |
56 | #if ENABLE_GPU
57 | template<> vl::Error
58 |   subsample_forward<vl::GPU, float>(vl::Context& context,
59 |                                     float* subsampled,
60 | float const* data,
61 | size_t height, size_t width, size_t depth,
62 | size_t strideY, size_t strideX,
63 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
64 |
65 | template<> vl::Error
66 |   subsample_backward<vl::GPU, float>(vl::Context& context,
67 | float* derData,
68 | float const* derSubsampled,
69 | size_t height, size_t width, size_t depth,
70 | size_t strideY, size_t strideX,
71 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
72 | #endif
73 |
74 | } }
75 |
76 | #endif /* defined(VL_NNSUBSAMPLE_H) */
77 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/imread.hpp:
--------------------------------------------------------------------------------
1 | // @file imread.hpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__imread__
14 | #define __vl__imread__
15 |
16 | namespace vl {
17 |
18 | struct Image
19 | {
20 | int width ;
21 | int height ;
22 | int depth ;
23 | float * memory ;
24 | int error ;
25 |
26 | Image() : width(0), height(0), depth(0), memory(0), error(0) { }
27 | } ;
28 |
29 | class ImageReader
30 | {
31 | public:
32 | ImageReader() ;
33 | ~ImageReader() ;
34 | Image read(char const * fileName, float * memory = 0) ;
35 | Image readDimensions(char const * fileName) ;
36 |
37 | private:
38 | class Impl ;
39 | Impl * impl ;
40 | } ;
41 | }
42 |
43 | #endif
44 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnbias.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
 2 | #error "The file nnbias.cu should be compiled instead"
3 | #endif
4 | #include "nnbias.cu"
5 |
6 | /**
7 | @brief nnbias_forward
8 | @param context context.
9 | @param output output tensor $\by$ [output].
10 | @param outputMult output tensor multiplier $\alpha$.
11 | @param data data tensor $\bx$.
12 | @param dataMult data tensor multiplier $\beta$.
13 | @param biases biases tensor $\bb$.
14 | @param biasesMult biases tensor multiplier $\gamma$.
15 |
16 | The function computes
17 | @f[
18 | y_{ijkd} \leftarrow
19 | \alpha y_{ijkd} +
20 | \beta x_{ijkd} +
21 | \gamma b_k.
22 | @f]
23 |
24 | @a data can be the null tensor, in which case this tensor
25 | is dropped in the summation.
26 | */
27 |
28 | /**
29 | @brief nnbias_backward
30 | @param context context.
31 | @param derData data derivative tensor $d\bx$ [output].
32 | @param derDataMult data derivative tensor multiplier $\eta$.
33 | @param derBiases biases derivative tensor $d\bb$ [output].
34 | @param derBiasesMult biases derivative tensor multiplier $\tau$.
35 | @param data data tensor $\bx$.
36 | @param dataMult data tensor multiplier $\beta$.
37 | @param biases biases tensor $\bb$.
38 | @param biasesMult biases tensor multiplier $\gamma$.
39 |
40 | If @a derData is the null tensor, this derivative is not computed and
41 | @a biases can also be null.
42 |
43 | If @a derBiases is the null tensor, this derivative is not computed and
44 | @a data can also be null.
45 | */
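In MATLAB terms, the forward update above is a per-channel broadcast of the biases over the spatial and batch dimensions. A minimal sketch with hypothetical sizes:

    x = randn(5,5,3,2,'single') ;   % data, H x W x K x N
    b = randn(3,1,'single') ;       % one bias per channel k
    alpha = 0 ; beta = 1 ; gamma = 1 ;
    y = zeros(size(x), 'single') ;
    y = alpha*y + beta*x + ...
        gamma*repmat(reshape(b,1,1,[]), [size(x,1) size(x,2) 1 size(x,4)]) ;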
46 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnbias.cu:
--------------------------------------------------------------------------------
1 | // @file nnbias.cu
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "nnbias.hpp"
14 | #include "impl/nnbias_blas.hpp"
15 | #if ENABLE_CUDNN
16 | #include "impl/nnbias_cudnn.hpp"
17 | #endif
18 | #include <assert.h>
19 |
20 | using namespace vl ;
21 |
22 | /* ---------------------------------------------------------------- */
23 | /* Dispatchers */
24 | /* ---------------------------------------------------------------- */
25 |
26 | vl::Error
27 | vl::nnbias_forward(vl::Context& context,
28 | vl::Tensor output, double outputMult,
29 | vl::Tensor data, double dataMult,
30 | vl::Tensor biases, double biasesMult)
31 | {
32 | vl::Error status = vlSuccess ;
33 | switch (output.getMemoryType()) {
34 | default:
35 | assert(false) ;
36 | status = vl::vlErrorUnknown ;
37 | break ;
38 |
39 | case vl::CPU:
40 | status = vl::impl::nnbias_forward_blas
41 | (context, output, outputMult, data, dataMult, biases, biasesMult) ;
42 | break ;
43 |
44 | #if ENABLE_GPU
45 | case vl::GPU:
46 | #if ENABLE_CUDNN
47 | if (context.getCudaHelper().getCudnnEnabled()) {
48 | status = vl::impl::nnbias_forward_cudnn
49 | (context, output, outputMult, data, dataMult, biases, biasesMult) ;
50 | if (status == vl::vlSuccess) { return status ; }
51 | if (status != vl::vlErrorUnsupported) { goto done ; }
52 | /* this case was not supported by CUDNN -- fallback */
53 | }
54 | #endif
55 | status = vl::impl::nnbias_forward_blas
56 | (context, output, outputMult, data, dataMult, biases, biasesMult) ;
57 | break ;
58 | #endif
59 | }
60 | #if ENABLE_CUDNN
61 | done:
62 | #endif
63 | return context.passError(status, "nnbias_forward: ") ;
64 | }
65 |
66 | vl::Error
67 | vl::nnbias_backward(vl::Context& context,
68 | vl::Tensor derData, double derDataMult,
69 | vl::Tensor derBiases, double derBiasesMult,
70 | vl::Tensor derOutput, double derOutputMult)
71 | {
72 | vl::Error status = vlSuccess ;
73 | switch (derOutput.getMemoryType()) {
74 | default:
75 | assert(false) ;
76 | status = vl::vlErrorUnknown ;
77 | break ;
78 |
79 | case vl::CPU:
80 | status = vl::impl::nnbias_backward_blas
81 | (context, derData, derDataMult, derBiases, derBiasesMult, derOutput, derOutputMult) ;
82 | break ;
83 |
84 | #if ENABLE_GPU
85 | case vl::GPU:
86 | #if ENABLE_CUDNN
87 | if (context.getCudaHelper().getCudnnEnabled()) {
88 | status = vl::impl::nnbias_backward_cudnn
89 | (context, derData, derDataMult, derBiases, derBiasesMult, derOutput, derOutputMult) ;
90 | if (status == vl::vlSuccess) { return status ; }
91 | if (status != vl::vlErrorUnsupported) { goto done ; }
92 | /* this case was not supported by CUDNN -- fallback */
93 | }
94 | #endif
95 | status = vl::impl::nnbias_backward_blas
96 | (context, derData, derDataMult, derBiases, derBiasesMult, derOutput, derOutputMult) ;
97 | break ;
98 | #endif
99 | }
100 | #if ENABLE_CUDNN
101 | done:
102 | #endif
103 | return context.passError(status, "nnbias_backward: ") ;
104 | }
105 |
106 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnbias.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias.hpp
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias__
14 | #define __vl__nnbias__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::Error
21 | nnbias_forward(vl::Context& context,
22 | vl::Tensor output, double outputMult,
23 | vl::Tensor data, double dataMult,
24 | vl::Tensor biases, double biasesMult) ;
25 |
26 | vl::Error
27 | nnbias_backward(vl::Context& context,
28 | vl::Tensor derData, double derDataMult,
29 | vl::Tensor derBiases, double derBiasesMult,
30 | vl::Tensor derOutput, double derOutputMult) ;
31 | }
32 |
33 | #endif /* defined(__vl__nnbias__) */
34 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnbnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbnorm.hpp
 2 | // @brief Batch normalization block
3 | // @author Sebastien Ehrhardt
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __vl__nnbnorm__
15 | #define __vl__nnbnorm__
16 |
17 | #include "data.hpp"
18 | #include <stdio.h>
19 |
20 | namespace vl {
21 |
22 | // This version computes mean and sigma
23 | vl::Error
24 | nnbnorm_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor moments, // [output: can pass null]
27 | vl::Tensor data,
28 | vl::Tensor filters,
29 | vl::Tensor biases,
30 | float epsilon) ;
31 |
32 | // This version uses the mean and sigma specified
33 | vl::Error
34 | nnbnorm_forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments, // input
37 | vl::Tensor data,
38 | vl::Tensor filters,
39 | vl::Tensor biases) ;
40 |
41 | vl::Error
42 | nnbnorm_backward(vl::Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derFilters,
45 |                    vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor filters,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | float epsilon) ;
52 |
53 | vl::Error
54 | nnbnorm_backward_given_moments(vl::Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derFilters,
57 |                                 vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor filters,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | float epsilon) ;
64 | }
65 |
66 | #endif /* defined(__vl__nnbnorm__) */
67 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnconv.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnconv.cu should be compiled instead"
3 | #endif
4 | #include "nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnconv.hpp:
--------------------------------------------------------------------------------
 1 | // @file nnconv.hpp
2 | // @brief Convolution block
3 | // @author Andrea Vedaldi
4 | // @author Max Jaderberg
5 |
6 | /*
7 | Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg
8 | Copyright (C) 2015 Andrea Vedaldi.
9 |
10 | All rights reserved.
11 |
12 | This file is part of the VLFeat library and is made available under
13 | the terms of the BSD license (see the COPYING file).
14 | */
15 |
16 | #ifndef __vl__nnconv__
17 | #define __vl__nnconv__
18 |
19 | #include "data.hpp"
20 |
21 | namespace vl {
22 |
23 | vl::Error
24 | nnconv_forward(vl::Context& context,
25 | vl::Tensor output, double outputMult,
26 | vl::Tensor data, double dataMult,
27 | vl::Tensor filters,
28 | vl::Tensor biases,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::Error
34 | nnconv_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor derFilters,
37 | vl::Tensor derBiases,
38 | vl::Tensor data,
39 | vl::Tensor filters,
40 | vl::Tensor derOutput,
41 | int strideY, int strideX,
42 | int padTop, int padBottom,
43 | int padLeft, int padRight) ;
44 |
45 | vl::Error
46 | nnconvt_forward(vl::Context& context,
47 | vl::Tensor output,
48 | vl::Tensor data,
49 | vl::Tensor filters,
50 | vl::Tensor biases,
51 | int upsampleY, int upsampleX,
52 | int cropTop, int cropBottom,
53 | int cropLeft, int cropRight) ;
54 |
55 | vl::Error
56 | nnconvt_backward(vl::Context& context,
57 | vl::Tensor derData,
58 | vl::Tensor derFilters,
59 | vl::Tensor derBiases,
60 | vl::Tensor data,
61 | vl::Tensor filters,
62 | vl::Tensor derOutput,
63 | int upsampleY, int upsampleX,
64 | int cropTop, int cropBottom,
65 | int cropLeft, int cropRight) ;
66 | }
67 |
68 |
69 | #endif /* defined(__vl__nnconv__) */
70 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnfullyconnected.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnfullyconnected.cu should be compiled instead"
3 | #endif
4 | #include "nnfullyconnected.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnfullyconnected.hpp:
--------------------------------------------------------------------------------
1 | // @file nnfullyconnected.hpp
2 | // @brief Fully-connected block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 |
14 | #ifndef __vl__nnfullyconnected__
15 | #define __vl__nnfullyconnected__
16 |
17 | #include "data.hpp"
18 |
19 | namespace vl {
20 |
21 | vl::Error
22 | nnfullyconnected_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | vl::Tensor filters,
26 | vl::Tensor biases) ;
27 |
28 | vl::Error
29 | nnfullyconnected_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor derFilters,
32 | vl::Tensor derBiases,
33 | vl::Tensor data,
34 | vl::Tensor filters,
35 | vl::Tensor derOutput) ;
36 | }
37 |
38 |
39 | #endif /* defined(__vl__nnfullyconnected__) */
40 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalize.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnnormalize.hpp:
--------------------------------------------------------------------------------
1 | // @file nnnormalize.hpp
2 | // @brief Normalization block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 | #ifndef __vl__nnnormalize__
13 | #define __vl__nnnormalize__
14 |
15 | #include "data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl {
19 |
20 | vl::Error
21 | nnnormalize_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 |                       size_t normDepth,
25 | double kappa, double alpha, double beta) ;
26 |
27 | vl::Error
28 | nnnormalize_backward(vl::Context& context,
29 | vl::Tensor derData,
30 | vl::Tensor data,
31 | vl::Tensor derOutput,
32 |                        size_t normDepth,
33 | double kappa, double alpha, double beta) ;
34 | }
35 |
36 | #endif /* defined(__vl__nnnormalize__) */
37 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnpooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnpooling.cu should be compiled instead"
3 | #endif
4 | #include "nnpooling.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnpooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling.hpp
2 | // @brief Pooling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling__
14 | #define __vl__nnpooling__
15 |
16 | #include "data.hpp"
17 | #include <stdio.h>
18 |
19 | namespace vl {
20 |
21 | enum PoolingMethod { vlPoolingMax, vlPoolingAverage } ;
22 |
23 | vl::Error
24 | nnpooling_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor data,
27 | PoolingMethod method,
28 | int poolHeight, int poolWidth,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::Error
34 | nnpooling_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor data,
37 | vl::Tensor derOutput,
38 | PoolingMethod method,
39 | int poolHeight, int poolWidth,
40 | int strideY, int strideX,
41 | int padTop, int padBottom,
42 | int padLeft, int padRight) ;
43 | }
44 |
45 | #endif /* defined(__vl__nnpooling__) */
46 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnsubsample.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnsubsample.cu"
5 |
6 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/bits/nnsubsample.hpp:
--------------------------------------------------------------------------------
1 | // @file nnsubsample.hpp
2 | // @brief Subsampling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnsubsample__
14 | #define __vl__nnsubsample__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::Error
21 | nnsubsample_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor biases,
25 | int strideY, int strideX,
26 | int padTop, int padBottom,
27 | int padLeft, int padRight) ;
28 |
29 | vl::Error
30 | nnsubsample_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor derBiases,
33 | vl::Tensor derOutput,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 | }
38 |
39 | #endif /* defined(__vl__nnsubsample__) */
40 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/config/mex_CUDA_glnxa64.xml:
--------------------------------------------------------------------------------
1 | <!-- compiler configuration for building CUDA MEX files on 64-bit Linux (glnxa64); the XML markup is not recoverable -->
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_imreadjpeg.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_nnconv.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_nnconvt.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconvt.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/src/vl_nnpool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnpool.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | %VL_IMREADJPEG (A)synchronous multithreaded JPEG image loader.
2 | % IMAGES = VL_IMREADJPEG(FILES) reads the specified cell array
3 | % FILES of JPEG files into the cell array of images IMAGES.
4 | %
5 | % IMAGES = VL_IMREADJPEG(FILES, 'NumThreads', T) uses T parallel
6 | % threads to accelerate the operation. Note that this is
7 | % independent of the number of computational threads used by
8 | % MATLAB.
9 | %
10 | % VL_IMREADJPEG(FILES, 'Prefetch') starts reading the specified
11 | % images but returns immediately to MATLAB. Reading happens
12 | % concurrently with MATLAB in one or more separate threads. A
13 | % subsequent call IMAGES=VL_IMREADJPEG(FILES) *specifying exactly
14 | % the same files in the same order* will then return the loaded
15 | % images. This can be used to quickly load a batch of JPEG images
16 | % while MATLAB is busy doing something else.
17 | %
18 | % The function takes the following options:
19 | %
20 | % Prefetch:: [not specified]
21 | % If specified, run without blocking (see above).
22 | %
23 | % Verbose:: [not specified]
24 | % Increase the verbosity level.
25 | %
26 | % NumThreads:: [1]
27 | % Specify the number of threads used to read images. This number
28 | % must be at least 1. Note that it does not make sense to specify
29 | % a number larger than the number of available CPU cores, and
30 | % often fewer threads are sufficient as reading images is memory
31 | % access bound rather than CPU bound.
32 |
33 | % Copyright (C) 2014-15 Andrea Vedaldi.
34 | % All rights reserved.
35 | %
36 | % This file is part of the VLFeat library and is made available under
37 | % the terms of the BSD license (see the COPYING file).
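A minimal sketch of the prefetching pattern documented above (the file names are illustrative):

    files = {'data/a.jpg', 'data/b.jpg'} ;              % hypothetical paths
    vl_imreadjpeg(files, 'Prefetch', 'NumThreads', 2) ; % returns immediately
    % ... MATLAB is free to do other work while the images load ...
    images = vl_imreadjpeg(files) ;                     % same files, same order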
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnbnorm.m:
--------------------------------------------------------------------------------
1 | %VL_NNBNORM CNN batch normalisation.
2 | % Y = VL_NNBNORM(X,G,B) computes the batch normalization of the
3 | % input X. This is defined as:
4 | %
5 | % Y(i,j,k,t) = G(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + B(k)
6 | %
7 | % where:
8 | %
9 | % mu(k) = mean_ijt X(i,j,k,t),
10 | % sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2,
11 | % sigma(k) = sqrt(sigma2(k) + EPSILON)
12 | %
13 | % are respectively the per-channel mean, variance, and standard
14 | % deviation of a channel of the data X and G(k) and B(k) define
15 | % respectively a multiplicative and additive constant to scale each
16 | % data channel. Note that statistics are accumulated across all the
17 | % items of the batch in the 4D tensor X (hence the name batch
18 | % normalization). The constant EPSILON is used to regularize the
19 | % computation of sigma(k) and avoid divisions by zero or very small
20 | % numbers.
21 | %
22 | % [DZDX,DZDG,DZDB] = VL_NNBNORM(X,G,B,DZDY) computes the derivatives
23 | % of the block projected onto DZDY. DZDX, DZDG, DZDB and DZDY have
24 | % the same dimensions as X, G, B, and Y respectively.
25 | %
26 | % Optionally, [Y,MOMENTS] = VL_NNBNORM(...) and
27 | % [DZDX,DZDG,DZDB,MOMENTS] = VL_NNBNORM(...,DZDY) return the values
28 | % of the vectors mu and sigma in the formulas above. Here, MOMENTS
29 | % is a 2 x DEPTH array, where the vectors mu and sigma are the rows.
30 | %
31 | % VL_NNBNORM(..., 'Option', value) takes the following options:
32 | %
33 | % `Epsilon`:: 1e-4
34 | % Specifies the constant EPSILON in the formulas above.
35 | %
36 | % `Moments`:: unspecified
37 | % Specifies an array MOMENTS with the values of mu and sigma to
38 | % use instead of computing them according to the equations
39 | % above. This is useful to disable batch normalization during
40 | % testing.
41 | %
42 | % See also: VL_NNNORMALIZE().
43 |
44 | % Copyright (C) 2015 Sébastien Ehrhardt, Karel Lenc and Andrea Vedaldi.
45 | % All rights reserved.
46 | %
47 | % This file is part of the VLFeat library and is made available under
48 | % the terms of the BSD license (see the COPYING file).
49 |
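A minimal usage sketch of the calls documented above; the 'Moments' option freezes the batch statistics, for instance at test time (all values are illustrative):

    x = randn(8,8,16,4,'single') ;               % H x W x D x N batch
    g = randn(16,1,'single') ;                   % per-channel scale G
    b = randn(16,1,'single') ;                   % per-channel bias B
    y = vl_nnbnorm(x,g,b) ;                      % forward, batch statistics
    dzdy = randn(size(y),'single') ;
    [dzdx,dzdg,dzdb,moments] = vl_nnbnorm(x,g,b,dzdy) ; % backward
    yt = vl_nnbnorm(x,g,b,'Moments',moments) ;   % forward, frozen statistics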
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnconcat.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin)
2 | %VL_NNCONCAT CNN concatenate multiple inputs.
3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell
4 | % array INPUTS along dimension DIM generating an output Y.
5 | %
6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives
7 | % of the block projected onto DZDY. DZDINPUTS has one element for
8 | % each element of INPUTS, each of which is an array that has the same
9 | % dimensions of the corresponding array in INPUTS.
10 |
11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
12 | % All rights reserved.
13 | %
14 | % This file is part of the VLFeat library and is made available under
15 | % the terms of the BSD license (see the COPYING file).
16 |
17 | opts.inputSizes = [] ;
18 | opts = vl_argparse(opts, varargin) ;
19 |
20 | if nargin < 2, dim = 3; end;
21 | if nargin < 3, dzdy = []; end;
22 |
23 | if isempty(dzdy)
24 | y = cat(dim, inputs{:});
25 | else
26 | if isempty(opts.inputSizes)
27 | opts.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
28 | end
29 | start = 1 ;
30 | y = cell(1, numel(opts.inputSizes)) ;
31 | s.type = '()' ;
32 | s.subs = {':', ':', ':', ':'} ;
33 | for i = 1:numel(opts.inputSizes)
34 | stop = start + opts.inputSizes{i}(dim) ;
35 | s.subs{dim} = start:stop-1 ;
36 | y{i} = subsref(dzdy,s) ;
37 | start = stop ;
38 | end
39 | end
40 |
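A brief usage sketch of the forward and backward calls implemented above (sizes are illustrative):

    x1 = randn(4,4,3,2,'single') ;
    x2 = randn(4,4,5,2,'single') ;
    y = vl_nnconcat({x1, x2}, 3) ;          % concatenate channels: 4 x 4 x 8 x 2
    dzdy = randn(size(y),'single') ;
    dzdx = vl_nnconcat({x1, x2}, 3, dzdy) ; % {dzdx1, dzdx2}, same sizes as inputs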
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnconv.m:
--------------------------------------------------------------------------------
1 | %VL_NNCONV CNN convolution.
2 | % Y = VL_NNCONV(X, F, B) computes the convolution of the image stack X
3 | % with the filter bank F and biases B. If B is the empty matrix,
4 | % then no biases are added. If F is the empty matrix, then
5 | % the function does not filter the image, but still adds the
6 | % biases as well as performing downsampling and padding as explained
7 | % below.
8 | %
9 | % X is a SINGLE array of dimension H x W x D x N where (H,W) are
10 | % the height and width of the image stack, D is the image depth
11 | % (number of feature channels) and N the number of images in the
12 | % stack.
13 | %
14 | % F is a SINGLE array of dimension FH x FW x FD x K where (FH,FW)
15 | % are the filter height and width and K the number of filters in the
16 | % bank. FD is the depth of each filter and must match the depth D of
17 | % X. Alternatively, FD can *divide* the depth D; in this case,
18 | % filters are assumed to form G=D/FD *groups* of equal size (where
19 | % G must divide K). Each group of filters works on a consecutive
20 | % subset of feature channels of the input array X.
21 | %
22 | % [DZDX, DZDF, DZDB] = VL_NNCONV(X, F, B, DZDY) computes the
23 | % derivatives of the block projected onto DZDY. DZDX, DZDF, and
24 | % DZDB, and DZDY have the same dimensions as X, F, B, and Y
25 | % respectively. In particular, if B is the empty matrix, then DZDB
26 | % is also empty.
27 | %
28 | % VL_NNCONV() implements a special `fully-connected' mode: when the
29 | % support of the filters matches exactly the support of the input
30 | % image, the code uses an optimized path for faster computation.
31 | %
32 | % VL_NNCONV(..., 'option', value, ...) takes the following options:
33 | %
34 | % `Stride`:: 1
35 | % The output stride or downsampling factor. If the value is a
36 | % scalar, then the same stride is applied to both vertical and
37 | % horizontal directions; otherwise, passing [STRIDEY STRIDEX]
38 | % allows specifying different downsampling factors for each
39 | % direction.
40 | %
41 | % `Pad`:: 0
42 | % The amount of input padding. Input images are padded with zeros
43 | % by this number of pixels before the convolution is
44 | % computed. Passing [TOP BOTTOM LEFT RIGHT] allows specifying
45 | % different padding amounts for the top, bottom, left, and right
46 | % sides respectively. Passing a single scalar applies the same
47 | % padding to all borders.
48 | %
49 | % The filter size must not be larger than the padded image, i.e.
50 | %
51 | % 1 <= FH <= H + (PADTOP+PADBOTTOM),
52 | % 1 <= FW <= W + (PADLEFT+PADRIGHT).
53 | %
54 | % The output Y is a SINGLE array of dimension YH x YW x K x N,
55 | % i.e. N images with K channels, where:
56 | %
57 | % YH = floor((H + (PADTOP+PADBOTTOM) - FH)/STRIDEY) + 1,
58 | % YW = floor((W + (PADLEFT+PADRIGHT) - FW)/STRIDEX) + 1.
59 | %
60 | % ## CUDNN SUPPORT
61 | %
62 | % If compiled in, the function will use cuDNN convolution routines
63 | % (with the exception of asymmetric left-right or top-bottom
64 | % padding and a few corner cases such as 1x1 filters in Linux that
65 | % trigger current bugs in cuDNN). You can use the 'NoCuDNN' option
66 | % to disable cuDNN or 'cuDNN' to activate it back again (the choice
67 | % sticks until MATLAB purges the MEX files for any reason).
68 |
69 | % Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg.
70 | % Copyright (C) 2015 Andrea Vedaldi.
71 | % All rights reserved.
72 | %
73 | % This file is part of the VLFeat library and is made available under
74 | % the terms of the BSD license (see the COPYING file).
75 |
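A minimal sketch illustrating the options and the output-size formula above (sizes are illustrative):

    x = randn(32,32,3,1,'single') ;           % H x W x D x N input
    f = randn(5,5,3,10,'single') ;            % FH x FW x FD x K filter bank
    b = randn(1,10,'single') ;
    y = vl_nnconv(x,f,b,'Stride',2,'Pad',2) ;
    % YH = floor((32 + 2+2 - 5)/2) + 1 = 16, so size(y) is [16 16 10 1]
    dzdy = randn(size(y),'single') ;
    [dzdx,dzdf,dzdb] = vl_nnconv(x,f,b,dzdy,'Stride',2,'Pad',2) ;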
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nncrop.m:
--------------------------------------------------------------------------------
1 | function y = vl_nncrop(x, crop, dzdy, inputSize)
2 | %VL_NNCROP CNN crop.
3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the
4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT].
5 | %
6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the
7 | % function projected on the output derivative DZDY. DZDX has the same
8 | % dimension as X and DZDY the same dimension as Y.
9 | %
10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to
11 | % the previous call in which X is omitted and its size is passed as
12 | % INPUTSIZE.
13 |
14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi.
15 | % All rights reserved.
16 | %
17 | % This file is part of the VLFeat library and is made available under
18 | % the terms of the BSD license (see the COPYING file).
19 |
20 | if nargin < 4
21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ;
22 | else
23 | sz = inputSize ;
24 | end
25 |
26 | sv = 1 + crop(1) : sz(1) - crop(2) ;
27 | su = 1 + crop(3) : sz(2) - crop(4) ;
28 |
29 | if nargin <= 2 || isempty(dzdy)
30 | y = x(sv, su, :, :) ;
31 | else
32 | if isa(dzdy, 'gpuArray')
33 | y = gpuArray.zeros(sz, 'single') ;
34 | else
35 | y = zeros(sz, 'single') ;
36 | end
37 | y(sv, su, :, :) = dzdy ;
38 | end
39 |
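For instance, under the cropping convention implemented above:

    x = randn(10,10,3,2,'single') ;
    y = vl_nncrop(x, [1 1 2 2]) ; % drop 1 row top and bottom, 2 columns left
                                  % and right: size(y) is [8 6 3 2]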
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nndropout.m:
--------------------------------------------------------------------------------
1 | function [y,mask] = vl_nndropout(x,varargin)
2 | %VL_NNDROPOUT CNN dropout.
3 | % [Y,MASK] = VL_NNDROPOUT(X) applies dropout to the data X. MASK
4 | % is the randomly sampled dropout mask. Both Y and MASK have the
5 | % same size as X.
6 | %
7 | % VL_NNDROPOUT(X, 'rate', R) sets the dropout rate to R.
8 | %
9 | % [DZDX] = VL_NNDROPOUT(X, DZDY, 'mask', MASK) computes the
10 | % derivatives of the blocks projected onto DZDY. Note that MASK must
11 | % be specified in order to compute the derivative consistently with
12 | % the MASK randomly sampled in the forward pass. DZDX and DZDY have
13 | % the same dimensions as X and Y respectively.
14 | %
15 | % Note that in the original paper on dropout, at test time the
16 | % network weights for the dropout layers are scaled down to
17 | % compensate for having all the neurons active. In this
18 | % implementation the dropout function itself already does this
19 | % compensation during training. So at test time no alterations are
20 | % required.
21 |
22 | % Copyright (C) 2014-15 Andrea Vedaldi.
23 | % All rights reserved.
24 | %
25 | % This file is part of the VLFeat library and is made available under
26 | % the terms of the BSD license (see the COPYING file).
27 |
28 | opts.rate = 0.5 ;
29 | opts.mask = [] ;
30 |
31 | backMode = numel(varargin) > 0 && ~ischar(varargin{1}) ;
32 | if backMode
33 | dzdy = varargin{1} ;
34 | opts = vl_argparse(opts, varargin(2:end)) ;
35 | else
36 | opts = vl_argparse(opts, varargin) ;
37 | end
38 |
39 | % determine mask
40 | mask = opts.mask ;
41 | scale = single(1 / (1 - opts.rate)) ;
42 | if backMode && isempty(mask)
43 | warning('vl_nndropout: when using in backward mode, the mask should be specified') ;
44 | end
45 | if isempty(mask)
46 | if isa(x,'gpuArray')
47 | mask = scale * single(gpuArray.rand(size(x)) >= opts.rate) ;
48 | else
49 | mask = scale * single(rand(size(x)) >= opts.rate) ;
50 | end
51 | end
52 |
53 | % do job
54 | if ~backMode
55 | y = mask .* x ;
56 | else
57 | y = mask .* dzdy ;
58 | end
59 |
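A brief sketch of the forward/backward pairing described above, reusing the sampled mask in the backward pass as required:

    x = randn(4,4,8,2,'single') ;
    [y,mask] = vl_nndropout(x,'rate',0.5) ;   % forward: sample and apply mask
    dzdy = randn(size(y),'single') ;
    dzdx = vl_nndropout(x,dzdy,'mask',mask) ; % backward: reuse the same mask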
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnnoffset.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnoffset(x, param, dzdy)
2 | %VL_NNNOFFSET CNN norm-dependent offset.
3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the
4 | % weighted norm of the feature channels:
5 | %
6 | Y(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2)
7 | %
8 | % where
9 | %
10 | L(i,j) = sum_k X(i,j,k)^2
11 | %
12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the
13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
14 | % as X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | L = sum(x.^2,3) ;
23 | L = max(L, single(1e-8)) ;
24 | param = single(param) ;
25 |
26 | if nargin <= 2
27 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ;
28 | else
29 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ;
30 | end
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnnormalize.m:
--------------------------------------------------------------------------------
1 | %VL_NNNORMALIZE CNN LRN normalization.
2 | % Y = VL_NNNORMALIZE(X, PARAM) performs feature-wise sliding window
3 | % normalization of the image X. The normalized output is given by:
4 | %
5 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA
6 | %
7 | % where the normalising factor is
8 | %
9 | % L(i,j,k) = KAPPA + ALPHA * sum_{q in Q(k)} X(i,j,q)^2,
10 | %
11 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The
12 | % window Q(k) itself is defined as:
13 | %
14 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))].
15 | %
16 | % where D is the number of feature dimensions in X. Note in
17 | % particular that, by setting N >= 2D, the function can be used to
18 | % normalize the whole feature vector.
19 | %
20 | % DZDX = VL_NNNORMALIZE(X, PARAM, DZDY) computes the derivative of
21 | % the block projected onto DZDY. DZDX and DZDY have the same
22 | % dimensions as X and Y respectively.
23 |
24 | % Copyright (C) 2014 Andrea Vedaldi.
25 | % All rights reserved.
26 | %
27 | % This file is part of the VLFeat library and is made available under
28 | % the terms of the BSD license (see the COPYING file).
29 |
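For example, following the note above, a window of size N >= 2D covers the whole feature vector; with KAPPA=0, ALPHA=1, BETA=0.5 this amounts to L2 normalization:

    x = randn(1,1,10,1,'single') ;
    y = vl_nnnormalize(x, [20, 0, 1, .5]) ; % N=20 >= 2*10
    sum(y(:).^2)                            % approximately 1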
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnormalizelp(x,dzdy,varargin)
2 | %VL_NNNORMALIZELP CNN Lp normalization
3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial
4 | % location in the array X:
5 | %
6 | % Y(i,j,k) = X(i,j,k) / (epsilon + sum_q X(i,j,q)^p)^(1/p)
7 | %
8 | %
9 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the
10 | % function with respect to X projected onto DZDY.
11 | %
12 | % Options:
13 | %
14 | % `p`:: 2
15 | % The exponent of the Lp norm. Warning: currently only even
16 | % exponents are supported.
17 | %
18 | % `epsilon`:: 0.01
19 | % The constant added to the sum of p-th powers before taking
20 | % the p-th root (see the formula above).
21 |
22 | opts.epsilon = 1e-2 ;
23 | opts.p = 2 ;
24 | opts = vl_argparse(opts, varargin) ;
25 |
26 | massp = (sum(x.^opts.p,3) + opts.epsilon) ;
27 | mass = massp.^(1/opts.p) ;
28 | y = bsxfun(@rdivide, x, mass) ;
29 |
30 | if nargin < 2 || isempty(dzdy)
31 | return ;
32 | else
33 | dzdy = bsxfun(@rdivide, dzdy, mass) ;
34 | y = dzdy - bsxfun(@times, sum(dzdy .* x, 3), bsxfun(@rdivide, x.^(opts.p-1), massp)) ;
35 | end
36 |
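A quick check of the behaviour implemented above (default p = 2):

    x = randn(3,3,8,1,'single') ;
    y = vl_nnnormalizelp(x) ;
    sum(y.^2,3) % close to 1 at every spatial location (up to epsilon)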
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnpool.m:
--------------------------------------------------------------------------------
1 | %VL_NNPOOL CNN pooling.
2 | % Y = VL_NNPOOL(X, POOL) applies the pooling operator to all
3 | % channels of the data X using a square filter of size POOL. X is a
4 | % SINGLE array of dimension H x W x D x N where (H,W) are the
5 | % height and width of the map stack, D is the image depth (number
6 | % of feature channels) and N the number of images in the stack.
7 | %
8 | % Y = VL_NNPOOL(X, [POOLY, POOLX]) uses a rectangular filter of
9 | % height POOLY and width POOLX.
10 | %
11 | % DZDX = VL_NNPOOL(X, POOL, DZDY) computes the derivatives of the
12 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
13 | % as X and Y respectively.
14 | %
15 | % VL_NNPOOL(..., 'option', value, ...) takes the following options:
16 | %
17 | % `Stride`:: 1
18 | % The output stride (downsampling factor). It can be either a
19 | % scalar for isotropic downsampling or a vector [STRIDEY
20 | % STRIDEX].
21 | %
22 | % `Pad`:: 0
23 | % The amount of input padding. Input images are padded with zeros
24 | % by this number of pixels on all sides before the convolution is
25 | % computed. It can also be a vector [TOP BOTTOM LEFT RIGHT] to
26 | % specify a different amount of padding in each direction. The
27 | % size of the pooling filter has to exceed the padding.
28 | %
29 | % `Method`:: 'max'
30 | % Specify method of pooling. It can be either 'max' (retain max value
31 | % over the pooling region per channel) or 'avg' (compute the average
32 | % value over the pooling region per channel).
33 | %
34 | % The pooling window must not be larger than the padded image, i.e.
35 | %
36 | % 1 <= POOLY <= HEIGHT + (PADTOP + PADBOTTOM),
37 | % 1 <= POOLX <= WIDTH + (PADLEFT + PADRIGHT).
38 | %
39 | % The output Y is a SINGLE array of dimension YH x YW x D x N, i.e.
40 | % N images with D channels, where:
41 | %
42 | % YH = floor((H + (PADTOP+PADBOTTOM) - POOLY)/STRIDEY) + 1,
43 | % YW = floor((W + (PADLEFT+PADRIGHT) - POOLX)/STRIDEX) + 1.
44 | %
45 | % The derivative DZDY has the same dimension of the output Y and
46 | % the derivative DZDX has the same dimension as the input X.
47 | %
48 | % ## CUDNN SUPPORT
49 | %
50 | % If compiled in, the function will use cuDNN convolution routines
51 | % (with the exception of asymmetric left-right or top-bottom
52 | % padding and average pooling that triggers a bug in cuDNN). You
53 | % can use the 'NoCuDNN' option to disable cuDNN or 'cuDNN' to
54 | % activate it back again (the choice sticks until MATLAB purges the
55 | % MEX files for any reason).
56 |
57 | % Copyright (C) 2014 Andrea Vedaldi, Karel Lenc, and Max Jaderberg.
58 | % Copyright (C) 2015 Andrea Vedaldi and Karel Lenc.
59 | % All rights reserved.
60 | %
61 | % This file is part of the VLFeat library and is made available under
62 | % the terms of the BSD license (see the COPYING file).
63 |
64 |
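A minimal sketch illustrating the output-size formula above:

    x = randn(32,32,16,1,'single') ;
    y = vl_nnpool(x,[2 2],'Stride',2,'Method','max') ;
    % YH = floor((32 - 2)/2) + 1 = 16, so size(y) is [16 16 16 1]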
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnrelu.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnrelu(x,dzdy,varargin)
2 | %VL_NNRELU CNN rectified linear unit.
3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data
4 | % X. X can have arbitrary size.
5 | %
6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block
7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
8 | % X and Y respectively.
9 | %
10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options:
11 | %
12 | % `Leak`:: 0
13 | % Set the leak factor, a non-negative number. Y is equal to X if
14 | % X is not smaller than zero; otherwise, Y is equal to X
15 | % multiplied by the leak factor. By default, the leak factor is
16 | % zero; for positive values one obtains the leaky ReLU
17 | % unit.
18 | %
19 | % ADVANCED USAGE
20 | %
21 | % As a further optimization, in the backward computation it is
22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then
23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY).
24 | % This is useful because it means that the buffer X does not need to
25 | % be remembered in the backward pass.
26 |
27 | % Copyright (C) 2014-15 Andrea Vedaldi.
28 | % All rights reserved.
29 | %
30 | % This file is part of the VLFeat library and is made available under
31 | % the terms of the BSD license (see the COPYING file).
32 |
33 | opts.leak = 0 ;
34 | opts = vl_argparse(opts, varargin) ;
35 |
36 | if opts.leak == 0
37 | if nargin <= 1 || isempty(dzdy)
38 | y = max(x, single(0)) ;
39 | else
40 | y = dzdy .* (x > single(0)) ;
41 | end
42 | else
43 | if nargin <= 1 || isempty(dzdy)
44 | y = x .* (opts.leak + (1 - opts.leak) * single(x > 0)) ;
45 | else
46 | y = dzdy .* (opts.leak + (1 - opts.leak) * single(x > 0)) ;
47 | end
48 | end
49 |
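A sketch of the memory-saving trick described under ADVANCED USAGE; it holds for the default zero leak, since then Y > 0 exactly where X > 0:

    x = randn(10,10,4,2,'single') ;
    y = vl_nnrelu(x) ;
    dzdy = randn(size(y),'single') ;
    dzdx1 = vl_nnrelu(x,dzdy) ; % backward from the input buffer
    dzdx2 = vl_nnrelu(y,dzdy) ; % identical result from the output buffer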
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnsigmoid.m:
--------------------------------------------------------------------------------
1 | function out = vl_nnsigmoid(x,dzdy)
2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit.
3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can
4 | % have an arbitrary size. The sigmoid is defined as follows:
5 | %
6 | % SIGMOID(X) = 1 / (1 + EXP(-X)).
7 | %
8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the
9 | % block projected onto DZDY. DZDX and DZDY have the same
10 | % dimensions as X and Y respectively.
11 |
12 | % Copyright (C) 2015 Karel Lenc.
13 | % All rights reserved.
14 | %
15 | % This file is part of the VLFeat library and is made available under
16 | % the terms of the BSD license (see the COPYING file).
17 |
18 | y = 1 ./ (1 + exp(-x));
19 |
20 | if nargin <= 1 || isempty(dzdy)
21 | out = y ;
22 | else
23 | out = dzdy .* (y .* (1 - y)) ;
24 | end
25 |
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnsoftmax.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmax(X,dzdY)
2 | %VL_NNSOFTMAX CNN softmax.
3 | % Y = VL_NNSOFTMAX(X) applies the softmax operator to the data X. X
4 | % has dimension H x W x D x N, packing N arrays of H x W
5 | % D-dimensional vectors.
6 | %
7 | % D can be thought of as the number of possible classes and the
8 | % function computes the softmax along the D dimension. Often W=H=1,
9 | % but this is not a requirement, as the operator is applied
10 | % convolutionally at all spatial locations.
11 | %
12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block
13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
14 | % X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ; % subtract per-location max for numerical stability
23 | L = sum(E,3) ;
24 | Y = bsxfun(@rdivide, E, L) ;
25 |
26 | if nargin <= 1, return ; end
27 |
28 | % backward
29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
30 |
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmaxloss(X,c,dzdy)
2 | %VL_NNSOFTMAXLOSS CNN combined softmax and logistic loss.
3 | % **Deprecated: use `vl_nnloss` instead**
4 | %
5 | % Y = VL_NNSOFTMAXLOSS(X, C) applies the softmax operator followed by
6 | % the logistic loss to the data X. X has dimension H x W x D x N,
7 | % packing N arrays of H x W D-dimensional vectors.
8 | %
9 | % C contains the class labels, which should be integers in the range
10 | % 1 to D. C can be an array with either N elements or with
11 | % dimensions H x W x 1 x N. In the first case, a given class label is
12 | % applied at all spatial locations; in the second case, different
13 | % class labels can be specified for different locations.
14 | %
15 | % DZDX = VL_NNSOFTMAXLOSS(X, C, DZDY) computes the derivative of the
16 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
17 | % as X and Y respectively.
18 |
19 | % Copyright (C) 2014-15 Andrea Vedaldi.
20 | % All rights reserved.
21 | %
22 | % This file is part of the VLFeat library and is made available under
23 | % the terms of the BSD license (see the COPYING file).
24 |
25 | %X = X + 1e-6 ;
26 | sz = [size(X,1) size(X,2) size(X,3) size(X,4)] ;
27 |
28 | if numel(c) == sz(4)
29 | % one label per image
30 | c = reshape(c, [1 1 1 sz(4)]) ;
31 | end
32 | if size(c,1) == 1 & size(c,2) == 1
33 | c = repmat(c, [sz(1) sz(2)]) ;
34 | end
35 |
36 | % one label per spatial location
37 | sz_ = [size(c,1) size(c,2) size(c,3) size(c,4)] ;
38 | assert(isequal(sz_, [sz(1) sz(2) sz_(3) sz(4)])) ;
39 | assert(sz_(3)==1 | sz_(3)==2) ;
40 |
41 | % class c = 0 skips a spatial location
42 | mass = single(c(:,:,1,:) > 0) ;
43 | if sz_(3) == 2
44 | % the second channel of c (if present) is used as weights
45 | mass = mass .* c(:,:,2,:) ;
46 | c(:,:,2,:) = [] ;
47 | end
48 |
49 | % convert class labels to linear indices into X (spatial offset + class offset + image offset)
50 | c = c - 1 ;
51 | c_ = 0:numel(c)-1 ;
52 | c_ = 1 + ...
53 | mod(c_, sz(1)*sz(2)) + ...
54 | (sz(1)*sz(2)) * max(c(:), 0)' + ...
55 | (sz(1)*sz(2)*sz(3)) * floor(c_/(sz(1)*sz(2))) ;
56 |
57 | % compute softmaxloss
58 | Xmax = max(X,[],3) ;
59 | ex = exp(bsxfun(@minus, X, Xmax)) ;
60 |
61 | %n = sz(1)*sz(2) ;
62 | if nargin <= 2
63 | t = Xmax + log(sum(ex,3)) - reshape(X(c_), [sz(1:2) 1 sz(4)]) ;
64 | Y = sum(sum(sum(mass .* t,1),2),4) ;
65 | else
66 | Y = bsxfun(@rdivide, ex, sum(ex,3)) ;
67 | Y(c_) = Y(c_) - 1;
68 | Y = bsxfun(@times, Y, bsxfun(@times, mass, dzdy)) ;
69 | end
70 |
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_nnspnorm.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnspnorm(x, param, dzdy)
2 | %VL_NNSPNORM CNN spatial normalization.
3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of
4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and
5 | % PW define the size of the spatial neighbourhood used for
6 | % normalization.
7 | %
8 | % For each feature channel, the function computes the sum of squares
9 | % of X inside each rectangle, N2(i,j). It then divides each element
10 | % of X as follows:
11 | %
12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA.
13 | %
14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the
15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
16 | % as X and Y respectively.
17 |
18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | pad = floor((param(1:2)-1)/2) ;
25 | pad = [pad ; param(1:2)-1-pad] ;
26 |
27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ;
28 | f = 1 + param(3) * n2 ;
29 |
30 | if nargin <= 2 || isempty(dzdy)
31 | y = f.^(-param(4)) .* x ;
32 | else
33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ;
34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ;
35 | end
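Note that the implementation above obtains the neighbourhood statistic N2 by average-pooling X.^2 with vl_nnpool. A brief usage sketch (parameters as in the test suite):

    x = randn(16,16,4,1,'single') ;
    y = vl_nnspnorm(x, [3 3 0.1 0.75]) ; % 3x3 neighbourhood, ALPHA=0.1, BETA=0.75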
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_rootnn.m:
--------------------------------------------------------------------------------
1 | function root = vl_rootnn()
2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox.
3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox.
4 |
5 | % Copyright (C) 2014 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = fileparts(fileparts(mfilename('fullpath'))) ;
12 |
--------------------------------------------------------------------------------
/matconvnet/matlab/vl_setupnn.m:
--------------------------------------------------------------------------------
1 | function vl_setupnn()
2 | %VL_SETUPNN Setup the MatConvNet toolbox.
3 | % VL_SETUPNN() adds the MatConvNet toolbox to the MATLAB path.
4 |
5 | % Copyright (C) 2014 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = vl_rootnn() ;
12 | addpath(fullfile(root, 'matlab')) ;
13 | addpath(fullfile(root, 'matlab', 'mex')) ;
14 | addpath(fullfile(root, 'matlab', 'simplenn')) ;
15 | addpath(fullfile(root, 'matlab', 'xtest')) ;
16 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnbnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnbnorm < nntest
2 | properties (TestParameter)
3 | rows = {2 8 13}
4 | cols = {2 8 17}
5 | numDims = {1 3 4}
6 | batchSize = {2 7}
7 | end
8 | methods (Test)
9 | function basic(test, rows, cols, numDims, batchSize)
10 | r = rows ;
11 | c = cols ;
12 | nd = numDims ;
13 | bs = batchSize ;
14 | x = test.randn(r, c, nd, bs, 'single') ;
15 | % G and B are one scale and one bias per feature channel,
16 | % passed as plain vectors
17 | g = test.randn(nd, 1, 'single');
18 | b = test.randn(nd, 1, 'single');
19 |
20 | y = vl_nnbnorm(x,g,b) ;
21 | dzdy = test.randn(size(y), 'single') ;
22 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ;
23 |
24 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-2, -1e-3) ;
25 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, test.range * 1e-2, -1e-3) ;
26 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, test.range * 1e-2, -1e-3) ;
27 | end
28 | end
29 | end
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnconcat.m:
--------------------------------------------------------------------------------
1 | classdef nnconcat < nntest
2 | methods (Test)
3 | function basic(test)
4 | pick = @(i,x) x{i} ;
5 | sz = [4,5,10,3] ;
6 | for dim = 1:3
7 | sz1 = sz ; sz1(dim) = 3 ;
8 | sz2 = sz ; sz2(dim) = 7 ;
9 | sz3 = sz ; sz3(dim) = 2 ;
10 | x1 = test.randn(sz1,'single') ;
11 | x2 = test.randn(sz2,'single') ;
12 | x3 = test.randn(sz3,'single') ;
13 |
14 | y = vl_nnconcat({x1, x2, x3}, dim) ;
15 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
16 | dzdy = test.randn(size(y),'single') ;
17 | dzdx = vl_nnconcat({x1, x2, x3} ,dim, dzdy) ;
18 |
19 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
20 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
21 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
22 | end
23 | end
24 |
25 | function by_size(test)
26 | pick = @(i,x) x{i} ;
27 | sz = [4,5,10,3] ;
28 | for dim = 1:3
29 | sz1 = sz ; sz1(dim) = 3 ;
30 | sz2 = sz ; sz2(dim) = 7 ;
31 | sz3 = sz ; sz3(dim) = 2 ;
32 | x1 = test.randn(sz1,'single') ;
33 | x2 = test.randn(sz2,'single') ;
34 | x3 = test.randn(sz3,'single') ;
35 |
36 | y = vl_nnconcat({x1, x2, x3}, dim) ;
37 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
38 | dzdy = test.randn(size(y),'single') ;
39 | dzdx = vl_nnconcat({}, dim, dzdy, 'inputSizes', {sz1, sz2, sz3}) ;
40 |
41 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
42 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
43 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnconvt.m:
--------------------------------------------------------------------------------
1 | classdef nnconvt < nntest
2 | properties (TestParameter)
3 | depth = {1 2 3}
4 | numImages = {1 2 3 4}
5 | numFilters = {1 2 3}
6 | upx = {1 2 3}
7 | upy = {1 2 3}
8 | padx1 = {1 2 3}
9 | padx2 = {1 2 3}
10 | pady1 = {1 2 3}
11 | pady2 = {1 2 3}
12 | up = {1 2}
13 | fsx = {1 2}
14 | crop = {1 2 3 4 5 6 7 8}
15 | numGroups = {1 2 3}
16 | end
17 |
18 | methods (Test)
19 | function basic(test, depth, numImages, numFilters)
20 | m = depth ;
21 | n = numImages ;
22 | k = numFilters;
23 | x = test.randn(10,12,m,n,'single') ;
24 | f = test.randn(3,4,k,m,'single') ;
25 | b = test.randn(1,k,'single') ;
26 | y = vl_nnconvt(x,f,b) ;
27 | dzdy = test.randn(size(y),'single') ;
28 | [dzdx,dzdf,dzdb] = vl_nnconvt(x,f,b,dzdy) ;
29 | test.der(@(x) vl_nnconvt(x,f,b), x, dzdy, dzdx, test.range * 1e-2) ;
30 | test.der(@(f) vl_nnconvt(x,f,b), f, dzdy, dzdf, test.range * 1e-2) ;
31 | test.der(@(b) vl_nnconvt(x,f,b), b, dzdy, dzdb, test.range) ;
32 | end
33 |
34 | function upsample_crop(test,upx,upy,padx1,pady1,padx2,pady2)
35 | m = 3 ; n = 2 ; k = 3;
36 | opts = {'upsample',[upy upx],'crop',[pady1 pady2 padx1 padx2]} ;
37 | x = test.randn(5,6,m,n,'single') ;
38 | f = test.randn(3,4,k,m,'single') ;
39 | b = test.randn(1,k,'single') ;
40 | y = vl_nnconvt(x,f,b,opts{:}) ;
41 | dzdy = test.randn(size(y),'single') ;
42 | [dzdx,dzdf,dzdb] = vl_nnconvt(x,f,b,dzdy,opts{:}) ;
43 | test.der(@(x) vl_nnconvt(x,f,b,opts{:}), x, dzdy, dzdx, test.range * 1e-2) ;
44 | test.der(@(f) vl_nnconvt(x,f,b,opts{:}), f, dzdy, dzdf, test.range * 1e-2) ;
45 | test.der(@(b) vl_nnconvt(x,f,b,opts{:}), b, dzdy, dzdb, test.range) ;
46 | end
47 |
48 | function grouped_filters(test, numGroups, depth, numFilters)
49 | ng = numGroups ;
50 | m = depth ;
51 | k = numFilters ;
52 | n = 3 ;
53 | opts = {'numgroups',ng} ;
54 | x = test.randn(10,12,m*ng,n,'single') ;
55 | f = test.randn(3,4,k,m*ng,'single') ;
56 | b = test.randn(1,k*ng,'single') ;
57 | y = vl_nnconvt(x,f,b,opts{:}) ;
58 | dzdy = test.randn(size(y),'single') ;
59 | [dzdx,dzdf,dzdb] = vl_nnconvt(x,f,b,dzdy,opts{:}) ;
60 | test.der(@(x) vl_nnconvt(x,f,b,opts{:}), x, dzdy, dzdx, test.range * 1e-2) ;
61 | test.der(@(f) vl_nnconvt(x,f,b,opts{:}), f, dzdy, dzdf, test.range * 1e-2) ;
62 | test.der(@(b) vl_nnconvt(x,f,b,opts{:}), b, dzdy, dzdb, test.range) ;
63 | end
64 |
65 | function one_one_image(test,up,fsx,crop)
66 | fsx = fsx*up ;
67 | if crop > fsx-1, return ; end
68 | m = 3 ;
69 | n = 4 ;
70 | k = 3 ;
71 | fsy = fsx * 3 ;
72 | x = test.randn(1,1,m,n,'single') ;
73 | f = test.randn(fsy,fsx,k,m,'single') ;
74 | b = test.randn(1,k,'single') ;
75 | croph = floor(crop/2) ;
76 | opts = {'crop', [croph, crop-croph, croph, crop-croph], 'upsample', [up up]} ;
77 | y = vl_nnconvt(x,f,b,opts{:}) ;
78 | dzdy = test.randn(size(y),'single') ;
79 | [dzdx,dzdf,dzdb] = vl_nnconvt(x,f,b,dzdy,opts{:}) ;
80 | test.der(@(x) vl_nnconvt(x,f,b,opts{:}), x, dzdy, dzdx, test.range * 1e-2) ;
81 | test.der(@(f) vl_nnconvt(x,f,b,opts{:}), f, dzdy, dzdf, test.range * 1e-2) ;
82 | test.der(@(b) vl_nnconvt(x,f,b,opts{:}), b, dzdy, dzdb, test.range * 1e-1) ;
83 | end
84 | end
85 | end
86 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nndropout.m:
--------------------------------------------------------------------------------
1 | classdef nndropout < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(4,5,10,3,'single') ;
5 | [y,mask] = vl_nndropout(x) ;
6 | dzdy = test.randn(size(y),'single') ;
7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ;
8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ;
9 | end
10 | end
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnloss.m:
--------------------------------------------------------------------------------
1 | classdef nnloss < nntest
2 | properties (TestParameter)
3 | loss = {...
4 | 'classerror', 'log', 'softmaxlog', 'mhinge', 'mshinge', ...
5 | 'binaryerror', 'binarylog', 'logistic', 'hinge'}
6 | weighed = {false, true}
7 | end
8 |
9 | properties
10 | x
11 | end
12 |
13 | methods
14 | function [x,c,dzdy,instanceWeights] = getx(test,loss)
15 | numClasses = 3 ;
16 | numAttributes = 5 ;
17 | numImages = 3 ;
18 | w = 5 ;
19 | h = 4 ;
20 | switch loss
21 | case {'log', 'softmaxlog', 'mhinge', 'mshinge', 'classerror'}
22 | % multiclass
23 | instanceWeights = test.rand(h,w, 'single') / test.range / (h*w) ;
24 | c = randi(numClasses, h,w,1,numImages) ;
25 | c = test.toDevice(c) ;
26 | otherwise
27 | % binary
28 | instanceWeights = test.rand(h,w, numAttributes, 'single') / test.range / (h*w*numAttributes) ;
29 | c = sign(test.randn(h,w,numAttributes, numImages)) ;
30 | end
31 | c = single(c) ;
32 | switch loss
33 | case {'log'}
34 | x = test.rand(h,w, numClasses, numImages, 'single') / test.range * .60 + .20 ;
35 | x = bsxfun(@rdivide, x, sum(x,3)) ;
36 | case {'binarylog'}
37 | x = test.rand(h,w, numAttributes, numImages, 'single') / test.range * .60 + .20 ;
38 | case {'softmaxlog'}
39 | x = test.randn(h,w, numClasses, numImages, 'single') / test.range ;
40 | case {'mhinge', 'mshinge', 'classerror'}
41 | x = test.randn(h,w, numClasses, numImages, 'single') / test.range ;
42 | case {'hinge', 'logistic', 'binaryerror'}
43 | x = test.randn(h,w, numAttributes, numImages, 'single') / test.range ;
44 | end
45 | dzdy = test.randn(1,1) / test.range ;
46 | end
47 | end
48 |
49 | methods (Test)
50 | function nullcategories(test, loss, weighed)
51 | [x,c,dzdy,instanceWeights] = test.getx(loss) ;
52 | % make a number of categories null
53 | c(:) = c(:) .* (test.randn(numel(c),1) > 0) ;
54 | opts = {'loss',loss} ;
55 | if weighed, opts = {opts{:}, 'instanceWeights', instanceWeights} ; end
56 | y = vl_nnloss(x,c,[],opts{:}) ;
57 | dzdx = vl_nnloss(x,c,dzdy,opts{:}) ;
58 | test.der(@(x) vl_nnloss(x,c,[],opts{:}), x, dzdy, dzdx, 0.001, -5e-1) ;
59 | end
60 |
61 | function convolutional(test, loss, weighed)
62 | [x,c,dzdy,instanceWeights] = test.getx(loss) ;
63 | opts = {'loss',loss} ;
64 | if weighed, opts = {opts{:}, 'instanceWeights', instanceWeights} ; end
65 | y = vl_nnloss(x,c,[],opts{:}) ;
66 | dzdx = vl_nnloss(x,c,dzdy,opts{:}) ;
67 | test.der(@(x) vl_nnloss(x,c,[],opts{:}), x, dzdy, dzdx, 0.001, -5e-1) ;
68 | end
69 |
70 | end
71 | end
72 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnnormalize.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalize < nntest
2 | properties (TestParameter)
3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17}
4 | sgroup = {2 3 4 5 6 7}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, group)
9 | param = [group, .1, .5, .75] ;
10 |
11 | x = test.randn(3,2,10,4,'single') ;
12 | y = vl_nnnormalize(x,param) ;
13 | dzdy = test.rand(size(y),'single')-0.5 ;
14 | dzdx = vl_nnnormalize(x,param,dzdy) ;
15 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ;
16 | end
17 |
18 | function compare_to_naive(test, sgroup)
19 | param = [sgroup, .1, .5, .75] ;
20 | x = test.randn(3,2,10,4,'single') ;
21 | y = vl_nnnormalize(gather(x),param) ;
22 | y_ = zeros(size(y),'single') ;
23 | x_ = gather(x) ;
24 | for i=1:size(x,1)
25 | for j=1:size(x,2)
26 | for n=1:size(x,4)
27 | t = zeros(1,1,size(x,3),1) ;
28 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ...
29 | ones(param(1),1), 'same')).^(-param(4)) ;
30 | y_(i,j,:,n) = x_(i,j,:,n) .* t ;
31 | end
32 | end
33 | end
34 | test.eq(y,y_) ;
35 | end
36 |
37 | function l2(test)
38 | x = test.randn(1,1,10,1,'single') ;
39 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ;
40 | test.eq(sum(y(:).^2), single(1), 1e-2) ;
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalizelp < nntest
2 | properties (TestParameter)
3 | h = {1 2 3 4}
4 | w = {1 2 3 4}
5 | d = {2 3 4}
6 | p = {2 4}
7 | end
8 |
9 | methods (Test)
10 | function basicl2(test, h,w,d)
11 | x = test.randn(h,w,d,3,'single') ;
12 | y = vl_nnnormalizelp(x) ;
13 | dzdy = test.rand(size(y),'single')-0.5 ;
14 | dzdx = vl_nnnormalizelp(x,dzdy) ;
15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, test.range * 1e-3, 0.3) ;
16 | end
17 |
18 | function lp(test, p)
19 | x = test.randn(2,3,5,3,'single') / test.range ;
20 | y = vl_nnnormalizelp(x, [], 'p', p) ;
21 | dzdy = test.rand(size(y),'single')-0.5 ;
22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ;
23 | test.der(@(x) vl_nnnormalizelp(x,[],'p',p), x, dzdy, dzdx, 1e-4, 0.3) ;
24 | end
25 |
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnoffset.m:
--------------------------------------------------------------------------------
1 | classdef nnoffset < nntest
2 | methods (Test)
3 | function basic(test)
4 | param = [.34, .5] ;
5 | x = test.randn(4,5,10,3,'single') ;
6 | y = vl_nnnoffset(x,param) ;
7 | dzdy = test.randn(size(y),'single') ;
8 | dzdx = vl_nnnoffset(x,param,dzdy) ;
9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ;
10 | end
11 | end
12 | end
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnpdist.m:
--------------------------------------------------------------------------------
1 | classdef nnpdist < nntest
2 | properties (TestParameter)
3 | oneToOne = {false, true}
4 | noRoot = {false, true}
5 | p = {.5 1 2 3}
6 | end
7 | methods (Test)
8 | function basic(test,oneToOne, noRoot, p)
9 | h = 13 ;
10 | w = 17 ;
11 | d = 4 ;
12 | n = 5 ;
13 | x = test.randn(h,w,d,n,'single') ;
14 | if oneToOne
15 | x0 = test.randn(h,w,d,n,'single') ;
16 | else
17 | x0 = test.randn(1,1,d,n) ;
18 | end
19 | y = vl_nnpdist(x, x0, p, 'noRoot',noRoot) ;
20 |
21 | % make sure x and x0 are not too close in any dimension, as
22 | % this may be a problem for the finite difference
23 | % derivatives: a difference could approach 0, where some
24 | % p-norms are not differentiable
25 |
26 | s = abs(bsxfun(@minus, x, x0)) < 5*test.range*1e-3 ;
27 | x(s) = x(s) + 5*test.range ;
28 |
29 | dzdy = test.rand(h, w, 1, n) ;
30 | dzdx = vl_nnpdist(x,x0,p,dzdy,'noRoot',noRoot) ;
31 | test.der(@(x) vl_nnpdist(x,x0,p,'noRoot',noRoot), x, dzdy, dzdx, test.range * 1e-4) ;
32 | end
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnrelu.m:
--------------------------------------------------------------------------------
1 | classdef nnrelu < nntest
2 | properties
3 | x
4 | end
5 |
6 | methods (TestMethodSetup)
7 | function data(test,device)
8 | % make sure that all elements in x are different. in this way,
9 | % we can compute numerical derivatives reliably by adding a delta < .5.
10 | x = test.randn(15,14,3,2,'single') ;
11 | x(:) = randperm(numel(x))' ;
12 | % avoid non-diff value for test
13 | x(x==0)=1 ;
14 | test.x = x ;
15 | test.range = 10 ;
16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
17 | end
18 | end
19 |
20 | methods (Test)
21 | function basic(test)
22 | x = test.x ;
23 | y = vl_nnrelu(x) ;
24 | dzdy = test.randn(size(y),'single') ;
25 | dzdx = vl_nnrelu(x,dzdy) ;
26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ;
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnsigmoid.m:
--------------------------------------------------------------------------------
1 | classdef nnsigmoid < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(5,5,1,1,'single')/test.range ;
5 | y = vl_nnsigmoid(x) ;
6 | dzdy = test.randn(size(y),'single') ;
7 | dzdx = vl_nnsigmoid(x,dzdy) ;
8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ;
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnsoftmax.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmax < nntest
2 | properties (TestParameter)
3 | h = {1 2 3}
4 | w = {1 2}
5 | end
6 | methods (Test)
7 | function basic(test,h,w)
8 | d = 10 ;
9 | n = 3 ;
10 | x = test.randn(h,w,d,n,'single')/test.range ;
11 | y = vl_nnsoftmax(x) ;
12 | dzdy = test.randn(size(y),'single') ;
13 | dzdx = vl_nnsoftmax(x, dzdy) ;
14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ;
15 | end
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmaxloss < nntest
2 | properties (TestParameter)
3 | weighed = {false true}
4 | multilab = {false true}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, multilab, weighed)
9 | C = 10 ;
10 | n = 3 ;
11 | if multilab
12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ;
13 | else
14 | c = reshape([7 2 1],1,1,1,[]) ;
15 | end
16 | if weighed
17 | c = cat(3, c, test.rand(size(c),'single')) ;
18 | end
19 |
20 | % compare direct and indirect composition; this cannot
21 | % take large test.ranges
22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative
23 | y = vl_nnsoftmaxloss(x,c) ;
24 | if size(c,3) == 1
25 | opts = {'loss','log'} ;
26 | else
27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ;
28 | end
29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ;
30 | dzdy = test.randn(size(y)) ;
31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ;
33 | test.eq(y,y_) ;
34 | test.eq(dzdx,dzdx_) ;
35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, dzdy, dzdx, 0.001, -5e1) ;
36 |
37 | % now larger input range
38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative
39 | y = vl_nnsoftmaxloss(x,c) ;
40 | dzdy = test.randn(size(y)) ;
41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ...
43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ;
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nnspnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnspnorm < nntest
2 | methods (Test)
3 | function basic(test)
4 | h = 13 ;
5 | w = 17 ;
6 | d = 4 ;
7 | n = 5 ;
8 | param = [3, 3, 0.1, 0.75] ;
9 | x = test.randn(h,w,d,n,'single') ;
10 | y = vl_nnspnorm(x, param) ;
11 | dzdy = test.rand(h, w, d, n) ;
12 | dzdx = vl_nnspnorm(x, param, dzdy) ;
13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/suite/nntest.m:
--------------------------------------------------------------------------------
1 | classdef nntest < matlab.unittest.TestCase
2 | properties (MethodSetupParameter)
3 | device = {'cpu', 'gpu'}
4 | end
5 |
6 | properties
7 | randn
8 | rand
9 | toDevice
10 | range = 128
11 | end
12 |
13 | methods (TestMethodSetup)
14 | function generators(test, device)
15 | range = 128 ;
16 | seed = 0 ;
17 | switch device
18 | case 'gpu'
19 | gpuDevice ;
20 | test.randn = @(varargin) range * gpuArray.randn(varargin{:}) ;
21 | test.rand = @(varargin) range * gpuArray.rand(varargin{:}) ;
22 | test.toDevice = @(x) gpuArray(x) ;
23 | parallel.gpu.rng(seed, 'combRecursive') ;
24 | case 'cpu'
25 | test.randn = @(varargin) range * randn(varargin{:}) ;
26 | test.rand = @(varargin) range * rand(varargin{:}) ;
27 | test.toDevice = @(x) gather(x) ;
28 | rng(seed, 'combRecursive') ;
29 | end
30 | end
31 | end
32 |
33 | methods
34 | function der(test, g, x, dzdy, dzdx, delta, tau)
35 | if nargin < 7
36 | tau = [] ;
37 | end
38 | dzdx_ = test.numder(g, x, dzdy, delta) ;
39 | test.eq(gather(dzdx_), gather(dzdx), tau) ;
40 | end
41 |
42 | function eq(test,a,b,tau)
43 | a = gather(a) ;
44 | b = gather(b) ;
45 | if nargin > 3 && ~isempty(tau) && tau < 0
46 | tau_min = -tau ;
47 | tau = [] ;
48 | else
49 | tau_min = 0 ;
50 | end
51 | if nargin < 4 || isempty(tau)
52 | maxv = max([max(a(:)), max(b(:))]) ;
53 | minv = min([min(a(:)), min(b(:))]) ;
54 | tau = max(1e-2 * (maxv - minv), 1e-3 * max(maxv, -minv)) ;
55 | end
56 | tau = max(tau, tau_min) ;
57 | if isempty(tau) % can happen if a and b are empty
58 | tau = 0 ;
59 | end
60 | tol = matlab.unittest.constraints.AbsoluteTolerance(single(tau)) ;
61 | test.verifyThat(a, ...
62 | matlab.unittest.constraints.IsEqualTo(b, 'Within', tol)) ;
63 | end
64 | end
65 |
66 | methods (Static)
67 | function dzdx = numder(g, x, dzdy, delta)
68 | if nargin < 4
69 | delta = 1e-3 ;
70 | end
71 | dzdy = gather(dzdy) ;
72 | y = gather(g(x)) ;
73 | dzdx = zeros(size(x),'double') ;
74 | for i=1:numel(x)
75 | x_ = x ;
76 | x_(i) = x_(i) + delta ;
77 | y_ = gather(g(x_)) ;
78 | factors = dzdy .* (y_ - y)/delta ;
79 | dzdx(i) = dzdx(i) + sum(factors(:)) ;
80 | end
81 | dzdx = single(dzdx) ;
82 | end
83 | end
84 | end
85 |
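These suites plug into the standard MATLAB unit-testing framework; for example, assuming MatConvNet is on the path (via vl_setupnn) and a recent MATLAB:

    suite = matlab.unittest.TestSuite.fromClass(?nnrelu) ;
    results = run(suite) ; % runs the cpu and gpu parameterizations
    table(results)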
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_bench_bnorm.m:
--------------------------------------------------------------------------------
1 | function vl_bench_bnorm(gpu)
2 | if nargin < 1
3 | gpu = false ;
4 | end
5 |
6 | T = 100 ;
7 | x = randn(64,64,32,32,'single') ;
8 | g = randn(32,1,'single') ;
9 | b = randn(32,1,'single') ;
10 |
11 | if gpu
12 | x = gpuArray(x) ;
13 | g = gpuArray(g) ;
14 | b = gpuArray(b) ;
15 | end
16 |
17 | tic
18 | for t=1:T
19 | y = vl_nnbnorm(x,g,b) ;
20 | end
21 | if gpu, wait(gpuDevice) ; end
22 | fprintf('new: %f\n',toc);
23 |
24 | tic
25 | for t=1:T
26 | y_ = vl_nnbnorm_old(x,g,b) ;
27 | end
28 | if gpu, wait(gpuDevice) ; end
29 | fprintf('old: %f\n',toc);
30 |
31 | dzdy = randn(size(y),'single') ;
32 | if gpu
33 | dzdy = gpuArray(dzdy) ;
34 | end
35 |
36 | tic
37 | for t=1:T
38 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ; % keep g and b intact for the old-code run below
39 | end
40 | if gpu, wait(gpuDevice) ; end
41 | fprintf('new deriv: %f\n',toc);
42 |
43 | tic
44 | for t=1:T
45 | [dzdx_,dzdg_,dzdb_] = vl_nnbnorm_old(x,g,b,dzdy) ;
46 | end
47 | if gpu, wait(gpuDevice) ; end
48 | fprintf('old deriv: %f\n',toc);
49 |
50 | vl_testsim(y,y_);
51 | vl_testsim(dzdx,dzdx_);
52 | vl_testsim(dzdg,dzdg_);
53 | vl_testsim(dzdb,dzdb_);
54 | end
55 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_bench_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | % VL_BENCH_IMREADJPEG Evaluates the speed of imreadjpeg
2 |
3 | numThreads = 4 ;
4 | base = 'data/bench-imreadjpeg' ;
5 |
6 | files = {} ;
7 | files = dir(fullfile(base,'*.jpg')) ;
8 | files = fullfile(base, {files.name}) ;
9 | if numel(files) > 256, files = files(1:256) ; end
10 |
11 | for preallocate = [true, false]
12 | opts={'verbose','verbose', 'preallocate', preallocate} ;
13 | for t=1:4
14 | % simple read
15 | fprintf('direct read single thread\n') ;
16 | clear ims ;
17 | tic ;
18 | ims = vl_imreadjpeg(files, 'numThreads', 1, opts{:}) ;
19 | directSingle(t) = toc ;
20 | fprintf(' done\n') ;
21 | pause(1) ;
22 |
23 | % simple read
24 | fprintf('direct read multi thread\n') ;
25 | clear ims ;
26 | tic ;
27 | ims = vl_imreadjpeg(files, 'numThreads', numThreads, opts{:}) ;
28 | direct(t) = toc ;
29 | fprintf(' done\n') ;
30 | pause(1) ;
31 |
32 | % threaded read
33 | fprintf('issue prefetch\n') ;
34 | tic ;
35 | vl_imreadjpeg(files, 'prefetch', opts{:}) ;
36 | prefetch(t) = toc ;
37 | fprintf(' done [pause 6]\n') ;
38 | pause(6)
39 |
40 | fprintf('prefetched read\n') ;
41 | clear ims_ ; % do not account for the time required to delete this
42 | tic ;
43 | ims_ = vl_imreadjpeg(files, opts{:}) ;
44 | indirect(t) = toc ;
45 | pause(1) ;
46 | end
47 |
48 | n = numel(ims) ;
49 | fprintf('** test results preallocate %d\n', preallocate) ;
50 | fprintf('\tsingle thread: %.1f pm %.1f\n', mean(n./directSingle), std(n./directSingle)) ;
51 | fprintf('\t%d threads: %.1f pm %.1f\n', numThreads, mean(n./direct), std(n./direct)) ;
52 | fprintf('\tissue prefetch: %.1f pm %.1f\n', mean(n./prefetch), std(n./prefetch)) ;
53 | fprintf('\tretrieve prefetched: %.1f pm %.1f\n', mean(n./indirect), std(n./indirect)) ;
54 | fprintf('\n\n') ;
55 | end
56 |
57 | return
58 |
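The summary lines printed above are throughputs in images per second: the number of images n divided elementwise by the measured wall-clock times. For a single timing vector the computation reduces to the following (numbers here are hypothetical):

    n = 256 ;                      % images per run
    direct = [2.1 1.9 2.0 2.0] ;   % hypothetical toc values in seconds
    fprintf('%.1f pm %.1f images/s\n', mean(n./direct), std(n./direct)) ;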
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_nnbnorm_old.m:
--------------------------------------------------------------------------------
1 | function [y,dzdg,dzdb] = vl_nnbnorm_old(x,g,b,varargin)
2 | % VL_NNBNORM CNN batch normalisation
3 | % Y = VL_NNBNORM(X,G,B) computes the batch normalization of the
4 | % input X. This is defined as:
5 | %
6 | % Y(i,j,k,t) = G(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + B(k)
7 | %
8 | % where
9 | %
10 | % mu(k) = mean_ijt X(i,j,k,t),
11 | % sigma(k) = sqrt(sigma2(k) + EPSILON),
12 | % sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2
13 | %
14 | % are respectively the per-channel mean, standard deviation, and
15 | % variance of the input and G(k) and B(k) define respectively a
16 | % multiplicative and additive constant to scale each input
17 | % channel. Note that statistics are computed across all feature maps
18 | % in the batch packed in the 4D tensor X. Note also that the
19 | % constant EPSILON is used to regularize the computation of sigma(k).
20 | %
21 | % [DZDX,DZDG,DZDB] = VL_NNBNORM(X,G,B,DZDY) computes the derivatives
22 | % of the network output Z with respect to X, G and B, given DZDY,
23 | % the derivative of Z with respect to the output Y of this function.
24 | %
25 | % VL_NNBNORM(..., 'Option', value) takes the following options:
26 | %
27 | % `Epsilon`:: 1e-4
28 | % Specify the EPSILON constant.
29 | %
30 | % See also: VL_NNNORMALIZE().
31 |
32 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
33 | % All rights reserved.
34 | %
35 | % This file is part of the VLFeat library and is made available under
36 | % the terms of the BSD license (see the COPYING file).
37 |
38 | % ISSUE - needs to store internal state, another reason for having classes?
39 |
40 | % -------------------------------------------------------------------------
41 | % Parse options
42 | % -------------------------------------------------------------------------
43 |
44 | opts.epsilon = 1e-4 ;
45 | backMode = numel(varargin) > 0 && ~ischar(varargin{1}) ;
46 | if backMode
47 | dzdy = varargin{1} ;
48 | opts = vl_argparse(opts, varargin(2:end)) ;
49 | else
50 | opts = vl_argparse(opts, varargin) ;
51 | end
52 |
53 | % -------------------------------------------------------------------------
54 | % Do job
55 | % -------------------------------------------------------------------------
56 |
57 | x_size = [size(x,1), size(x,2), size(x,3), size(x,4)] ;
58 | g_size = size(g) ;
59 | b_size = size(b) ;
60 | g = reshape(g, [1 x_size(3) 1]) ;
61 | b = reshape(b, [1 x_size(3) 1]) ;
62 | x = reshape(x, [x_size(1)*x_size(2) x_size(3) x_size(4)]) ;
63 |
64 | mass = prod(x_size([1 2 4])) ;
65 | mu = sum(sum(x,1),3) / mass ;
66 | y = bsxfun(@minus, x, mu); % y <- x_mu
67 | sigma2 = sum(sum(y .* y,1),3) / mass + opts.epsilon ;
68 | sigma = sqrt(sigma2) ;
69 |
70 | if ~backMode
71 | y = bsxfun(@plus, bsxfun(@times, g ./ sigma, y), b) ;
72 | else
73 | % remember: y contains x_mu
74 | dzdy = reshape(dzdy, size(x)) ;
75 | dzdg = sum(sum(dzdy .* y,1),3) ./ sigma ;
76 | dzdb = sum(sum(dzdy,1),3) ;
77 |
78 | muz = dzdb / mass;
79 | y = ...
80 | bsxfun(@times, g ./ sigma, bsxfun(@minus, dzdy, muz)) - ...
81 | bsxfun(@times, g .* dzdg ./ (sigma2 * mass), y) ;
82 |
83 | dzdg = reshape(dzdg, g_size) ;
84 | dzdb = reshape(dzdb, b_size) ;
85 | end
86 |
87 | y = reshape(y, x_size) ;
88 | end
89 |
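A quick numerical check of the forward formula documented above, recomputing the per-channel statistics directly (toy sizes; this assumes vl_nnbnorm_old is on the path and uses the default EPSILON of 1e-4):

    x = randn(5,4,3,2,'single') ;
    g = randn(3,1,'single') ;
    b = randn(3,1,'single') ;
    y = vl_nnbnorm_old(x,g,b) ;
    for k = 1:3
      xk = x(:,:,k,:) ;
      mu = mean(xk(:)) ;                            % per-channel mean
      sigma = sqrt(mean((xk(:) - mu).^2) + 1e-4) ;  % regularized std
      yk = g(k) * (xk - mu) / sigma + b(k) ;
      assert(max(abs(yk(:) - reshape(y(:,:,k,:), [], 1))) < 1e-3) ;
    end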
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_test_bnorm.m:
--------------------------------------------------------------------------------
1 | %%
2 | % Script comparing vl_nnbnorm with the reference implementation
3 | % vl_nnbnorm_old on the CPU or the GPU
4 | %%
5 | 
6 | % set gpu = false to exercise the CPU path
7 | gpu = true ;
8 |
9 | T = 1 ;
10 | x = randn(64,64,32,32,'single') ;
11 | g = randn(32,1,'single') ;
12 | b = randn(32,1,'single') ;
13 |
14 | if gpu
15 | x = gpuArray(x) ;
16 | g = gpuArray(g) ;
17 | b = gpuArray(b) ;
18 | end
19 |
20 | a=vl_nnbnorm(x,g,b);
21 | a_=vl_nnbnorm_old(x,g,b);
22 |
23 | vl_testsim(a,a_)
24 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_test_economic_relu.m:
--------------------------------------------------------------------------------
1 | % VL_TEST_ECONOMIC_RELU
2 | function vl_test_economic_relu()
3 |
4 | x = randn(11,12,8,'single');
5 | w = randn(5,6,8,9,'single');
6 | b = randn(1,9,'single') ;
7 |
8 | net.layers{1} = struct('type', 'conv', ...
9 | 'filters', w, ...
10 | 'biases', b, ...
11 | 'stride', 1, ...
12 | 'pad', 0);
13 | net.layers{2} = struct('type', 'relu') ;
14 |
15 | res = vl_simplenn(net, x) ;
16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ;
17 | clear res ;
18 |
19 | res_ = vl_simplenn(net, x, dzdy) ;
20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ;
21 |
22 | a=whos('res_') ;
23 | b=whos('res__') ;
24 | assert(a.bytes > b.bytes) ;
25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ;
26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ;
27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ;
28 |
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_test_gpureset.m:
--------------------------------------------------------------------------------
1 | for explicitMexReset = [false]
2 |
3 | % reset the same GPU device
4 | for t = 1:6
5 | if explicitMexReset, clear mex ; end
6 | if mod(t-1,2) == 0
7 | disp('vl_test_gpureset: resetting GPU') ;
8 | gpuDevice(1) ;
9 | else
10 | disp('vl_test_gpureset: not resetting GPU') ;
11 | end
12 | if t > 1, disp(a) ; end
13 | a = gpuArray(single(ones(10))) ;
14 | b = gpuArray(single(ones(5))) ;
15 | c = vl_nnconv(a,b,[],'nocudnn') ;
16 | end
17 |
18 | % passing stale GPU arrays to a MEX file after a device reset should fail cleanly
19 | a = gpuArray(single(ones(10))) ;
20 | b = gpuArray(single(ones(5))) ;
21 | c = vl_nnconv(a,b,[],'nocudnn') ;
22 |
23 | gpuDevice(1) ;
24 | disp(a) ;
25 | try
26 | c = vl_nnconv(a,b,[],'nocudnn') ;
27 | catch e
28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ;
29 | end
30 |
31 | % switch GPU devices
32 | if gpuDeviceCount > 1
33 | disp('vl_test_gpureset: test switching GPU device') ;
34 | for t = 1:gpuDeviceCount
35 | if explicitMexReset, clear mex ; end
36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ;
37 | gpuDevice(t) ;
38 | a = gpuArray(single(ones(10))) ;
39 | b = gpuArray(single(ones(5))) ;
40 | c = vl_nnconv(a,b,[],'nocudnn') ;
41 | end
42 | end
43 | end
44 |
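The failure-path assertion above hinges on a MATLAB behaviour worth making explicit: gpuDevice(1) resets the device and invalidates every existing gpuArray, so any subsequent use of such an array should raise parallel:gpu:array:InvalidData. A minimal sketch of the same check (the exact error identifier may vary across MATLAB releases):

    a = gpuArray(single(ones(10))) ;
    gpuDevice(1) ;          % resetting the device invalidates a
    try
      gather(a) ;           % touching a stale array should now error
    catch e
      disp(e.identifier) ;  % expected: parallel:gpu:array:InvalidData
    end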
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_test_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | function vl_test_imreadjpeg
2 | % VL_TEST_IMREADJPEG
3 |
4 | % Test basic file reading capability
5 | for t=1:6
6 | files{t} = which(sprintf('office_%d.jpg', t)) ;
7 | end
8 | ims = vl_imreadjpeg(files) ;
9 |
10 | % Test inserting a non-image file
11 | files_ = files ;
12 | files_{3} = [mfilename('fullpath') '.m'];
13 | ims_ = vl_imreadjpeg(files_) ;
14 | for t=setdiff(1:6,3)
15 | assert(isequal(ims{t},ims_{t})) ;
16 | end
17 |
18 | % Test inserting a non-existent file
19 | files__ = files_ ;
20 | files__{4} = 'idontexist.jpg' ;
21 | ims__ = vl_imreadjpeg(files__) ;
22 | for t=setdiff(1:6,[3 4])
23 | assert(isequal(ims{t},ims__{t})) ;
24 | end
25 |
26 | for n = 1:4
27 | % Test prefetching
28 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ;
29 | ims___ = vl_imreadjpeg(files) ;
30 | assert(isequal(ims,ims___)) ;
31 |
32 | % Hardening: test prefetching, clearing mex, fetching
33 | vl_imreadjpeg(files,'prefetch') ;
34 | clear mex ;
35 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ;
36 | assert(isequal(ims,ims___)) ;
37 | end
38 |
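The prefetch calls exercised above are asynchronous: a call with the 'prefetch' option returns immediately while worker threads decode in the background, and a later call with the same file list collects the decoded images. The intended usage pattern is:

    vl_imreadjpeg(files, 'prefetch') ;   % returns immediately; decoding starts
    % ... overlap other computation here ...
    ims = vl_imreadjpeg(files) ;         % blocks until the images are ready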
--------------------------------------------------------------------------------
/matconvnet/matlab/xtest/vl_testnn.m:
--------------------------------------------------------------------------------
1 | function vl_testnn(varargin)
2 | %VL_TESTNN Run MatConvNet test suite
3 | % VL_TESTNN('cpu', true)
4 | % VL_TESTNN('gpu', true)
5 | % VL_TESTNN('command', 'nnloss')
6 |
7 | opts.cpu = true ;
8 | opts.gpu = false ;
9 | opts.command = 'nn' ;
10 | opts.break = false ;
11 | opts = vl_argparse(opts, varargin) ;
12 |
13 | import matlab.unittest.constraints.* ;
14 | import matlab.unittest.selectors.* ;
15 |
16 | % Choose which tests to run
17 | sel = HasName(StartsWithSubstring(opts.command)) ;
18 | if opts.cpu && ~opts.gpu
19 | sel = sel & HasName(ContainsSubstring('cpu')) ;
20 | end
21 | if opts.gpu && ~opts.cpu
22 | sel = sel & HasName(ContainsSubstring('gpu')) ;
23 | end
24 |
25 | % Run tests
26 | root = fileparts(mfilename('fullpath')) ;
27 | suite = matlab.unittest.TestSuite.fromFolder(fullfile(root, 'suite'), sel) ;
28 | runner = matlab.unittest.TestRunner.withTextOutput('Verbosity',3);
29 | if opts.break
30 | runner.addPlugin(matlab.unittest.plugins.StopOnFailuresPlugin) ;
31 | end
32 | result = runner.run(suite);
33 |
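Typical invocations of the driver above, following its option parsing:

    vl_testnn() ;                            % CPU tests only (the default)
    vl_testnn('cpu', false, 'gpu', true) ;   % GPU tests only
    vl_testnn('command', 'nnconv') ;         % tests whose name starts with 'nnconv'
    vl_testnn('break', true) ;               % stop in the debugger on failure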
--------------------------------------------------------------------------------
/matconvnet/utils/evaluate_ref_models.m:
--------------------------------------------------------------------------------
1 | function evaluate_ref_models()
2 | % Evaluate MatConvNet reference models to validate them
3 |
4 | addpath(fullfile(fileparts(mfilename('fullpath')), '..', 'examples')) ;
5 |
6 | models = {...
7 | 'caffe-ref', ...
8 | 'caffe-alex', ...
9 | 'vgg-s', ...
10 | 'vgg-m', ...
11 | 'vgg-f', ...
12 | 'vgg-verydeep-19', ...
13 | 'vgg-verydeep-16'} ;
14 |
15 | for i = 1:numel(models)
16 | opts.dataDir = fullfile('data', 'ram', 'ILSVRC2012') ;
17 | opts.expDir = fullfile('data','models-eval', models{i}) ;
18 | opts.imdbPath = fullfile(opts.expDir, 'imdb.mat');
19 | opts.modelPath = fullfile('data', 'models', ...
20 | sprintf('imagenet-%s.mat', models{i})) ;
21 | opts.lite = false ;
22 | opts.numFetchThreads = 12 ;
23 | opts.train.batchSize = 128 ;
24 | opts.train.numEpochs = 1 ;
25 | opts.train.gpus = [1] ;
26 | opts.train.prefetch = true ;
27 | opts.train.expDir = opts.expDir ;
28 | opts.train.conserveMemory = true ;
29 | opts.train.sync = false ;
30 |
31 | resultPath = fullfile(opts.expDir, 'results.mat') ;
32 | if ~exist(resultPath, 'file')
33 | results = cnn_imagenet_evaluate(opts) ;
34 | save(fullfile(opts.expDir, 'results.mat'), 'results') ;
35 | end
36 | end
37 |
38 |
39 | fprintf('|%20s|%10s|%10s|%10s|\n', 'model', 'top-1 err.', 'top-5 err.', 'images/s') ;
40 | fprintf('%s\n', repmat('-',1,20+10+10+10+5)) ;
41 |
42 | for i = 1:numel(models)
43 | opts.expDir = fullfile('data', 'models-eval', models{i}) ;
44 | resultPath = fullfile(opts.expDir, 'results.mat') ;
45 | load(resultPath, 'results') ;
46 |
47 | fprintf('|%20s|%10s|%10s|%10s|\n', ...
48 | models{i}, ...
49 | sprintf('%5.1f',results.val.error(end)*100), ...
50 | sprintf('%5.1f',results.val.topFiveError(end)*100), ...
51 | sprintf('%5.1f',results.val.speed(end))) ;
52 | end
53 |
--------------------------------------------------------------------------------
/matconvnet/utils/get-file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | local_dir="$1"
4 | url="$2"
5 |
6 | function get_filename_from_url() {
7 | regexp='^([^\/]*\/)+'
8 | echo -n "$1" | sed -r "s/$regexp//g"
9 | }
10 |
11 | function get_remote_file_size() {
12 | curl -sI "$1" | grep -i content-length | grep -o '[0-9][0-9]*'
13 | }
14 |
15 | filename=$(get_filename_from_url "$url")
16 | local_path="$local_dir/$filename"
17 | remote_size=$(get_remote_file_size "$url")
18 |
19 | echo "Getting: $url"
20 | echo " File: $filename"
21 | echo " Local file path: $local_path"
22 | echo " Remote file size: $remote_size"
23 |
24 | if [ -e "$local_path" ]
25 | then
26 | local_size=$(stat -c%s "$local_path")
27 | echo " Local file size: $local_size"
28 | if [[ "$local_size" -eq "$remote_size" ]]
29 | then
30 | echo " Local and remote file sizes match: not downloading"
31 | exit 0
32 | else
33 | echo " Trying to resume partial download"
34 | if curl -f -C - -o "$local_path" "$url"
35 | then
36 | echo " Download completed successfully"
37 | exit 0
38 | else
39 | echo " Could not resume"
40 | fi
41 | fi
42 | fi
43 |
44 | echo " Downloading the whole file"
45 | curl -f -o "$local_path" "$url"
46 | exit $?
47 |
--------------------------------------------------------------------------------
/matconvnet/utils/googlenet_prototxt_patch.diff:
--------------------------------------------------------------------------------
1 | --- train_val_googlenet.prototxt.orig 2015-10-03 12:37:43.299697610 +0100
2 | +++ train_val_googlenet.prototxt 2015-10-03 13:05:39.343696311 +0100
3 | @@ -1,38 +1,11 @@
4 | name: "GoogLeNet"
5 | -layers {
6 | - name: "data"
7 | - type: DATA
8 | - top: "data"
9 | - top: "label"
10 | - data_param {
11 | - source: "train_lmdb"
12 | - backend: LMDB
13 | - batch_size: 128
14 | - }
15 | - transform_param {
16 | - crop_size: 224
17 | - mean_file: "imagenet_mean.binaryproto"
18 | - mirror: true
19 | - }
20 | - include: { phase: TRAIN }
21 | -}
22 | -layers {
23 | - name: "data"
24 | - type: DATA
25 | - top: "data"
26 | - top: "label"
27 | - data_param {
28 | - source: "test_lmdb"
29 | - backend: LMDB
30 | - batch_size: 32
31 | - }
32 | - transform_param {
33 | - crop_size: 224
34 | - mean_file: "imagenet_mean.binaryproto"
35 | - mirror: false
36 | - }
37 | - include: { phase: TEST }
38 | -}
39 | +
40 | +input: 'data'
41 | +input_dim: 1
42 | +input_dim: 3
43 | +input_dim: 224
44 | +input_dim: 224
45 | +
46 | layers {
47 | name: "conv1"
48 | type: CONVOLUTION
49 | @@ -853,7 +826,7 @@
50 | weight_decay: 1
51 | weight_decay: 0
52 | inner_product_param {
53 | - num_output: 205
54 | + num_output: 1000
55 | weight_filler {
56 | type: "gaussian"
57 | std: 0.01
58 | @@ -864,14 +837,6 @@
59 | }
60 | }
61 | }
62 | -layers {
63 | - name: "loss1"
64 | - type: SOFTMAX_LOSS
65 | - bottom: "cls1_fc2"
66 | - bottom: "label"
67 | - top: "loss1"
68 | - loss_weight: 1
69 | -}
70 |
71 | # Inception module 4 ***************
72 | layers {
73 | @@ -1546,7 +1511,7 @@
74 | weight_decay: 1
75 | weight_decay: 0
76 | inner_product_param {
77 | - num_output: 205
78 | + num_output: 1000
79 | weight_filler {
80 | type: "gaussian"
81 | std: 0.01
82 | @@ -1557,14 +1522,6 @@
83 | }
84 | }
85 | }
86 | -layers {
87 | - name: "loss2"
88 | - type: SOFTMAX_LOSS
89 | - bottom: "cls2_fc2"
90 | - bottom: "label"
91 | - top: "loss2"
92 | - loss_weight: 1
93 | -}
94 |
95 | # Inception module 7 ***************
96 | layers {
97 | @@ -2195,7 +2152,7 @@
98 | weight_decay: 1
99 | weight_decay: 0
100 | inner_product_param {
101 | - num_output: 205
102 | + num_output: 1000
103 | weight_filler {
104 | type: "gaussian"
105 | std: 0.01
106 | @@ -2206,30 +2163,3 @@
107 | }
108 | }
109 | }
110 | -layers {
111 | - name: "loss3"
112 | - type: SOFTMAX_LOSS
113 | - bottom: "cls3_fc"
114 | - bottom: "label"
115 | - top: "loss3"
116 | - loss_weight: 1
117 | -}
118 | -layers {
119 | - name: "accuracy1"
120 | - type: ACCURACY
121 | - bottom: "cls3_fc"
122 | - bottom: "label"
123 | - top: "accuracy1"
124 | - include: { phase: TEST }
125 | -}
126 | -layers {
127 | - name: "accuracy5"
128 | - type: ACCURACY
129 | - bottom: "cls3_fc"
130 | - bottom: "label"
131 | - top: "accuracy5"
132 | - include: { phase: TEST }
133 | - accuracy_param {
134 | - top_k: 5
135 | - }
136 | -}
137 |
--------------------------------------------------------------------------------
/matconvnet/utils/import-fcn.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # brief: Import FCN models from Caffe Model Zoo
3 | # author: Karel Lenc and Andrea Vedaldi
4 |
5 | # Models are written to /data/models
6 | # You can delete /data/tmp after conversion
7 |
8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?)
9 |
10 | overwrite=yes
11 |
12 | FCN32S_PROTO_URL=https://gist.githubusercontent.com/longjon/ac410cad48a088710872/raw/fe76e342641ddb0defad95f6dc670ccc99c35a1f/fcn-32s-pascal-deploy.prototxt
13 | FCN16S_PROTO_URL=https://gist.githubusercontent.com/longjon/d24098e083bec05e456e/raw/dd455b2978b2943a51c37ec047a0f46121d18b56/fcn-16s-pascal-deploy.prototxt
14 | FCN8S_PROTO_URL=https://gist.githubusercontent.com/longjon/1bf3aa1e0b8e788d7e1d/raw/2711bb261ee4404faf2ddf5b9d0d2385ff3bcc3e/fcn-8s-pascal-deploy.prototxt
15 | FCNALEX_PROTO_URL=https://gist.githubusercontent.com/shelhamer/3f2c75f3c8c71357f24c/raw/ccd0d97662e03b83e62f26bf9d870209f20f3efc/train_val.prototxt
16 |
17 | FCN32S_MODEL_URL=http://dl.caffe.berkeleyvision.org/fcn-32s-pascal.caffemodel
18 | FCN16S_MODEL_URL=http://dl.caffe.berkeleyvision.org/fcn-16s-pascal.caffemodel
19 | FCN8S_MODEL_URL=http://dl.caffe.berkeleyvision.org/fcn-8s-pascal.caffemodel
20 | FCNALEX_MODEL_URL=http://dl.caffe.berkeleyvision.org/fcn-alexnet-pascal.caffemodel
21 |
22 | FCN_AVERAGE_COLOR="(122.67891434, 116.66876762, 104.00698793)"
23 |
24 | FCN_CLASSES="('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')"
25 |
26 | # Obtain the path of this script
27 | pushd `dirname $0` > /dev/null
28 | SCRIPTPATH=`pwd`
29 | popd > /dev/null
30 |
31 | converter="python $SCRIPTPATH/import-caffe-dag.py"
32 | data="$SCRIPTPATH/../data"
33 |
34 | mkdir -p "$data/tmp/fcn"
35 |
36 | function get()
37 | {
38 | "$SCRIPTPATH/get-file.sh" "$data/tmp/fcn" "$1"
39 | }
40 |
41 | # --------------------------------------------------------------------
42 | # FCN models
43 | # --------------------------------------------------------------------
44 |
45 | get $FCN32S_MODEL_URL
46 | get $FCN32S_PROTO_URL
47 | get $FCN16S_MODEL_URL
48 | get $FCN16S_PROTO_URL
49 | get $FCN8S_MODEL_URL
50 | get $FCN8S_PROTO_URL
51 |
52 | if true
53 | then
54 | ins=(fcn-32s-pascal fcn-16s-pascal fcn-8s-pascal)
55 | outs=(pascal-fcn32s-dag pascal-fcn16s-dag pascal-fcn8s-dag)
56 |
57 | for ((i=0;i<${#ins[@]};++i)); do
58 | in="$data/tmp/fcn/${ins[i]}"
59 | out="$data/models/${outs[i]}.mat"
60 | if test -f "$out" -a -z "$overwrite"
61 | then
62 | echo "$out exists; skipping."
63 | else
64 | #PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \
65 | $converter \
66 | --caffe-variant=caffe_6e3916 \
67 | --preproc=fcn \
68 | --remove-dropout \
69 | --remove-loss \
70 | --average-value="${FCN_AVERAGE_COLOR}" \
71 | --class-names="${FCN_CLASSES}" \
72 | --caffe-data="$in".caffemodel \
73 | "$in"-deploy.prototxt \
74 | "$out"
75 | fi
76 | done
77 | fi
78 |
--------------------------------------------------------------------------------
/matconvnet/utils/import-googlenet.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # brief: Import various CNN models from the web
3 | # author: Karel Lenc and Andrea Vedaldi
4 |
5 | # Models are written to /data/models
6 | # You can delete /data/tmp after conversion
7 |
8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?)
9 |
10 | overwrite=yes
11 |
12 | CAFFE_URL=http://dl.caffe.berkeleyvision.org/
13 | GOOGLENET_PROTO_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/train_val_googlenet.prototxt
14 | GOOGLENET_MODEL_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_googlenet.caffemodel
15 | GOOGLENET_MEAN_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_mean.binaryproto
16 |
17 | # Obtain the path of this script
18 | pushd `dirname $0` > /dev/null
19 | SCRIPTPATH=`pwd`
20 | popd > /dev/null
21 |
22 | #converter="python -m pdb $SCRIPTPATH/import-caffe-dag.py"
23 | converter="python $SCRIPTPATH/import-caffe-dag.py"
24 | data="$SCRIPTPATH/../data"
25 |
26 | mkdir -pv "$data"/{tmp/caffe,tmp/googlenet,models}
27 |
28 | function get()
29 | {
30 | "$SCRIPTPATH/get-file.sh" "$data/tmp/googlenet" "$1"
31 | }
32 |
33 | # --------------------------------------------------------------------
34 | # GoogLeNet
35 | # --------------------------------------------------------------------
36 |
37 | get "$CAFFE_URL/caffe_ilsvrc12.tar.gz"
38 | (cd "$data/tmp/googlenet" ; tar xzvf caffe_ilsvrc12.tar.gz)
39 |
40 | get "$GOOGLENET_PROTO_URL"
41 | get "$GOOGLENET_MODEL_URL"
42 | get "$GOOGLENET_MEAN_URL"
43 |
44 | (cd "$data/tmp/googlenet" ; patch -Np0 < "$SCRIPTPATH/googlenet_prototxt_patch.diff")
45 |
46 | base="$data/tmp/googlenet"
47 | out="$data/models/imagenet-googlenet-dag.mat"
48 |
49 | if test -f "$out" -a -z "$overwrite"
50 | then
51 | echo "$out exists; skipping."
52 | else
53 | $converter \
54 | --caffe-variant=caffe_0115 \
55 | --preproc=caffe \
56 | --remove-dropout \
57 | --remove-loss \
58 | --append-softmax="cls3_fc" \
59 | --synsets="$base/synset_words.txt" \
60 | --average-image="$base/imagenet_mean.binaryproto" \
61 | --caffe-data="$base/imagenet_googlenet.caffemodel" \
62 | "$base/train_val_googlenet.prototxt" \
63 | "$out"
64 | fi
65 |
--------------------------------------------------------------------------------
/matconvnet/utils/preprocess-imagenet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # file: preprocess-imagenet.sh
3 | # author: Andrea Vedaldi
4 |
5 | # Use as:
6 | # preprocess-imagenet.sh SRC_PATH DEST_PATH
7 | #
8 | # The script creates a copy of the ImageNet ILSVRC CLS-LOC challenge
9 | # data while rescaling the images. Images are rescaled to a minimum
10 | # side of 256 pixels. The data is supposed to be in the format defined
11 | # by examples/cnn_imagenet_setup_data.m
12 | #
13 | # Note that the default scripts in MatConvNet expect the following structure:
14 | #
15 | # ILSVRC2012/ILSVRC2012_devkit_t12/
16 | # ILSVRC2012/images/train/n01440764/
17 | # ILSVRC2012/images/train/n01484850/
18 | # ...
19 | # ILSVRC2012/images/train/n15075141/
20 | # ILSVRC2012/images/val/ILSVRC2012_val_00000001.JPEG
21 | # ILSVRC2012/images/val/ILSVRC2012_val_00000002.JPEG
22 | # ...
23 | # ILSVRC2012/images/val/ILSVRC2012_val_00050000.JPEG
24 | # ILSVRC2012/images/test/ILSVRC2012_test_00000001.JPEG
25 | # ILSVRC2012/images/test/ILSVRC2012_test_00000002.JPEG
26 | # ...
27 | # ILSVRC2012/images/test/ILSVRC2012_test_00100000.JPEG
28 | #
29 | # Symbolic links within the ILSVRC2012/images hierarchy are supported
30 | # by this script.
31 | #
32 | # Example:
33 | # Create a copy of the ILSVRC2012 data in the data/ILSVRC2012
34 | # subfolder. Create a link to a ramdisk directory
35 | # data/ram/ILSVRC2012 to contain the transformed images (provided
36 | # that your server has enough RAM!). Then:
37 | #
38 | # cd <matconvnet root>
39 | # ./utils/preprocess-imagenet.sh data/ILSVRC2012 data/ram/ILSVRC2012
40 |
41 | data=$1
42 | ram=$2
43 |
44 | # graphics magick (much faster)
45 | num_cpus=1
46 | method=gm
47 |
48 | # image magick
49 | # num_cpus=8
50 | # method=im
51 |
52 | mkdir -p "$ram"/images ;
53 | rsync -rv --chmod=ugo=rwX "$data"/*devkit* "$ram/"
54 |
55 | function convert_some_im()
56 | {
57 | out="$1"
58 | shift
59 | for infile in "$@"
60 | do
61 | outfile="$out/$(basename $infile)"
62 | if test -f "$outfile"
63 | then
64 | continue ;
65 | fi
66 | convert -verbose -quality 90 -resize '256x256^' \
67 | "$infile" JPEG:"${outfile}.temp"
68 | mv "${outfile}.temp" "$outfile"
69 | done
70 | }
71 | export -f convert_some_im
72 |
73 | function convert_some_gm()
74 | {
75 | gm=gm
76 | out="$1"
77 | shift
78 | for infile in "$@"
79 | do
80 | outfile="$out/$(basename $infile)"
81 | if test -f "$outfile"
82 | then
83 | continue ;
84 | fi
85 | echo convert -verbose "'$infile'" -quality 90 -resize 256x256^ \
86 | JPEG:"${outfile}"
87 | done | ${gm} batch -echo on -feedback on -
88 | }
89 | export -f convert_some_gm
90 |
91 | dirs=$(find $data/images/* -maxdepth 2 -type d)
92 | for d in $dirs
93 | do
94 | sub=${d#${data}/images/}
95 | out="$ram/images/$sub"
96 | echo "Converting $d -> $out"
97 | mkdir -p "$out"
98 | find "$d" -maxdepth 1 -type f -name '*.JPEG' | \
99 | xargs -n 1000 --max-procs=$num_cpus \
100 | bash -c "convert_some_$method \"$out\" \"\$@\"" _
101 | done
102 |
103 | # copy any symlink
104 | find "$data/images/" -type l -printf '%P\n' | \
105 | rsync -lv --files-from=- "$data/images/" "$ram/images/"
106 |
--------------------------------------------------------------------------------
/matconvnet/utils/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/matconvnet/utils/proto/__init__.py
--------------------------------------------------------------------------------
/matconvnet/utils/proto/get-protos.sh:
--------------------------------------------------------------------------------
1 | # FCN
2 | wget -nc "https://raw.githubusercontent.com/longjon/caffe/6e3916766c6b63bff07e2cfadf210ee5e46af807/src/caffe/proto/caffe.proto" --output-document=./caffe_6e3916.proto
3 | protoc ./caffe_6e3916.proto --python_out=./
4 |
--------------------------------------------------------------------------------
/network.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/network.mat
--------------------------------------------------------------------------------
/processing.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/processing.p
--------------------------------------------------------------------------------
/processing_patch.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XMU-smartdsp/Removing_Rain/adbc1dd55187595691c5096a4e71da86d6aae8e3/processing_patch.p
--------------------------------------------------------------------------------