├── CONTRIBUTING.md ├── COPYING ├── Makefile ├── README.md ├── README_MatConvNet.md ├── addpath_sparse_coding_layer.m ├── compileSparseNet.m ├── diary ├── doc ├── Makefile ├── blocks.tex ├── figures │ ├── imnet.pdf │ ├── pepper.pdf │ └── svg │ │ ├── conv.svg │ │ ├── convt.svg │ │ ├── matconvnet-blue.svg │ │ └── matconvnet-white.svg ├── fundamentals.tex ├── geometry.tex ├── impl.tex ├── intro.tex ├── matconvnet-manual.tex ├── matdoc.py ├── matdocparser.py ├── references.bib ├── site │ ├── docs │ │ ├── about.md │ │ ├── css │ │ │ └── fixes.css │ │ ├── developers.md │ │ ├── faq.md │ │ ├── figures │ │ │ ├── stn-perf.png │ │ │ └── stn-samples.png │ │ ├── functions.md │ │ ├── gpu.md │ │ ├── index.md │ │ ├── install-alt.md │ │ ├── install.md │ │ ├── js │ │ │ ├── mathjaxhelper.js │ │ │ └── toggle.js │ │ ├── pretrained.md │ │ ├── quick.md │ │ ├── spatial-transformer.md │ │ ├── training.md │ │ └── wrappers.md │ ├── mkdocs.yml │ └── theme │ │ ├── base.html │ │ ├── content.html │ │ ├── css │ │ └── base.css │ │ ├── js │ │ └── base.js │ │ ├── matconvnet-blue.svg │ │ ├── nav.html │ │ └── toc.html └── wrappers.tex ├── examples ├── cifar │ ├── cnn_cifar100_experiments.m │ ├── cnn_cifar10_experiments.m │ ├── sparseNet_cifar10.m │ ├── sparseNet_cifar100.m │ ├── sparseNet_cifar100_init.m │ └── sparseNet_cifar10_init.m ├── cnn_train.m ├── cnn_train_dag.m ├── imagenet │ ├── cnn_imagenet.m │ ├── cnn_imagenet_camdemo.m │ ├── cnn_imagenet_deploy.m │ ├── cnn_imagenet_evaluate.m │ ├── cnn_imagenet_experiments.m │ ├── cnn_imagenet_googlenet.m │ ├── cnn_imagenet_init.m │ ├── cnn_imagenet_init_inception.m │ ├── cnn_imagenet_init_resnet.m │ ├── cnn_imagenet_minimal.m │ ├── cnn_imagenet_setup_data.m │ ├── cnn_imagenet_sync_labels.m │ ├── getImageBatch.m │ └── getImageStats.m ├── mnist │ ├── scn_mnist_experiments.m │ ├── sparseNet_mnist.m │ └── sparseNet_mnist_init.m ├── spatial_transformer │ ├── cnn_stn_cluttered_mnist.m │ ├── cnn_stn_cluttered_mnist_init.m │ └── readme.txt ├── stl10 │ ├── cnn_stl10_experiments.m │ ├── sparseNet_stl10.m │ └── sparseNet_stl10_init.m └── vggfaces │ └── cnn_vgg_faces.m ├── log_matlab.txt ├── matconvnet.sln ├── matconvnet.vcxproj ├── matconvnet.vcxproj.filters ├── matconvnet.xcodeproj ├── project.pbxproj ├── project.xcworkspace │ └── contents.xcworkspacedata └── xcshareddata │ └── xcschemes │ ├── matconv CPU.xcscheme │ ├── matconv GPU.xcscheme │ └── matconv cuDNN.xcscheme ├── matlab ├── +dagnn │ ├── @DagNN │ │ ├── DagNN.m │ │ ├── addLayer.m │ │ ├── eval.m │ │ ├── fromSimpleNN.m │ │ ├── getVarReceptiveFields.m │ │ ├── getVarSizes.m │ │ ├── initParams.m │ │ ├── loadobj.m │ │ ├── move.m │ │ ├── print.m │ │ ├── rebuild.m │ │ ├── removeLayer.m │ │ ├── renameLayer.m │ │ ├── renameVar.m │ │ ├── reset.m │ │ ├── saveobj.m │ │ ├── setLayerInputs.m │ │ ├── setLayerOutputs.m │ │ └── setLayerParams.m │ ├── AffineGridGenerator.m │ ├── BatchNorm.m │ ├── BilinearSampler.m │ ├── Concat.m │ ├── Conv.m │ ├── ConvTranspose.m │ ├── Crop.m │ ├── DropOut.m │ ├── ElementWise.m │ ├── Filter.m │ ├── LRN.m │ ├── Layer.m │ ├── Loss.m │ ├── NormOffset.m │ ├── Pooling.m │ ├── ReLU.m │ ├── Scale.m │ ├── Sigmoid.m │ ├── SoftMax.m │ ├── SpatialNorm.m │ ├── Sum.m │ └── UniformScalingGridGenerator.m ├── ParameterServer.m ├── compatibility │ └── parallel │ │ ├── gather.m │ │ ├── labindex.m │ │ └── numlabs.m ├── mex │ ├── .build │ │ ├── bits │ │ │ ├── data.o │ │ │ ├── datacu.o │ │ │ ├── datamex.o │ │ │ ├── impl │ │ │ │ ├── bilinearsampler_cpu.o │ │ │ │ ├── bilinearsampler_gpu.o │ │ │ │ ├── bnorm_cpu.o │ │ │ │ ├── bnorm_gpu.o │ │ 
│ │ ├── copy_cpu.o │ │ │ │ ├── copy_gpu.o │ │ │ │ ├── im2row_cpu.o │ │ │ │ ├── im2row_gpu.o │ │ │ │ ├── imread_libjpeg.o │ │ │ │ ├── normalize_cpu.o │ │ │ │ ├── normalize_gpu.o │ │ │ │ ├── pooling_cpu.o │ │ │ │ ├── pooling_gpu.o │ │ │ │ ├── subsample_cpu.o │ │ │ │ ├── subsample_gpu.o │ │ │ │ └── tinythread.o │ │ │ ├── imread.o │ │ │ ├── nnbias.o │ │ │ ├── nnbilinearsampler.o │ │ │ ├── nnbnorm.o │ │ │ ├── nnconv.o │ │ │ ├── nnfullyconnected.o │ │ │ ├── nnnormalize.o │ │ │ ├── nnpooling.o │ │ │ └── nnsubsample.o │ │ ├── vl_cudatool.o │ │ ├── vl_imreadjpeg.o │ │ ├── vl_imreadjpeg_old.o │ │ ├── vl_nnbilinearsampler.o │ │ ├── vl_nnbnorm.o │ │ ├── vl_nnconv.o │ │ ├── vl_nnconvt.o │ │ ├── vl_nnnormalize.o │ │ ├── vl_nnpool.o │ │ ├── vl_taccummex.o │ │ └── vl_tmove.o │ ├── vl_cudatool.mexa64 │ ├── vl_imreadjpeg.mexa64 │ ├── vl_imreadjpeg_old.mexa64 │ ├── vl_nnbilinearsampler.mexa64 │ ├── vl_nnbnorm.mexa64 │ ├── vl_nnconv.mexa64 │ ├── vl_nnconvt.mexa64 │ ├── vl_nnnormalize.mexa64 │ ├── vl_nnpool.mexa64 │ ├── vl_taccummex.mexa64 │ └── vl_tmove.mexa64 ├── simplenn │ ├── vl_simplenn.m │ ├── vl_simplenn_diagnose.m │ ├── vl_simplenn_display.m │ ├── vl_simplenn_move.m │ ├── vl_simplenn_move.m~ │ ├── vl_simplenn_start_parserv.m │ └── vl_simplenn_tidy.m ├── sparse_coding_layer │ ├── backward_sparse_coding_layer.m │ ├── euclideanloss.m │ ├── fista_nonnegative_l1_gpu.m │ ├── forward_sparse_coding_layer.m │ ├── get_gradient_batch.m │ ├── gradient_check_sparse_coding_layer.m │ ├── gradient_check_sparse_coding_layer_dictionary.m │ ├── mexCol2im.cu │ ├── mexCol2im.mexa64 │ ├── mexCol2im.mexw64 │ ├── mexCol2im.o │ ├── mexGetActiveAtA.cu │ ├── mexGetActiveAtA.mexa64 │ ├── mexGetGradA3D.cu │ ├── mexGetGradA3D.mexa64 │ ├── mexIm2col.cu │ ├── mexIm2col.mexa64 │ ├── mexIm2col.mexw64 │ ├── mexIm2col.o │ ├── test_backward_sparse_coding_layer.m │ ├── test_mexCol2im.m │ ├── test_mexGetGradA3D.m │ ├── test_mexIm2col.m │ └── vec.m ├── src │ ├── bits │ │ ├── data.cpp │ │ ├── data.cu │ │ ├── data.hpp │ │ ├── datacu.cu │ │ ├── datacu.hpp │ │ ├── datamex.cpp │ │ ├── datamex.cu │ │ ├── datamex.hpp │ │ ├── impl │ │ │ ├── bilinearsampler.hpp │ │ │ ├── bilinearsampler_cpu.cpp │ │ │ ├── bilinearsampler_gpu.cu │ │ │ ├── blashelper.hpp │ │ │ ├── bnorm.hpp │ │ │ ├── bnorm_cpu.cpp │ │ │ ├── bnorm_gpu.cu │ │ │ ├── copy.hpp │ │ │ ├── copy_cpu.cpp │ │ │ ├── copy_gpu.cu │ │ │ ├── cudnnhelper.hpp │ │ │ ├── fast_mutex.h │ │ │ ├── im2row.hpp │ │ │ ├── im2row_cpu.cpp │ │ │ ├── im2row_gpu.cu │ │ │ ├── imread_gdiplus.cpp │ │ │ ├── imread_helpers.hpp │ │ │ ├── imread_libjpeg.cpp │ │ │ ├── imread_quartz.cpp │ │ │ ├── nnbias_blas.hpp │ │ │ ├── nnbias_cudnn.cu │ │ │ ├── nnbias_cudnn.hpp │ │ │ ├── nnbilinearsampler_cudnn.cu │ │ │ ├── nnbilinearsampler_cudnn.hpp │ │ │ ├── nnbnorm_cudnn.cu │ │ │ ├── nnbnorm_cudnn.hpp │ │ │ ├── nnconv_blas.hpp │ │ │ ├── nnconv_cudnn.cu │ │ │ ├── nnconv_cudnn.hpp │ │ │ ├── nnpooling_cudnn.cu │ │ │ ├── nnpooling_cudnn.hpp │ │ │ ├── normalize.hpp │ │ │ ├── normalize_cpu.cpp │ │ │ ├── normalize_gpu.cu │ │ │ ├── pooling.hpp │ │ │ ├── pooling_cpu.cpp │ │ │ ├── pooling_gpu.cu │ │ │ ├── sharedmem.cuh │ │ │ ├── subsample.hpp │ │ │ ├── subsample_cpu.cpp │ │ │ ├── subsample_gpu.cu │ │ │ ├── tinythread.cpp │ │ │ └── tinythread.h │ │ ├── imread.cpp │ │ ├── imread.hpp │ │ ├── mexutils.h │ │ ├── nnbias.cpp │ │ ├── nnbias.cu │ │ ├── nnbias.hpp │ │ ├── nnbilinearsampler.cpp │ │ ├── nnbilinearsampler.cu │ │ ├── nnbilinearsampler.hpp │ │ ├── nnbnorm.cpp │ │ ├── nnbnorm.cu │ │ ├── nnbnorm.hpp │ │ ├── nnconv.cpp │ │ ├── nnconv.cu │ │ ├── 
nnconv.hpp │ │ ├── nnfullyconnected.cpp │ │ ├── nnfullyconnected.cu │ │ ├── nnfullyconnected.hpp │ │ ├── nnnormalize.cpp │ │ ├── nnnormalize.cu │ │ ├── nnnormalize.hpp │ │ ├── nnpooling.cpp │ │ ├── nnpooling.cu │ │ ├── nnpooling.hpp │ │ ├── nnsubsample.cpp │ │ ├── nnsubsample.cu │ │ └── nnsubsample.hpp │ ├── config │ │ ├── mex_CUDA_glnxa64.sh │ │ ├── mex_CUDA_glnxa64.xml │ │ ├── mex_CUDA_maci64.sh │ │ └── mex_CUDA_maci64.xml │ ├── vl_cudatool.cpp │ ├── vl_cudatool.cu │ ├── vl_imreadjpeg.cpp │ ├── vl_imreadjpeg.cu │ ├── vl_imreadjpeg_old.cpp │ ├── vl_imreadjpeg_old.cu │ ├── vl_nnbilinearsampler.cpp │ ├── vl_nnbilinearsampler.cu │ ├── vl_nnbnorm.cpp │ ├── vl_nnbnorm.cu │ ├── vl_nnconv.cpp │ ├── vl_nnconv.cu │ ├── vl_nnconvt.cpp │ ├── vl_nnconvt.cu │ ├── vl_nnnormalize.cpp │ ├── vl_nnnormalize.cu │ ├── vl_nnpool.cpp │ ├── vl_nnpool.cu │ ├── vl_taccummex.cpp │ ├── vl_taccummex.cu │ ├── vl_tmove.cpp │ └── vl_tmove.cu ├── vl_argparse.m ├── vl_compilenn.m ├── vl_imreadjpeg.m ├── vl_nnabs.m ├── vl_nnbilinearsampler.m ├── vl_nnbnorm.m ├── vl_nnconcat.m ├── vl_nnconv.m ├── vl_nnconvt.m ├── vl_nncrop.m ├── vl_nndropout.m ├── vl_nnloss.m ├── vl_nnnoffset.m ├── vl_nnnormalize.m ├── vl_nnnormalizelp.m ├── vl_nnpdist.m ├── vl_nnpool.m ├── vl_nnrelu.m ├── vl_nnsigmoid.m ├── vl_nnsoftmax.m ├── vl_nnsoftmaxloss.m ├── vl_nnspnorm.m ├── vl_rootnn.m ├── vl_setupnn.m ├── vl_taccum.m ├── vl_tmove.m └── xtest │ ├── suite │ ├── Scale.m │ ├── nnbilinearsampler.m │ ├── nnbnorm.m │ ├── nnconcat.m │ ├── nnconv.m │ ├── nnconvt.m │ ├── nndagnn.m │ ├── nndropout.m │ ├── nnloss.m │ ├── nnmnist.m │ ├── nnnormalize.m │ ├── nnnormalizelp.m │ ├── nnoffset.m │ ├── nnpdist.m │ ├── nnpool.m │ ├── nnrelu.m │ ├── nnsigmoid.m │ ├── nnsimplenn.m │ ├── nnsoftmax.m │ ├── nnsoftmaxloss.m │ ├── nnspnorm.m │ ├── nntest.m │ └── tmovemex.m │ ├── vl_bench_bnorm.m │ ├── vl_bench_imreadjpeg.m │ ├── vl_nnbnorm_old.m │ ├── vl_test_bnorm.m │ ├── vl_test_economic_relu.m │ ├── vl_test_gpureset.m │ ├── vl_test_imreadjpeg.m │ └── vl_testnn.m └── utils ├── evaluate_ref_models.m ├── get-file.sh ├── import-caffe.py ├── import-fcn.sh ├── import-googlenet.sh ├── import-ref-models.sh ├── import-resnet.sh ├── layers.py ├── model2dot.m ├── preprocess-imagenet.sh ├── proto ├── __init__.py ├── caffe.proto ├── caffe_0115.proto ├── caffe_0115_pb2.py ├── caffe_6e3916.proto ├── caffe_6e3916_pb2.py ├── caffe_b590f1d.proto ├── caffe_b590f1d_pb2.py ├── caffe_old.proto ├── caffe_old_pb2.py ├── caffe_pb2.py ├── get-protos.sh ├── googlenet_prototxt_patch.diff ├── vgg_caffe.proto ├── vgg_caffe_pb2.py └── vgg_synset_words.txt ├── test_examples.m └── tidy_ref_models.m /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | ## How to contribute to MatConvNet 4 | 5 | For a description of how the library is structured, take a look at the 6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on 7 | the MatConvNet website. 8 | 9 | ### Issues 10 | 11 | We are grateful for any reported issues which help to remove bugs and 12 | improve the overall quality of the library. 
In particular, you can use 13 | the issue tracker to: 14 | 15 | * report bugs and unexpected crashes 16 | * discuss library design decisions 17 | * request new features 18 | 19 | When reporting bugs, it really helps if you can provide the following: 20 | 21 | * Which steps are needed to reproduce the issue 22 | * MATLAB, compiler and CUDA version (where appropriate) 23 | 24 | Before opening an issue to report a bug, please make sure that the bug 25 | is reproducible on the latest version of the master branch. 26 | 27 | The most difficult bugs to remove are those which cause crashes of the 28 | core functions (e.g. CUDA errors etc.). In those cases, it is really 29 | useful to create a *minimal example* which is able to reproduce the 30 | issue. We know that this may mean a bit of work, but it helps us to 31 | remove the bug more quickly. 32 | 33 | ### Pull requests 34 | 35 | Please make any Pull Requests against the `devel` branch rather than 36 | the `master` branch, which is maintained as the latest stable release 37 | of the library. 38 | 39 | As a general rule, it is much easier to accept small Pull Requests 40 | that make a single improvement to the library than complex code 41 | changes that affect multiple parts of the library. When submitting 42 | substantial changes, it is useful if unit tests are provided with the 43 | code. 44 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 The MatConvNet team. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms are permitted 5 | provided that the above copyright notice and this paragraph are 6 | duplicated in all such forms and that any documentation, 7 | advertising materials, and other materials related to such 8 | distribution and use acknowledge that the software was developed 9 | by the <organization>. The name of the <organization> 10 | may not be used to endorse or promote products derived 11 | from this software without specific prior written permission. 12 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 13 | IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Supervised Sparse Coding Networks 2 | ============= 3 | 4 | This code is used for the experiments of *Supervised Deep Sparse Coding Networks* (https://arxiv.org/abs/1701.08349) by Xiaoxia Sun, Nasser M. Nasrabadi and Trac D. Tran. 5 | 6 | ``` 7 | @article{xxsun2017deep, 8 | author = {Xiaoxia Sun and Nasser M. Nasrabadi and Trac D. Tran}, 9 | title = {Supervised Multilayer Sparse Coding Networks for Image Classification}, 10 | journal = {arXiv preprint arXiv:1701.08349}, 11 | year = {2017}, 12 | } 13 | ``` 14 | 15 | 16 | 17 | The code has been verified on Linux (Ubuntu 14.04) with CUDA 8.0, using either 3 Nvidia Titan X (Pascal) GPUs or 4 Nvidia Tesla P40 GPUs. 18 | 19 | With 3 Titan X (Pascal) GPUs, training speed is about 80~90 images/sec on CIFAR-10 using the SCN-4 settings in the paper. 20 | 21 | With 4 Tesla P40 GPUs, training speed is about 100~120 images/sec on CIFAR-10 using the SCN-4 settings in the paper. 22 | 23 | 24 | The sparse coding layer is implemented only for the GPU, so a GPU is required to run the code. 25 | 26 | 0. 
**To install the MatConvNet toolbox and the SparseNet** 27 | ```matlab 28 | compileSparseNet 29 | ``` 30 | 31 | 0. **Before running the experiments, add the paths** 32 | ```matlab 33 | addpath_sparse_coding_layer 34 | ``` 35 | 36 | 0. **To reproduce the result on CIFAR-10** 37 | ```matlab 38 | [net_bn, info_bn] = sparseNet_cifar10('expDir', 'data/cifar10-sparseNet', 'gpus', [1, 2, 3, 4], 'batchSize', 128, 'numSlice', 2); 39 | ``` 40 | 41 | batchSize: mini-batch size for stochastic gradient descent. 42 | gpus: indices of the GPUs to be used, starting from 1. 43 | numSlice: during backpropagation, each batch of samples is split into numSlice slices to reduce memory usage. 44 | 45 | 0. **To reproduce the result on CIFAR-100:** 46 | ```matlab 47 | [net_bn, info_bn] = sparseNet_cifar100('expDir', 'data/cifar100-sparseNet', 'gpus', [1, 2, 3, 4], 'batchSize', 128, 'numSlice', 2); 48 | ``` 49 | 50 | 0. **To reproduce the result on STL-10:** 51 | ```matlab 52 | [net_bn, info_bn] = sparseNet_stl10('expDir', 'data/stl10-scn', 'gpus', [1,2,3,4], 'batchSize', 16, 'numSlice', 2); 53 | ``` 54 | 55 | 0. **To reproduce the result on MNIST:** 56 | ```matlab 57 | [net_bn, info_bn] = sparseNet_mnist('expDir', 'data/mnist-scn', 'gpus', [1,2,3,4], 'batchSize', 128, 'numSlice', 2); 58 | ``` 59 | -------------------------------------------------------------------------------- /README_MatConvNet.md: -------------------------------------------------------------------------------- 1 | # MatConvNet: CNNs for MATLAB 2 | 3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural 4 | Networks* (CNNs) for computer vision applications. It is simple, 5 | efficient, and can run and learn state-of-the-art CNNs. Several 6 | example CNNs are included to classify and encode images. Please visit 7 | the [homepage](http://www.vlfeat.org/matconvnet) to know more. 8 | 9 | In case of compilation issues, please first read the 10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and 11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) sections before creating a GitHub 12 | issue. For general inquiries regarding network design and training-related 13 | questions, please use the 14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet). 15 | -------------------------------------------------------------------------------- /addpath_sparse_coding_layer.m: -------------------------------------------------------------------------------- 1 | addpath('matlab'); 2 | addpath('matlab/sparse_coding_layer/'); 3 | addpath('examples/mnist') 4 | addpath('examples/cifar') 5 | addpath('examples/stl10') 6 | addpath('examples/imagenet') -------------------------------------------------------------------------------- /compileSparseNet.m: -------------------------------------------------------------------------------- 1 | close all; clc; clear; 2 | 3 | disp('GPU and CUDA required to compile and test the code.') 4 | disp('The code is tested on Ubuntu 14.04 with 3 Nvidia Titan X (Pascal) GPUs / 4 Nvidia Tesla P40 GPUs') 5 | 6 | addpath_sparse_coding_layer; 7 | vl_compilenn('enableGpu', true); 8 | 9 | cd matlab/sparse_coding_layer; 10 | 11 | mexcuda mexCol2im.cu -lc -lstdc++; 12 | mexcuda mexIm2col.cu -lc -lstdc++; 13 | mexcuda mexGetActiveAtA.cu -lc -lstdc++; 14 | mexcuda mexGetGradA3D.cu -lc -lstdc++; 15 | 16 | cd .. 17 | cd ..
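% Optional post-build sanity check (a sketch, not part of the original script;
% it assumes the bundled test scripts under matlab/sparse_coding_layer/ are
% argument-free and can be run directly after compilation):
%
%   test_mexIm2col ;                      % check the im2col CUDA kernel
%   test_mexCol2im ;                      % check the col2im CUDA kernel
%   test_mexGetGradA3D ;                  % check the dictionary-gradient kernel
%   gradient_check_sparse_coding_layer ;  % numerical gradient check of the layer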
18 | -------------------------------------------------------------------------------- /diary: -------------------------------------------------------------------------------- 1 | diary log_matlab.txt 2 | -------------------------------------------------------------------------------- /doc/figures/imnet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/doc/figures/imnet.pdf -------------------------------------------------------------------------------- /doc/figures/pepper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/doc/figures/pepper.pdf -------------------------------------------------------------------------------- /doc/site/docs/faq.md: -------------------------------------------------------------------------------- 1 | # Frequently-asked questions (FAQ) 2 | 3 | ## Running MatConvNet 4 | 5 | ### Do I need a specific version of the CUDA devkit? 6 | 7 | Officially, MathWorks supports a specific version of the CUDA devkit 8 | with each MATLAB version (see [here](install.md#gpu)). However, in 9 | practice we normally use the most recent version of CUDA (and cuDNN) 10 | available from NVIDIA without problems (see 11 | [here](install.md#nvcc)). 12 | 13 | ### Can I use MatConvNet with CuDNN? 14 | 15 | Yes, and this is the recommended way of running MatConvNet on NVIDIA 16 | GPUs. However, you need to install cuDNN and link it to 17 | MatConvNet. See the [installation instructions](install.md#cudnn) to 18 | know how. 19 | 20 | ### How do I fix the error `Attempt to execute SCRIPT vl_nnconv as a function`? 21 | 22 | Before the toolbox can be used, the 23 | [MEX files](http://www.mathworks.com/support/tech-notes/1600/1605.html 24 | ) must be compiled. Make sure to follow the 25 | [installation instructions](install.md). If you have done so and the 26 | MEX files are still not recognized, check that the directory 27 | `matlab/mex` contains the missing files. If the files are 28 | there, there may be a problem with the way the MEX files have been 29 | compiled. 30 | 31 | ### Why do files such as `vl_nnconv.m` not contain any code? 32 | 33 | Functions such as `vl_nnconv`, `vl_nnpool`, `vl_nnbnorm` and many 34 | others are implemented as MEX files. In this case, M files such as 35 | `vl_nnconv.m` contain only the function documentation. The code of the 36 | function is actually found in `matlab/src/vl_nnconv.cu` (a CUDA/C++ 37 | source file) or similar. 38 | 39 | ### Why do I get compilation error `error: unrecognized command line option "-std=c++11"` on a Linux machine? 40 | 41 | This is caused by a version of the GCC compiler 42 | ([<4.6](https://gcc.gnu.org/projects/cxx-status.html#cxx11)) that is incompatible with your MATLAB. 43 | You can either install a newer version of GCC (if available), or you 44 | can force MATLAB not to use the offending compiler option and replace it with 45 | the previous name of the C++11 standard argument: 46 | * In MATLAB run: `mex -setup c++`. 47 | * Run `edit(fullfile(prefdir, 'mex_C++_glnxa64.xml'))` to edit your MATLAB 48 | compiler options. 49 | * Replace all occurrences of `-std=c++11` with `-std=c++0x` and save the file.
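### How can I verify that the MEX files were built correctly?

A minimal check from within MATLAB (a sketch; `vl_compilenn` and the bundled `vl_testnn` test suite ship with the toolbox under `matlab/` and `matlab/xtest/`):

```matlab
run matlab/vl_setupnn ;           % add MatConvNet to the MATLAB path
vl_compilenn('enableGpu', true) ; % compile the MEX files (GPU build)
vl_testnn('gpu', true) ;          % run the unit tests on the GPU
```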
50 | -------------------------------------------------------------------------------- /doc/site/docs/figures/stn-perf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/doc/site/docs/figures/stn-perf.png -------------------------------------------------------------------------------- /doc/site/docs/figures/stn-samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/doc/site/docs/figures/stn-samples.png -------------------------------------------------------------------------------- /doc/site/docs/gpu.md: -------------------------------------------------------------------------------- 1 | # Using GPU acceleration 2 | 3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the 4 | [Parallel Computing Toolbox](http://www.mathworks.com/products/parallel-computing/). This 5 | toolbox requires CUDA-compatible cards, and you will need a copy of 6 | the corresponding 7 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to 8 | compile GPU support in MatConvNet (see 9 | [compiling](install#compiling)). 10 | 11 | All the core computational functions (e.g. `vl_nnconv`) in the toolbox 12 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore, 13 | switching to use the GPU is as simple as converting the input CPU 14 | arrays into GPU arrays. 15 | 16 | In order to make the very best of powerful GPUs, it is important to 17 | balance the load between CPU and GPU in order to avoid starving the 18 | latter. In training on a problem like ImageNet, the CPU(s) in your 19 | system will be busy loading data from disk and streaming it to the GPU 20 | to evaluate the CNN and its derivative. MatConvNet includes the 21 | utility `vl_imreadjpeg` to accelerate and parallelize loading images 22 | into memory (this function is currently a bottleneck and will be made more 23 | powerful in future releases). 24 | -------------------------------------------------------------------------------- /doc/site/docs/js/mathjaxhelper.js: -------------------------------------------------------------------------------- 1 | /* 2 | #if false 3 | Prevent Unity from trying to compile this js 4 | */ 5 | MathJax.Hub.Config({ 6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] } 7 | }); 8 | /* 9 | #endif 10 | */ -------------------------------------------------------------------------------- /doc/site/docs/js/toggle.js: -------------------------------------------------------------------------------- 1 | function toggle_visibility(id) { 2 | var e = document.getElementById(id); 3 | if(e.style.display == 'block') 4 | e.style.display = 'none'; 5 | else 6 | e.style.display = 'block'; 7 | } 8 | -------------------------------------------------------------------------------- /doc/site/docs/spatial-transformer.md: -------------------------------------------------------------------------------- 1 | # Spatial Transformer Networks 2 | 3 | This example demonstrates the use of a Spatial Transformer Network 4 | for classifying distorted MNIST digits in clutter. 5 | The source files used in this example can be found in the 6 | `examples/spatial_transformer` directory. 7 | 8 | The spatial transformer network is defined in the `cnn_stn_cluttered_mnist.m` 9 | file.
It has three components: (1) a localization network which 10 | predicts six affine transformation parameters for an input image, 11 | (2) a bilinear sampler which applies the above transformation 12 | to the input image, and (3) a classification network which classifies the 13 | output of the bilinear sampler. 14 | 15 | The picture below shows input images and their transformed versions as determined 16 | by the STN. Note how the STN has learned to rectify the input image. 17 | 18 | ![Transformations inferred by the Spatial Transformer Network for images from a cluttered MNIST dataset.](figures/stn-samples.png) 19 | 20 | The following graph compares the training and test errors of two CNNs: 21 | an STN and a plain classification CNN (with the same configuration as the 22 | classification component of the STN). We note that the STN performs significantly 23 | better (STN test-error = 5.7%, CNN test-error = 14.2%). 24 | 25 | ![Classification error comparison between an STN and a CNN.](figures/stn-perf.png) 26 | -------------------------------------------------------------------------------- /doc/site/docs/training.md: -------------------------------------------------------------------------------- 1 | ## Using MatConvNet to train convnets 2 | 3 | MatConvNet can be used to train models, typically by using a form of 4 | stochastic gradient descent (SGD) and back-propagation. 5 | 6 | The following learning demonstrators are provided in the MatConvNet 7 | package: 8 | 9 | - **MNIST**. See `examples/mnist/cnn_mnist.m`. 10 | - **CIFAR**. See `examples/cifar/cnn_cifar.m`. 11 | - **ImageNet**. See `examples/imagenet/cnn_imagenet.m`. 12 | 13 | These demos are self-contained; MNIST and CIFAR, in particular, 14 | automatically download and unpack the required data, so that they 15 | should work out-of-the-box. 16 | 17 | While MNIST and CIFAR are small datasets (by today's standards) and 18 | training is feasible on a CPU, ImageNet requires a powerful GPU to 19 | complete in a reasonable time (a few days!). It also requires the 20 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to 21 | accelerate reading large batches of JPEG images and avoid starving the 22 | GPU. 23 | 24 | All these demos use the `examples/cnn_train.m` and 25 | `examples/cnn_train_dag.m` SGD drivers, which are simple 26 | implementations of the standard SGD with momentum, done directly in 27 | MATLAB code. However, it should be easy to implement your own 28 | specialized or improved solver. 29 | -------------------------------------------------------------------------------- /doc/site/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: MatConvNet 2 | markdown_extensions: ['extra', 'mathjax', 'toc'] 3 | theme_dir: theme 4 | 5 | extra_css: ['css/fixes.css'] 6 | extra_javascript: ['https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML', 'js/mathjaxhelper.js', 'js/toggle.js'] 7 | 8 | repo_url: https://github.com/vlfeat/matconvnet 9 | copyright: "Copyright © 2014-16 The MatConvNet Team."
10 | 11 | pages: 12 | - Home: 'index.md' 13 | - Getting Started: 14 | - Quick Start: 'quick.md' 15 | - Installing: 'install.md' 16 | - CNN Wrappers: 'wrappers.md' 17 | - Pretrained CNNs: 'pretrained.md' 18 | - Training new CNNs: 'training.md' 19 | - Spatial Transformers: 'spatial-transformer.md' 20 | - Using a GPU: 'gpu.md' 21 | - About: 'about.md' 22 | - Frequently-asked questions: 'faq.md' 23 | 24 | - Functions: 25 | - Index: 'functions.md' 26 | - vl_nnbnorm: 'mfiles/vl_nnbnorm.md' 27 | - vl_nnbilinearsampler: 'mfiles/vl_nnbilinearsampler.md' 28 | - vl_nnconcat: 'mfiles/vl_nnconcat.md' 29 | - vl_nnspnorm: 'mfiles/vl_nnspnorm.md' 30 | - vl_nnpdist: 'mfiles/vl_nnpdist.md' 31 | - vl_nnconv: 'mfiles/vl_nnconv.md' 32 | - vl_nnconvt: 'mfiles/vl_nnconvt.md' 33 | - vl_nncrop: 'mfiles/vl_nncrop.md' 34 | - vl_nndropout: 'mfiles/vl_nndropout.md' 35 | - vl_nnloss: 'mfiles/vl_nnloss.md' 36 | - vl_nnnoffset: 'mfiles/vl_nnnoffset.md' 37 | - vl_nnnormalize: 'mfiles/vl_nnnormalize.md' 38 | - vl_nnpool: 'mfiles/vl_nnpool.md' 39 | - vl_nnrelu: 'mfiles/vl_nnrelu.md' 40 | - vl_nnsigmoid: 'mfiles/vl_nnsigmoid.md' 41 | - vl_nnsoftmax: 'mfiles/vl_nnsoftmax.md' 42 | - vl_nnsoftmaxloss: 'mfiles/vl_nnsoftmaxloss.md' 43 | - DagNN wrapper: 'mfiles/+dagnn/@DagNN/DagNN.md' 44 | - vl_simplenn: 'mfiles/simplenn/vl_simplenn.md' 45 | - vl_simplenn_tidy: 'mfiles/simplenn/vl_simplenn_tidy.md' 46 | - vl_simplenn_diagnose: 'mfiles/simplenn/vl_simplenn_diagnose.md' 47 | - vl_simplenn_display: 'mfiles/simplenn/vl_simplenn_display.md' 48 | - vl_simplenn_move: 'mfiles/simplenn/vl_simplenn_move.md' 49 | - vl_argparse: 'mfiles/vl_argparse.md' 50 | - vl_compilenn: 'mfiles/vl_compilenn.md' 51 | - vl_rootnn: 'mfiles/vl_rootnn.md' 52 | - vl_setupnn: 'mfiles/vl_setupnn.md' 53 | - vl_imreadjpeg: 'mfiles/vl_imreadjpeg.md' 54 | - vl_taccum: 'mfiles/vl_taccum.md' 55 | - vl_tmove: 'mfiles/vl_tmove.md' 56 | 57 | - Contributing: 58 | - Developers notes: 'developers.md' 59 | - Compiling from the command line: 'install-alt.md' 60 | -------------------------------------------------------------------------------- /doc/site/theme/content.html: -------------------------------------------------------------------------------- 1 | {% if meta.source %} 2 | 7 | {% endif %} 8 | 9 | {{ content }} 10 | -------------------------------------------------------------------------------- /doc/site/theme/js/base.js: -------------------------------------------------------------------------------- 1 | 2 | /* Highlight */ 3 | $( document ).ready(function() { 4 | hljs.initHighlightingOnLoad(); 5 | $('table').addClass('table table-striped table-hover'); 6 | }); 7 | 8 | 9 | $('body').scrollspy({ 10 | target: '.bs-sidebar', 11 | }); 12 | 13 | 14 | /* Prevent disabled links from causing a page reload */ 15 | $("li.disabled a").click(function() { 16 | event.preventDefault(); 17 | }); 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /doc/site/theme/toc.html: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /examples/cifar/cnn_cifar100_experiments.m: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | [net_bn, info_bn] = sparseNet_cifar100('expDir', 'data/cifar100-sparseNet', 'gpus', [1], 'batchSize', 32); 6 | -------------------------------------------------------------------------------- /examples/cifar/cnn_cifar10_experiments.m: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | [net_bn, info_bn] = sparseNet_cifar10('expDir', 'data/cifar10-sparseNet', 'gpus', [1], 'batchSize', 32); 6 | -------------------------------------------------------------------------------- /examples/imagenet/cnn_imagenet_camdemo.m: -------------------------------------------------------------------------------- 1 | function cnn_imagenet_camdemo() 2 | %CNN_IMAGENET_CAMDEMO Realtime classification with a webcam 3 | % Download the pre-trained models from 4 | % http://www.vlfeat.org/matconvnet/pretrained/ 5 | % 6 | % Download MATLAB's webcam support from: 7 | % http://mathworks.com/hardware-support/matlab-webcam.html 8 | 9 | run(fullfile(fileparts(mfilename('fullpath')), ... 10 | '..', '..', 'matlab', 'vl_setupnn.m')) ; 11 | 12 | cam = webcam(1) ; 13 | run matlab/vl_setupnn ; 14 | model = 'imagenet-googlenet-dag' ; 15 | %model = 'imagenet-vgg-m' ; 16 | %model = 'imagenet-vgg-f' ; 17 | net = load(fullfile(vl_rootnn, 'data', 'models', sprintf('%s.mat', model))) ; 18 | 19 | if strcmp(model, 'imagenet-googlenet-dag') 20 | net = dagnn.DagNN.loadobj(net) ; 21 | out = net.getVarIndex('prob') ; 22 | dag = true ; 23 | else 24 | dag = false ; 25 | end 26 | 27 | scoress = zeros(1000,1) ; 28 | momentum = .5 ; 29 | 30 | while true 31 | % obtain and preprocess an image 32 | im = snapshot(cam) ; 33 | d = size(im,1)-size(im,2) ; 34 | dy = floor(max(d,0)/2) ; 35 | dx = floor(max(-d,0)/2) ; 36 | im = im(dy+1:end-dy, dx+1:end-dx, :) ; % center crop 37 | im_ = single(im) ; % note: 255 range 38 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2), 'bilinear') ; 39 | im_ = im_ - net.meta.normalization.averageImage ; 40 | 41 | % run the CNN 42 | if dag 43 | net.eval({'data',im_}) ; 44 | scores = squeeze(gather(net.vars(out).value)) ; 45 | else 46 | res = vl_simplenn(net, im_) ; 47 | scores = squeeze(gather(res(end).x)) ; 48 | end 49 | 50 | % smooth scores and pick the best 51 | scoress = momentum*scoress + (1-momentum)*scores ; 52 | [bestScore, best] = max(scoress) ; 53 | 54 | % visualize 55 | figure(1) ; clf ; imagesc(im) ; 56 | title(sprintf('%s, score %.3f',... 57 | strtok(net.meta.classes.description{best},','), bestScore), ... 58 | 'FontSize', 30) ; 59 | axis equal off ; 60 | drawnow ; 61 | end 62 | -------------------------------------------------------------------------------- /examples/imagenet/cnn_imagenet_experiments.m: -------------------------------------------------------------------------------- 1 | [net_bn, info_bn] = cnn_imagenet(... 2 | 'expDir', 'data/imagenet-scn'); -------------------------------------------------------------------------------- /examples/imagenet/cnn_imagenet_googlenet.m: -------------------------------------------------------------------------------- 1 | function cnn_imagenet_googlenet() 2 | %CNN_IMAGENET_GOOGLENET Demonstrates how to use GoogLeNet 3 | 4 | run matlab/vl_setupnn 5 | modelPath = 'data/models/imagenet-googlenet-dag.mat' ; 6 | 7 | if ~exist(modelPath) 8 | mkdir(fileparts(modelPath)) ; 9 | urlwrite(... 10 | 'http://www.vlfeat.org/matconvnet/models/imagenet-googlenet-dag.mat', ... 
11 | modelPath) ; 12 | end 13 | 14 | net = dagnn.DagNN.loadobj(load(modelPath)) ; 15 | 16 | im = imread('peppers.png') ; 17 | im_ = single(im) ; % note: 255 range 18 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 19 | im_ = im_ - net.meta.normalization.averageImage ; 20 | net.eval({'data', im_}) ; 21 | 22 | % show the classification result 23 | scores = squeeze(gather(net.vars(end).value)) ; 24 | [bestScore, best] = max(scores) ; 25 | figure(1) ; clf ; imagesc(im) ; 26 | title(sprintf('%s (%d), score %.3f',... 27 | net.meta.classes.description{best}, best, bestScore)) ; 28 | -------------------------------------------------------------------------------- /examples/imagenet/cnn_imagenet_minimal.m: -------------------------------------------------------------------------------- 1 | function cnn_imagenet_minimal() 2 | % CNN_IMAGENET_MINIMAL Minimalistic demonstration of how to run an ImageNet CNN model 3 | 4 | % Setup MatConvNet. 5 | run(fullfile(fileparts(mfilename('fullpath')), ... 6 | '..', '..', 'matlab', 'vl_setupnn.m')) ; 7 | 8 | % Download a pre-trained CNN from the web. 9 | if ~exist('imagenet-vgg-f.mat', 'file') 10 | fprintf('Downloading the VGG-F model ... this may take a while\n') ; 11 | urlwrite('http://www.vlfeat.org/matconvnet/models/imagenet-vgg-f.mat', ... 12 | 'imagenet-vgg-f.mat') ; 13 | end 14 | 15 | % Load the model and upgrade it to the current MatConvNet version. 16 | net = load('imagenet-vgg-f.mat') ; 17 | net = vl_simplenn_tidy(net) ; 18 | 19 | % Obtain and preprocess an image. 20 | im = imread('peppers.png') ; 21 | im_ = single(im) ; % note: 255 range 22 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 23 | im_ = im_ - net.meta.normalization.averageImage ; 24 | 25 | % Run the CNN. 26 | res = vl_simplenn(net, im_) ; 27 | 28 | % Show the classification result. 29 | scores = squeeze(gather(res(end).x)) ; 30 | [bestScore, best] = max(scores) ; 31 | figure(1) ; clf ; imagesc(im) ; 32 | title(sprintf('%s (%d), score %.3f',... 33 | net.meta.classes.description{best}, best, bestScore)) ; 34 | 35 | -------------------------------------------------------------------------------- /examples/imagenet/cnn_imagenet_sync_labels.m: -------------------------------------------------------------------------------- 1 | function imdb = cnn_imagenet_sync_labels(imdb, net) 2 | % CNN_IMAGENET_SYNC_LABELS Match CNN and database labels 3 | % A CNN NET and the image database IMDB may use a different label ordering. 4 | % This function matches classes by name and reorders the labels 5 | % in IMDB to match NET.
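% Example (an illustrative sketch, not part of the original file; it assumes
% IMDB was produced by cnn_imagenet_setup_data and NET is a loaded model):
%
%   imdb = cnn_imagenet_sync_labels(imdb, net) ;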
6 | 7 | [~,perm] = ismember(imdb.classes.name, net.meta.classes.name); 8 | assert(all(perm ~= 0)); 9 | 10 | imdb.classes.description = imdb.classes.description(perm) ; 11 | imdb.classes.name = imdb.classes.name(perm) ; 12 | ok = imdb.images.label > 0 ; 13 | iperm(perm) = 1:numel(perm) ; 14 | imdb.images.label(ok) = perm(imdb.images.label(ok)) ; 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/imagenet/getImageBatch.m: -------------------------------------------------------------------------------- 1 | function data = getImageBatch(imagePaths, varargin) 2 | % GETIMAGEBATCH Load and jitter a batch of images 3 | 4 | opts.useGpu = false ; 5 | opts.prefetch = false ; 6 | opts.numThreads = 1 ; 7 | 8 | opts.imageSize = [227, 227] ; 9 | opts.cropSize = 227 / 256 ; 10 | opts.keepAspect = true ; 11 | opts.subtractAverage = [] ; 12 | 13 | opts.jitterFlip = false ; 14 | opts.jitterLocation = false ; 15 | opts.jitterAspect = 1 ; 16 | opts.jitterScale = 1 ; 17 | opts.jitterBrightness = 0 ; 18 | opts.jitterContrast = 0 ; 19 | opts.jitterSaturation = 0 ; 20 | 21 | opts = vl_argparse(opts, varargin); 22 | 23 | args{1} = {imagePaths, ... 24 | 'NumThreads', opts.numThreads, ... 25 | 'Pack', ... 26 | 'Interpolation', 'bicubic', ... 27 | 'Resize', opts.imageSize(1:2), ... 28 | 'CropSize', opts.cropSize * opts.jitterScale, ... 29 | 'CropAnisotropy', opts.jitterAspect, ... 30 | 'Brightness', opts.jitterBrightness, ... 31 | 'Contrast', opts.jitterContrast, ... 32 | 'Saturation', opts.jitterSaturation} ; 33 | 34 | if ~opts.keepAspect 35 | % Squashing effect 36 | args{end+1} = {'CropAnisotropy', 0} ; 37 | end 38 | 39 | if opts.jitterFlip 40 | args{end+1} = {'Flip'} ; 41 | end 42 | 43 | if opts.jitterLocation 44 | args{end+1} = {'CropLocation', 'random'} ; 45 | else 46 | args{end+1} = {'CropLocation', 'center'} ; 47 | end 48 | 49 | if opts.useGpu 50 | args{end+1} = {'Gpu'} ; 51 | end 52 | 53 | if ~isempty(opts.subtractAverage) 54 | args{end+1} = {'SubtractAverage', opts.subtractAverage} ; 55 | end 56 | 57 | args = horzcat(args{:}) ; 58 | 59 | if opts.prefetch 60 | vl_imreadjpeg(args{:}, 'prefetch') ; 61 | data = [] ; 62 | else 63 | data = vl_imreadjpeg(args{:}) ; 64 | data = data{1} ; 65 | end 66 | -------------------------------------------------------------------------------- /examples/imagenet/getImageStats.m: -------------------------------------------------------------------------------- 1 | function [averageImage, rgbMean, rgbCovariance] = getImageStats(images, varargin) 2 | %GETIMAGESTATS Get image statistics 3 | 4 | opts.gpus = [] ; 5 | opts.batchSize = 256 ; 6 | opts.imageSize = [256 256] ; 7 | opts.numThreads = 6 ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | avg = {} ; 11 | rgbm1 = {} ; 12 | rgbm2 = {} ; 13 | 14 | numGpus = numel(opts.gpus) ; 15 | if numGpus > 0 16 | fprintf('%s: resetting GPU device\n', mfilename) ; 17 | clear mex ; 18 | gpuDevice(opts.gpus(1)) 19 | end 20 | 21 | for t=1:opts.batchSize:numel(images) 22 | time = tic ; 23 | batch = t : min(t+opts.batchSize-1, numel(images)) ; 24 | fprintf('collecting image stats: batch starting with image %d ...', batch(1)) ; 25 | 26 | data = getImageBatch(images(batch), ... 27 | 'numThreads', opts.numThreads, ... 28 | 'imageSize', opts.imageSize, ...
29 | 'useGpu', numGpus > 0) ; 30 | 31 | z = reshape(shiftdim(data,2),3,[]) ; 32 | rgbm1{end+1} = mean(z,2) ; 33 | rgbm2{end+1} = z*z'/size(z,2) ; 34 | avg{end+1} = mean(data, 4) ; 35 | time = toc(time) ; 36 | fprintf(' %.1f Hz\n', numel(batch) / time) ; 37 | end 38 | 39 | averageImage = gather(mean(cat(4,avg{:}),4)) ; 40 | rgbm1 = gather(mean(cat(2,rgbm1{:}),2)) ; 41 | rgbm2 = gather(mean(cat(3,rgbm2{:}),3)) ; 42 | rgbMean = rgbm1 ; 43 | rgbCovariance = rgbm2 - rgbm1*rgbm1' ; 44 | 45 | if numGpus > 0 46 | fprintf('%s: finished with GPU device, resetting again\n', mfilename) ; 47 | clear mex ; 48 | gpuDevice(opts.gpus(1)) ; 49 | end 50 | fprintf('%s: all done\n', mfilename) ; 51 | -------------------------------------------------------------------------------- /examples/mnist/scn_mnist_experiments.m: -------------------------------------------------------------------------------- 1 | 2 | [net_bn, info_bn] = sparseNet_mnist('expDir', 'data/mnist-scn', 'gpus', [1,2,3], 'batchSize', 128, 'numSlice', 3); -------------------------------------------------------------------------------- /examples/spatial_transformer/readme.txt: -------------------------------------------------------------------------------- 1 | Example scripts to train a spatial transformer network [1] 2 | for the cluttered MNIST dataset. 3 | 4 | Demonstrates how to initialize and train the network. 5 | 6 | References: 7 | ----------- 8 | 1. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman 9 | Spatial transformer networks 10 | Advances in Neural Information Processing Systems, 2015 11 | -------------------------------------------------------------------------------- /examples/stl10/cnn_stl10_experiments.m: -------------------------------------------------------------------------------- 1 | %% Experiment with sparseNet_stl10 2 | [net_bn, info_bn] = sparseNet_stl10('expDir', 'data/stl10-scn', 'gpus', [1,2,3], 'batchSize', 16); 3 | 4 | -------------------------------------------------------------------------------- /examples/vggfaces/cnn_vgg_faces.m: -------------------------------------------------------------------------------- 1 | function cnn_vgg_faces() 2 | %CNN_VGG_FACES Demonstrates how to use VGG-Face 3 | 4 | % Setup MatConvNet. 5 | run(fullfile(fileparts(mfilename('fullpath')), ... 6 | '..', '..', 'matlab', 'vl_setupnn.m')) ; 7 | 8 | % Load the VGG-Face model. 9 | modelPath = fullfile(vl_rootnn,'data','models','vgg-face.mat') ; 10 | if ~exist(modelPath) 11 | fprintf('Downloading the VGG-Face model ... this may take a while\n') ; 12 | mkdir(fileparts(modelPath)) ; 13 | urlwrite(... 14 | 'http://www.vlfeat.org/matconvnet/models/vgg-face.mat', ... 15 | modelPath) ; 16 | end 17 | 18 | % Load the model and upgrade it to the current MatConvNet version. 19 | net = load(modelPath) ; 20 | net = vl_simplenn_tidy(net) ; 21 | 22 | % Load a test image from Wikipedia and run the model. 23 | im = imread('https://upload.wikimedia.org/wikipedia/commons/4/4a/Aamir_Khan_March_2015.jpg') ; 24 | im = im(1:250,:,:) ; % crop 25 | im_ = single(im) ; % note: 255 range 26 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 27 | im_ = bsxfun(@minus,im_,net.meta.normalization.averageImage) ; 28 | res = vl_simplenn(net, im_) ; 29 | 30 | % Show the classification result. 31 | scores = squeeze(gather(res(end).x)) ; 32 | [bestScore, best] = max(scores) ; 33 | figure(1) ; clf ; imagesc(im) ; axis equal off ; 34 | title(sprintf('%s (%d), score %.3f',... 35 | net.meta.classes.description{best}, best, bestScore), ...
36 | 'Interpreter', 'none') ; 37 | -------------------------------------------------------------------------------- /matconvnet.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 14 4 | VisualStudioVersion = 14.0.24720.0 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "matconvnet", "matconvnet.vcxproj", "{B0BD9132-1D90-4267-A07A-B44DE497A9C7}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|Win32 = Debug|Win32 11 | Debug|x64 = Debug|x64 12 | Release|Win32 = Release|Win32 13 | Release|x64 = Release|x64 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.ActiveCfg = Debug|Win32 17 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.Build.0 = Debug|Win32 18 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|x64.ActiveCfg = Debug|Win32 19 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.ActiveCfg = Release|Win32 20 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.Build.0 = Release|Win32 21 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|x64.ActiveCfg = Release|Win32 22 | EndGlobalSection 23 | GlobalSection(SolutionProperties) = preSolution 24 | HideSolutionNode = FALSE 25 | EndGlobalSection 26 | EndGlobal 27 | -------------------------------------------------------------------------------- /matconvnet.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/addLayer.m: -------------------------------------------------------------------------------- 1 | function addLayer(obj, name, block, inputs, outputs, params, varargin) 2 | %ADDLAYER Adds a layer to a DagNN 3 | % ADDLAYER(NAME, LAYER, INPUTS, OUTPUTS, PARAMS) adds the 4 | % specified layer to the network. NAME is a string with the layer 5 | % name, used as a unique identifier. BLOCK is the object 6 | % implementing the layer, which should be a subclass of the 7 | % Layer. INPUTS, OUTPUTS are cell arrays of variable names, and 8 | % PARAMS of parameter names. 9 | % 10 | % See Also REMOVELAYER(). 11 | opts.skipRebuild = false; 12 | opts = vl_argparse(opts, varargin); 13 | 14 | index = find(strcmp(name, {obj.layers.name})) ; 15 | if ~isempty(index), error('There is already a layer with name ''%s''.', name), end 16 | index = numel(obj.layers) + 1 ; 17 | 18 | if nargin < 6, params = {} ; end 19 | if ischar(inputs), inputs = {inputs} ; end 20 | if ischar(outputs), outputs = {outputs} ; end 21 | if ischar(params), params = {params} ; end 22 | 23 | obj.layers(index) = struct(... 24 | 'name', {name}, ... 25 | 'inputs', {inputs}, ... 26 | 'outputs', {outputs}, ... 27 | 'params', {params}, ... 28 | 'inputIndexes', {[]}, ... 29 | 'outputIndexes', {[]}, ... 30 | 'paramIndexes', {[]}, ... 31 | 'forwardTime', {[]}, ... 32 | 'backwardTime', {[]}, ...
33 | 'block', {block}) ; 34 | obj.layers(index).block.attach(obj, index) ; 35 | if ~opts.skipRebuild, obj.rebuild() ; end ; 36 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/getVarSizes.m: -------------------------------------------------------------------------------- 1 | function sizes = getVarSizes(obj, inputSizes) 2 | %GETVARSIZES Get the size of the variables 3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the 4 | % DagNN variables given the size of the inputs. `inputSizes` is 5 | % a cell array of the type `{'inputName', inputSize, ...}` 6 | % Returns a cell array with the sizes of all network variables. 7 | % 8 | % Example: compute the storage needed for a batch size of 256 for an 9 | % imagenet-like network: 10 | % ``` 11 | % batch_size = 256; single_num_bytes = 4; 12 | % input_size = [net.meta.normalization.imageSize, batch_size]; 13 | % var_sizes = net.getVarSizes({'data', input_size}); 14 | % fprintf('Network activations will take %.2fMiB in single.\n', ... 15 | % sum(prod(cell2mat(var_sizes'), 2)) * single_num_bytes ./ 1024^2); 16 | % ``` 17 | 18 | % Copyright (C) 2015 Andrea Vedaldi, Karel Lenc. 19 | % All rights reserved. 20 | % 21 | % This file is part of the VLFeat library and is made available under 22 | % the terms of the BSD license (see the COPYING file). 23 | 24 | nv = numel(obj.vars) ; 25 | sizes = num2cell(NaN(nv, 4),2)' ; 26 | 27 | for i = 1:2:numel(inputSizes) 28 | v = obj.getVarIndex(inputSizes{i}) ; 29 | if isnan(v) 30 | error('Variable `%s` not found in the network.', inputSizes{i}); 31 | end; 32 | sizes{v} = [inputSizes{i+1}(:)' ones(1, 4 - numel(inputSizes{i+1}))] ; 33 | end 34 | 35 | for layer = obj.layers(obj.executionOrder) 36 | in = layer.inputIndexes ; 37 | out = layer.outputIndexes ; 38 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ; 39 | end 40 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/initParams.m: -------------------------------------------------------------------------------- 1 | function initParams(obj) 2 | % INITPARAMS Initialize the parameters of the DagNN 3 | % OBJ.INITPARAMS() uses the INIT() method of each layer to initialize 4 | % the corresponding parameters (usually randomly). 5 | 6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 7 | % All rights reserved. 8 | % 9 | % This file is part of the VLFeat library and is made available under 10 | % the terms of the BSD license (see the COPYING file). 11 | 12 | for l = 1:numel(obj.layers) 13 | p = obj.getParamIndex(obj.layers(l).params) ; 14 | params = obj.layers(l).block.initParams() ; 15 | switch obj.device 16 | case 'cpu' 17 | params = cellfun(@gather, params, 'UniformOutput', false) ; 18 | case 'gpu' 19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ; 20 | end 21 | [obj.params(p).value] = deal(params{:}) ; 22 | end 23 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/loadobj.m: -------------------------------------------------------------------------------- 1 | function obj = loadobj(s) 2 | % LOADOBJ Initialize a DagNN object from a structure. 3 | % OBJ = LOADOBJ(S) initializes a DagNN object from the structure 4 | % S. It is the opposite of S = OBJ.SAVEOBJ(). 5 | % If S is a string, initializes the DagNN object with data 6 | % from a mat-file S. Otherwise, if S is an instance of `dagnn.DagNN`, 7 | % returns S. 8 | 9 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
10 | % All rights reserved. 11 | % 12 | % This file is part of the VLFeat library and is made available under 13 | % the terms of the BSD license (see the COPYING file). 14 | 15 | if ischar(s), s = load(s); end 16 | if isstruct(s) 17 | assert(isfield(s, 'layers'), 'Invalid model.'); 18 | if ~isstruct(s.layers) 19 | warning('The model appears to be a `simplenn` model. Using `fromSimpleNN` instead.'); 20 | obj = dagnn.DagNN.fromSimpleNN(s); 21 | return; 22 | end 23 | obj = dagnn.DagNN() ; 24 | try 25 | for l = 1:numel(s.layers) 26 | constr = str2func(s.layers(l).type) ; 27 | block = constr() ; 28 | block.load(struct(s.layers(l).block)) ; 29 | obj.addLayer(... 30 | s.layers(l).name, ... 31 | block, ... 32 | s.layers(l).inputs, ... 33 | s.layers(l).outputs, ... 34 | s.layers(l).params,... 35 | 'skipRebuild', true) ; 36 | end 37 | catch e % Make sure the DagNN object is in a valid state 38 | obj.rebuild(); 39 | rethrow(e); 40 | end 41 | obj.rebuild(); 42 | if isfield(s, 'params') 43 | for f = setdiff(fieldnames(s.params)','name') 44 | f = char(f) ; 45 | for i = 1:numel(s.params) 46 | p = obj.getParamIndex(s.params(i).name) ; 47 | obj.params(p).(f) = s.params(i).(f) ; 48 | end 49 | end 50 | end 51 | if isfield(s, 'vars') 52 | for f = setdiff(fieldnames(s.vars)','name') 53 | f = char(f) ; 54 | for i = 1:numel(s.vars) 55 | p = obj.getVarIndex(s.vars(i).name) ; 56 | obj.vars(p).(f) = s.vars(i).(f) ; 57 | end 58 | end 59 | end 60 | for f = setdiff(fieldnames(s)', {'vars','params','layers'}) 61 | f = char(f) ; 62 | obj.(f) = s.(f) ; 63 | end 64 | elseif isa(s, 'dagnn.DagNN') 65 | obj = s ; 66 | else 67 | error('Unknown data type %s for `loadobj`.', class(s)); 68 | end 69 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/move.m: -------------------------------------------------------------------------------- 1 | function move(obj, device) 2 | %MOVE Move the DagNN to either CPU or GPU 3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU. 4 | % 5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU. 6 | 7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 8 | % All rights reserved. 9 | % 10 | % This file is part of the VLFeat library and is made available under 11 | % the terms of the BSD license (see the COPYING file). 12 | 13 | obj.reset() ; 14 | obj.device = device ; 15 | switch device 16 | case 'gpu' 17 | for i=1:numel(obj.params) 18 | obj.params(i).value = gpuArray(obj.params(i).value) ; 19 | end 20 | case 'cpu' 21 | for i=1:numel(obj.params) 22 | obj.params(i).value = gather(obj.params(i).value) ; 23 | end 24 | otherwise 25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ; 26 | end 27 | for l = 1:numel(obj.layers) 28 | obj.layers(l).block.move(device) ; 29 | end 30 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/removeLayer.m: -------------------------------------------------------------------------------- 1 | function removeLayer(obj, layerName) 2 | %REMOVELAYER Remove a layer from the network 3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object 4 | % OBJ. NAME can be a string or a cell array of strings. 5 | 6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 7 | % All rights reserved. 8 | % 9 | % This file is part of the VLFeat library and is made available under 10 | % the terms of the BSD license (see the COPYING file).
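% Example (an illustrative sketch, not part of the original file; 'drop1'
% and 'drop2' are hypothetical layer names):
%
%   net.removeLayer('drop1') ;            % remove a single layer
%   net.removeLayer({'drop1', 'drop2'}) ; % remove several layers at once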
11 | 12 | if ischar(layerName), layerName = {layerName}; end; 13 | idxs = obj.getLayerIndex(layerName); 14 | if any(isnan(idxs)) 15 | error('Invalid layer name `%s`', ... 16 | strjoin(layerName(isnan(idxs)), ', ')); 17 | end 18 | obj.layers(idxs) = [] ; 19 | obj.rebuild() ; 20 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/renameLayer.m: -------------------------------------------------------------------------------- 1 | function renameLayer(obj, oldName, newName, varargin) 2 | %RENAMELAYER Rename a layer 3 | % RENAMELAYER(OLDNAME, NEWNAME) changes the name of the layer 4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an 5 | % existing layer. 6 | 7 | opts.quiet = false ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | % Find the layer to rename 11 | v = obj.getLayerIndex(oldName) ; 12 | if isnan(v) 13 | % There is no such layer, nothing to do 14 | if ~opts.quiet 15 | warning('There is no layer ''%s''.', oldName) ; 16 | end 17 | return ; 18 | end 19 | 20 | % Check if newName is an existing layer 21 | newNameExists = any(strcmp(newName, {obj.layers.name})) ; 22 | if newNameExists 23 | error('There is already a layer ''%s''.', newName) ; 24 | end 25 | 26 | % Replace oldName with newName in all the layers 27 | obj.layers(v).name = newName ; 28 | obj.rebuild() ; 29 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/renameVar.m: -------------------------------------------------------------------------------- 1 | function renameVar(obj, oldName, newName, varargin) 2 | %RENAMEVAR Rename a variable 3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable 4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an 5 | % existing variable. 6 | 7 | opts.quiet = false ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | % Find the variable to rename 11 | v = obj.getVarIndex(oldName) ; 12 | if isnan(v) 13 | % There is no such variable, nothing to do 14 | if ~opts.quiet 15 | warning('There is no variable ''%s''.', oldName) ; 16 | end 17 | return ; 18 | end 19 | 20 | % Check if newName is an existing variable 21 | newNameExists = any(strcmp(newName, {obj.vars.name})) ; 22 | 23 | % Replace oldName with newName in all the layers 24 | for l = 1:numel(obj.layers) 25 | for f = {'inputs', 'outputs'} 26 | f = char(f) ; 27 | sel = find(strcmp(oldName, obj.layers(l).(f))) ; 28 | [obj.layers(l).(f){sel}] = deal(newName) ; 29 | end 30 | end 31 | 32 | % If newVariable is a variable in the graph, then there is 33 | % nothing else to do. obj.rebuild() will remove the slot 34 | % in obj.vars() for oldName as that variable becomes unused. 35 | % 36 | % If, however, newVariable is not in the graph already, then 37 | % the slot in obj.vars() is preserved and only the variable name 38 | % is changed. 39 | 40 | if ~newNameExists 41 | obj.vars(v).name = newName ; 42 | % update the variable name hash, otherwise rebuild() won't find this var 43 | % correctly 44 | obj.varNames.(newName) = v ; 45 | end 46 | 47 | obj.rebuild() ; 48 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/reset.m: -------------------------------------------------------------------------------- 1 | function reset(obj) 2 | %RESET Reset the DagNN 3 | % RESET(obj) resets the DagNN obj. The function clears any intermediate values stored in the DagNN 4 | % object, including parameter gradients. It also calls the reset 5 | % function of every layer.
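% Example (an illustrative sketch, not part of the original file; 'data'
% and 'prob' are hypothetical variable names):
%
%   net.eval({'data', im}) ;
%   prob = net.vars(net.getVarIndex('prob')).value ;
%   net.reset() ; % clear stored values and derivatives to free memory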
6 | 7 | obj.clearParameterServer() ; 8 | [obj.vars.value] = deal([]) ; 9 | [obj.vars.der] = deal([]) ; 10 | [obj.params.der] = deal([]) ; 11 | for l = 1:numel(obj.layers) 12 | obj.layers(l).block.reset() ; 13 | end 14 | -------------------------------------------------------------------------------- /matlab/+dagnn/@DagNN/saveobj.m: -------------------------------------------------------------------------------- 1 | function s = saveobj(obj) 2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure 3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB 4 | % structure S. This is particularly convenient to preserve future 5 | % compatibility and to ship networks that are pure structures, 6 | % instead of embedding dependencies on code. 7 | % 8 | % The object can be reconstructed by `obj = DagNN.loadobj(s)`. 9 | % 10 | % As a side effect, the network is reset (all variables are cleared) 11 | % and transferred to the CPU. 12 | % 13 | % See Also: dagnn.DagNN.loadobj, dagnn.DagNN.reset 14 | 15 | % Copyright (C) 2015-2016 Karel Lenc and Andrea Vedaldi. 16 | % All rights reserved. 17 | % 18 | % This file is part of the VLFeat library and is made available under 19 | % the terms of the BSD license (see the COPYING file). 20 | 21 | device = obj.device ; 22 | obj.move('cpu') ; 23 | s.vars = struct(... 24 | 'name', {obj.vars.name}, ... 25 | 'precious', {obj.vars.precious}) ; 26 | s.params = struct(... 27 | 'name', {obj.params.name}, ... 28 | 'value', {obj.params.value}, ... 29 | 'learningRate', {obj.params.learningRate}, ... 30 | 'weightDecay', {obj.params.weightDecay}) ; 31 | s.layers = struct(... 32 | 'name', {obj.layers.name}, ... 33 | 'type', {[]}, ... 34 | 'inputs', {obj.layers.inputs}, ... 35 | 'outputs', {obj.layers.outputs}, ... 36 | 'params', {obj.layers.params}, ...
38 | s.meta = obj.meta ;
39 | 
40 | for l = 1:numel(obj.layers)
41 |   block = obj.layers(l).block ;
42 |   slayer = block.save() ;
43 |   s.layers(l).type = class(block) ;
44 |   s.layers(l).block = slayer ;
45 | end
46 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/setLayerInputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerInputs(obj, layer, inputs)
2 | %SETLAYERINPUTS Set or change the inputs to a layer
3 | %   Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...})
4 | 
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for input = inputs
8 |   v(end+1) = obj.addVar(char(input)) ;
9 | end
10 | obj.layers(l).inputs = inputs ;
11 | obj.rebuild() ;
12 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/setLayerOutputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerOutputs(obj, layer, outputs)
2 | %SETLAYEROUTPUTS Set or change the outputs of a layer
3 | %   Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...})
4 | 
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for output = outputs
8 |   v(end+1) = obj.addVar(char(output)) ;
9 | end
10 | obj.layers(l).outputs = outputs ;
11 | obj.rebuild() ;
12 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/setLayerParams.m:
--------------------------------------------------------------------------------
1 | function v = setLayerParams(obj, layer, params)
2 | %SETLAYERPARAMS Set or change the parameters of a layer
3 | %   Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...})
4 | 
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for param = params
8 |   v(end+1) = obj.addParam(char(param)) ;
9 | end
10 | obj.layers(l).params = params ;
11 | obj.rebuild() ;
12 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/BatchNorm.m:
--------------------------------------------------------------------------------
1 | classdef BatchNorm < dagnn.ElementWise
2 |   properties
3 |     numChannels
4 |     epsilon = 1e-5
5 |     opts = {'NoCuDNN'} % ours seems slightly faster
6 |   end
7 | 
8 |   properties (Transient)
9 |     moments
10 |   end
11 | 
12 |   methods
13 |     function outputs = forward(obj, inputs, params)
14 |       if strcmp(obj.net.mode, 'test')
15 |         outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
16 |                                 'moments', params{3}, ...
17 |                                 'epsilon', obj.epsilon, ...
18 |                                 obj.opts{:}) ;
19 |       else
20 |         [outputs{1},obj.moments] = ...
21 |             vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
22 |                        'epsilon', obj.epsilon, ...
23 |                        obj.opts{:}) ;
24 |       end
25 |     end
26 | 
27 |     function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
28 |       [derInputs{1}, derParams{1}, derParams{2}, derParams{3}] = ...
29 |           vl_nnbnorm(inputs{1}, params{1}, params{2}, derOutputs{1}, ...
30 |                      'epsilon', obj.epsilon, ...
31 |                      'moments', obj.moments, ...
32 | obj.opts{:}) ; 33 | obj.moments = [] ; 34 | % multiply the moments update by the number of images in the batch 35 | % this is required to make the update additive for subbatches 36 | % and will eventually be normalized away 37 | derParams{3} = derParams{3} * size(inputs{1},4) ; 38 | end 39 | 40 | % --------------------------------------------------------------------- 41 | function obj = BatchNorm(varargin) 42 | obj.load(varargin{:}) ; 43 | end 44 | 45 | function params = initParams(obj) 46 | params{1} = ones(obj.numChannels,1,'single') ; 47 | params{2} = zeros(obj.numChannels,1,'single') ; 48 | params{3} = zeros(obj.numChannels,2,'single') ; 49 | end 50 | 51 | function attach(obj, net, index) 52 | attach@dagnn.ElementWise(obj, net, index) ; 53 | p = net.getParamIndex(net.layers(index).params{3}) ; 54 | net.params(p).trainMethod = 'average' ; 55 | net.params(p).learningRate = 0.1 ; 56 | end 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /matlab/+dagnn/BilinearSampler.m: -------------------------------------------------------------------------------- 1 | % Wrapper for BilinearSampler block: 2 | % (c) 2016 Ankush Gupta 3 | 4 | classdef BilinearSampler < dagnn.Layer 5 | methods 6 | function outputs = forward(obj, inputs, params) 7 | outputs = vl_nnbilinearsampler(inputs{1}, inputs{2}); 8 | outputs = {outputs}; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | [dX,dG] = vl_nnbilinearsampler(inputs{1}, inputs{2}, derOutputs{1}); 13 | derInputs = {dX,dG}; 14 | derParams = {}; 15 | end 16 | 17 | function outputSizes = getOutputSizes(obj, inputSizes) 18 | xSize = inputSizes{1}; 19 | gSize = inputSizes{2}; 20 | outputSizes = {[gSize(2), gSize(3), xSize(3), xSize(4)]}; 21 | end 22 | 23 | function obj = BilinearSampler(varargin) 24 | obj.load(varargin); 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /matlab/+dagnn/Concat.m: -------------------------------------------------------------------------------- 1 | classdef Concat < dagnn.ElementWise 2 | properties 3 | dim = 3 4 | end 5 | 6 | properties (Transient) 7 | inputSizes = {} 8 | end 9 | 10 | methods 11 | function outputs = forward(obj, inputs, params) 12 | outputs{1} = vl_nnconcat(inputs, obj.dim) ; 13 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ; 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | derInputs = vl_nnconcat(inputs, obj.dim, derOutputs{1}, 'inputSizes', obj.inputSizes) ; 18 | derParams = {} ; 19 | end 20 | 21 | function reset(obj) 22 | obj.inputSizes = {} ; 23 | end 24 | 25 | function outputSizes = getOutputSizes(obj, inputSizes) 26 | sz = inputSizes{1} ; 27 | for k = 2:numel(inputSizes) 28 | sz(obj.dim) = sz(obj.dim) + inputSizes{k}(obj.dim) ; 29 | end 30 | outputSizes{1} = sz ; 31 | end 32 | 33 | function rfs = getReceptiveFields(obj) 34 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ; 35 | if obj.dim == 3 || obj.dim == 4 36 | rfs = getReceptiveFields@dagnn.ElementWise(obj) ; 37 | rfs = repmat(rfs, numInputs, 1) ; 38 | else 39 | for i = 1:numInputs 40 | rfs(i,1).size = [NaN NaN] ; 41 | rfs(i,1).stride = [NaN NaN] ; 42 | rfs(i,1).offset = [NaN NaN] ; 43 | end 44 | end 45 | end 46 | 47 | function load(obj, varargin) 48 | s = dagnn.Layer.argsToStruct(varargin{:}) ; 49 | % backward file compatibility 50 | if isfield(s, 'numInputs'), s = rmfield(s, 'numInputs') ; end 51 | 
load@dagnn.Layer(obj, s) ;
52 |     end
53 | 
54 |     function obj = Concat(varargin)
55 |       obj.load(varargin{:}) ;
56 |     end
57 |   end
58 | end
59 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/Conv.m:
--------------------------------------------------------------------------------
1 | classdef Conv < dagnn.Filter
2 |   properties
3 |     size = [0 0 0 0]
4 |     hasBias = true
5 |     opts = {'cuDNN'}
6 |   end
7 | 
8 |   methods
9 |     function outputs = forward(obj, inputs, params)
10 |       if ~obj.hasBias, params{2} = [] ; end
11 |       outputs{1} = vl_nnconv(...
12 |         inputs{1}, params{1}, params{2}, ...
13 |         'pad', obj.pad, ...
14 |         'stride', obj.stride, ...
15 |         'dilate', obj.dilate, ...
16 |         obj.opts{:}) ;
17 |     end
18 | 
19 |     function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 |       if ~obj.hasBias, params{2} = [] ; end
21 |       [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconv(...
22 |         inputs{1}, params{1}, params{2}, derOutputs{1}, ...
23 |         'pad', obj.pad, ...
24 |         'stride', obj.stride, ...
25 |         'dilate', obj.dilate, ...
26 |         obj.opts{:}) ;
27 |     end
28 | 
29 |     function kernelSize = getKernelSize(obj)
30 |       kernelSize = obj.size(1:2) ;
31 |     end
32 | 
33 |     function outputSizes = getOutputSizes(obj, inputSizes)
34 |       outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
35 |       outputSizes{1}(3) = obj.size(4) ;
36 |     end
37 | 
38 |     function params = initParams(obj)
39 |       % "Xavier improved" (He) initialization: scale by the filter fan-in
40 |       sc = sqrt(2 / prod(obj.size(1:3))) ;
41 |       %sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
42 |       params{1} = randn(obj.size,'single') * sc ;
43 |       if obj.hasBias
44 |         params{2} = zeros(obj.size(4),1,'single') ;
45 |       end
46 |     end
47 | 
48 |     function set.size(obj, ksize)
49 |       % make sure that ksize has 4 dimensions
50 |       ksize = [ksize(:)' 1 1 1 1] ;
51 |       obj.size = ksize(1:4) ;
52 |     end
53 | 
54 |     function obj = Conv(varargin)
55 |       obj.load(varargin) ;
56 |       % normalize fields by implicitly calling the setters defined in
57 |       % dagnn.Filter and here
58 |       obj.size = obj.size ;
59 |       obj.stride = obj.stride ;
60 |       obj.pad = obj.pad ;
61 |     end
62 |   end
63 | end
64 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/Crop.m:
--------------------------------------------------------------------------------
1 | classdef Crop < dagnn.ElementWise
2 |   %CROP DagNN cropping layer.
3 |   %   This is a peculiar layer from FCN. It crops inputs{1} to
4 |   %   match the size of inputs{2} (starting with a base crop amount).
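  %
  %   Worked example (editor's note; sizes assumed for illustration):
  %   if inputs{1} is 38x38, inputs{2} is 32x32 and obj.crop = [2 2],
  %   then getAdaptedCrops() below returns [2 4 2 4]: the vertical
  %   excess of 6 rows is split as max(0, 6-2) = 4 rows on one side and
  %   the remaining 2 on the other, and likewise for the 6 excess columns.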
5 | 
6 | 
7 |   properties
8 |     crop = [0 0]
9 |   end
10 | 
11 |   properties (Transient)
12 |     inputSizes = {}
13 |   end
14 | 
15 |   methods
16 |     function crop = getAdaptedCrops(obj)
17 |       cropv = obj.inputSizes{1}(1) - obj.inputSizes{2}(1) ;
18 |       cropu = obj.inputSizes{1}(2) - obj.inputSizes{2}(2) ;
19 |       cropv1 = max(0, cropv - obj.crop(1)) ;
20 |       cropu1 = max(0, cropu - obj.crop(2)) ;
21 |       crop = [cropv - cropv1, cropv1, cropu - cropu1, cropu1] ;
22 |     end
23 | 
24 |     function outputs = forward(obj, inputs, params)
25 |       obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
26 |       adjCrop = obj.getAdaptedCrops() ;
27 |       outputs{1} = vl_nncrop(inputs{1}, adjCrop) ;
28 |     end
29 | 
30 |     function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 |       adjCrop = obj.getAdaptedCrops() ;
32 |       derInputs{1} = vl_nncrop(inputs{1}, adjCrop, derOutputs{1}, obj.inputSizes{1}) ;
33 |       derInputs{2} = [] ;
34 |       derParams = {} ;
35 |     end
36 | 
37 |     function reset(obj)
38 |       obj.inputSizes = {} ;
39 |     end
40 | 
41 |     function outputSizes = getOutputSizes(obj, inputSizes)
42 |       obj.inputSizes = inputSizes ;
43 |       crop = obj.getAdaptedCrops() ;
44 |       outputSizes{1} = inputSizes{1} - [crop(1)+crop(2), crop(3)+crop(4), 0, 0] ;
45 |     end
46 | 
47 |     function rfs = getReceptiveFields(obj)
48 |       rfs(1,1).size = [1 1] ;
49 |       rfs(1,1).stride = [1 1] ;
50 |       rfs(1,1).offset = 1 + obj.crop ;
51 |       rfs(2,1).size = [] ;
52 |       rfs(2,1).stride = [] ;
53 |       rfs(2,1).offset = [] ;
54 |     end
55 | 
56 |     function obj = Crop(varargin)
57 |       obj.load(varargin) ;
58 |     end
59 |   end
60 | end
61 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/DropOut.m:
--------------------------------------------------------------------------------
1 | classdef DropOut < dagnn.ElementWise
2 |   properties
3 |     rate = 0.5
4 |     frozen = false
5 |   end
6 | 
7 |   properties (Transient)
8 |     mask
9 |   end
10 | 
11 |   methods
12 |     function outputs = forward(obj, inputs, params)
13 |       if strcmp(obj.net.mode, 'test')
14 |         outputs = inputs ;
15 |         return ;
16 |       end
17 |       if obj.frozen && ~isempty(obj.mask)
18 |         outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ;
19 |       else
20 |         [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ;
21 |       end
22 |     end
23 | 
24 |     function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
25 |       if strcmp(obj.net.mode, 'test')
26 |         derInputs = derOutputs ;
27 |         derParams = {} ;
28 |         return ;
29 |       end
30 |       derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ;
31 |       derParams = {} ;
32 |     end
33 | 
34 |     % ---------------------------------------------------------------------
35 |     function obj = DropOut(varargin)
36 |       obj.load(varargin{:}) ;
37 |     end
38 | 
39 |     function obj = reset(obj)
40 |       reset@dagnn.ElementWise(obj) ;
41 |       obj.mask = [] ;
42 |       obj.frozen = false ;
43 |     end
44 |   end
45 | end
46 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/ElementWise.m:
--------------------------------------------------------------------------------
1 | classdef ElementWise < dagnn.Layer
2 |   %ELEMENTWISE DagNN layers that operate at individual spatial locations
3 |   methods
4 |     function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes)
5 |       outputSizes = inputSizes ;
6 |       transforms = {eye(6)} ;
7 |     end
8 | 
9 |     function rfs = getReceptiveFields(obj)
10 |       rfs.size = [1 1] ;
11 |       rfs.stride = [1 1] ;
12 |       rfs.offset = [1 1] ;
13 |     end
14 | 
15 |     function outputSizes =
getOutputSizes(obj, inputSizes) 16 | outputSizes = inputSizes ; 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /matlab/+dagnn/Filter.m: -------------------------------------------------------------------------------- 1 | classdef Filter < dagnn.Layer 2 | properties 3 | pad = [0 0 0 0] 4 | stride = [1 1] 5 | dilate = [1 1] 6 | end 7 | methods 8 | function set.pad(obj, pad) 9 | if numel(pad) == 1 10 | obj.pad = [pad pad pad pad] ; 11 | elseif numel(pad) == 2 12 | obj.pad = pad([1 1 2 2]) ; 13 | else 14 | obj.pad = pad ; 15 | end 16 | end 17 | 18 | function set.stride(obj, stride) 19 | if numel(stride) == 1 20 | obj.stride = [stride stride] ; 21 | else 22 | obj.stride = stride ; 23 | end 24 | end 25 | 26 | function set.dilate(obj, dilate) 27 | if numel(dilate) == 1 28 | obj.dilate = [dilate dilate] ; 29 | else 30 | obj.dilate = dilate ; 31 | end 32 | end 33 | 34 | function kernelSize = getKernelSize(obj) 35 | kernelSize = [1 1] ; 36 | end 37 | 38 | function outputSizes = getOutputSizes(obj, inputSizes) 39 | ks = obj.getKernelSize() ; 40 | ke = (ks - 1) .* obj.dilate + 1 ; 41 | outputSizes{1} = [... 42 | fix((inputSizes{1}(1) + obj.pad(1) + obj.pad(2) - ke(1)) / obj.stride(1)) + 1, ... 43 | fix((inputSizes{1}(2) + obj.pad(3) + obj.pad(4) - ke(2)) / obj.stride(2)) + 1, ... 44 | 1, ... 45 | inputSizes{1}(4)] ; 46 | end 47 | 48 | function rfs = getReceptiveFields(obj) 49 | ks = obj.getKernelSize() ; 50 | ke = (ks - 1) .* obj.dilate + 1 ; 51 | y1 = 1 - obj.pad(1) ; 52 | y2 = 1 - obj.pad(1) + ke(1) - 1 ; 53 | x1 = 1 - obj.pad(3) ; 54 | x2 = 1 - obj.pad(3) + ke(2) - 1 ; 55 | h = y2 - y1 + 1 ; 56 | w = x2 - x1 + 1 ; 57 | rfs.size = [h, w] ; 58 | rfs.stride = obj.stride ; 59 | rfs.offset = [y1+y2, x1+x2]/2 ; 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /matlab/+dagnn/LRN.m: -------------------------------------------------------------------------------- 1 | classdef LRN < dagnn.ElementWise 2 | properties 3 | param = [5 1 0.0001/5 0.75] 4 | end 5 | 6 | methods 7 | function outputs = forward(obj, inputs, params) 8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ; 13 | derParams = {} ; 14 | end 15 | 16 | function obj = LRN(varargin) 17 | obj.load(varargin) ; 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /matlab/+dagnn/Loss.m: -------------------------------------------------------------------------------- 1 | classdef Loss < dagnn.ElementWise 2 | properties 3 | loss = 'softmaxlog' 4 | opts = {} 5 | end 6 | 7 | properties (Transient) 8 | average = 0 9 | numAveraged = 0 10 | end 11 | 12 | methods 13 | function outputs = forward(obj, inputs, params) 14 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss, obj.opts{:}) ; 15 | n = obj.numAveraged ; 16 | m = n + size(inputs{1},4) ; 17 | obj.average = (n * obj.average + gather(outputs{1})) / m ; 18 | obj.numAveraged = m ; 19 | end 20 | 21 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 22 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss, obj.opts{:}) ; 23 | derInputs{2} = [] ; 24 | derParams = {} ; 25 | end 26 | 27 | function reset(obj) 28 | obj.average = 0 ; 29 | obj.numAveraged = 0 ; 30 | end 31 | 32 | function 
outputSizes = getOutputSizes(obj, inputSizes, paramSizes) 33 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ; 34 | end 35 | 36 | function rfs = getReceptiveFields(obj) 37 | % the receptive field depends on the dimension of the variables 38 | % which is not known until the network is run 39 | rfs(1,1).size = [NaN NaN] ; 40 | rfs(1,1).stride = [NaN NaN] ; 41 | rfs(1,1).offset = [NaN NaN] ; 42 | rfs(2,1) = rfs(1,1) ; 43 | end 44 | 45 | function obj = Loss(varargin) 46 | obj.load(varargin) ; 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /matlab/+dagnn/NormOffset.m: -------------------------------------------------------------------------------- 1 | classdef NormOffset < dagnn.ElementWise 2 | properties 3 | param = [1 0.5] 4 | end 5 | 6 | methods 7 | function outputs = forward(obj, inputs, params) 8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ; 13 | derParams = {} ; 14 | end 15 | 16 | function obj = NormOffset(varargin) 17 | obj.load(varargin) ; 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /matlab/+dagnn/Pooling.m: -------------------------------------------------------------------------------- 1 | classdef Pooling < dagnn.Filter 2 | properties 3 | method = 'max' 4 | poolSize = [1 1] 5 | opts = {'cuDNN'} 6 | end 7 | 8 | methods 9 | function outputs = forward(self, inputs, params) 10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ... 11 | 'pad', self.pad, ... 12 | 'stride', self.stride, ... 13 | 'method', self.method, ... 14 | self.opts{:}) ; 15 | end 16 | 17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs) 18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ... 19 | 'pad', self.pad, ... 20 | 'stride', self.stride, ... 21 | 'method', self.method, ... 22 | self.opts{:}) ; 23 | derParams = {} ; 24 | end 25 | 26 | function kernelSize = getKernelSize(obj) 27 | kernelSize = obj.poolSize ; 28 | end 29 | 30 | function outputSizes = getOutputSizes(obj, inputSizes) 31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ; 32 | outputSizes{1}(3) = inputSizes{1}(3) ; 33 | end 34 | 35 | function obj = Pooling(varargin) 36 | obj.load(varargin) ; 37 | end 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /matlab/+dagnn/ReLU.m: -------------------------------------------------------------------------------- 1 | classdef ReLU < dagnn.ElementWise 2 | properties 3 | useShortCircuit = true 4 | leak = 0 5 | opts = {} 6 | end 7 | 8 | methods 9 | function outputs = forward(obj, inputs, params) 10 | outputs{1} = vl_nnrelu(inputs{1}, [], ... 11 | 'leak', obj.leak, obj.opts{:}) ; 12 | end 13 | 14 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 15 | derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}, ... 16 | 'leak', obj.leak, ... 17 | obj.opts{:}) ; 18 | derParams = {} ; 19 | end 20 | 21 | function forwardAdvanced(obj, layer) 22 | if ~obj.useShortCircuit || ~obj.net.conserveMemory 23 | forwardAdvanced@dagnn.Layer(obj, layer) ; 24 | return ; 25 | end 26 | net = obj.net ; 27 | in = layer.inputIndexes ; 28 | out = layer.outputIndexes ; 29 | net.vars(out).value = vl_nnrelu(net.vars(in).value, [], ... 30 | 'leak', obj.leak, ... 
31 | obj.opts{:}) ; 32 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) - 1; 33 | if ~net.vars(in).precious & net.numPendingVarRefs(in) == 0 34 | net.vars(in).value = [] ; 35 | end 36 | end 37 | 38 | function backwardAdvanced(obj, layer) 39 | if ~obj.useShortCircuit || ~obj.net.conserveMemory 40 | backwardAdvanced@dagnn.Layer(obj, layer) ; 41 | return ; 42 | end 43 | net = obj.net ; 44 | in = layer.inputIndexes ; 45 | out = layer.outputIndexes ; 46 | 47 | if isempty(net.vars(out).der), return ; end 48 | 49 | derInput = vl_nnrelu(net.vars(out).value, net.vars(out).der, ... 50 | 'leak', obj.leak, obj.opts{:}) ; 51 | 52 | if ~net.vars(out).precious 53 | net.vars(out).der = [] ; 54 | net.vars(out).value = [] ; 55 | end 56 | 57 | if net.numPendingVarRefs(in) == 0 58 | net.vars(in).der = derInput ; 59 | else 60 | net.vars(in).der = net.vars(in).der + derInput ; 61 | end 62 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ; 63 | end 64 | 65 | function obj = ReLU(varargin) 66 | obj.load(varargin) ; 67 | end 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /matlab/+dagnn/Scale.m: -------------------------------------------------------------------------------- 1 | classdef Scale < dagnn.ElementWise 2 | properties 3 | size 4 | hasBias = true 5 | end 6 | 7 | methods 8 | 9 | function outputs = forward(obj, inputs, params) 10 | args = horzcat(inputs, params) ; 11 | outputs{1} = bsxfun(@times, args{1}, args{2}); 12 | if obj.hasBias 13 | outputs{1} = bsxfun(@plus, outputs{1}, args{3}) ; 14 | end 15 | end 16 | 17 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 18 | args = horzcat(inputs, params) ; 19 | sz = [size(args{2}) 1 1 1 1] ; 20 | sz = sz(1:4) ; 21 | dargs{1} = bsxfun(@times, derOutputs{1}, args{2}) ; 22 | dargs{2} = derOutputs{1} .* args{1} ; 23 | for k = find(sz == 1) 24 | dargs{2} = sum(dargs{2}, k) ; 25 | end 26 | if obj.hasBias 27 | dargs{3} = derOutputs{1} ; 28 | for k = find(sz == 1) 29 | dargs{3} = sum(dargs{3}, k) ; 30 | end 31 | end 32 | derInputs = dargs(1:numel(inputs)) ; 33 | derParams = dargs(numel(inputs)+(1:numel(params))) ; 34 | end 35 | 36 | function obj = Scale(varargin) 37 | obj.load(varargin) ; 38 | end 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /matlab/+dagnn/Sigmoid.m: -------------------------------------------------------------------------------- 1 | classdef Sigmoid < dagnn.ElementWise 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | outputs{1} = vl_nnsigmoid(inputs{1}) ; 5 | end 6 | 7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ; 9 | derParams = {} ; 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /matlab/+dagnn/SoftMax.m: -------------------------------------------------------------------------------- 1 | classdef SoftMax < dagnn.ElementWise 2 | methods 3 | function outputs = forward(self, inputs, params) 4 | outputs{1} = vl_nnsoftmax(inputs{1}) ; 5 | end 6 | 7 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs) 8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ; 9 | derParams = {} ; 10 | end 11 | 12 | function obj = SoftMax(varargin) 13 | obj.load(varargin) ; 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /matlab/+dagnn/SpatialNorm.m: 
--------------------------------------------------------------------------------
1 | classdef SpatialNorm < dagnn.ElementWise
2 |   properties
3 |     param = [2 2 10 2]
4 |   end
5 | 
6 |   methods
7 |     function outputs = forward(obj, inputs, params)
8 |       outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ;
9 |     end
10 | 
11 |     function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 |       derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ;
13 |       derParams = {} ;
14 |     end
15 | 
16 |     function obj = SpatialNorm(varargin)
17 |       obj.load(varargin) ;
18 |     end
19 |   end
20 | end
21 | 
--------------------------------------------------------------------------------
/matlab/+dagnn/Sum.m:
--------------------------------------------------------------------------------
1 | classdef Sum < dagnn.ElementWise
2 |   %SUM DagNN sum layer
3 |   %   The SUM layer takes the sum of all its inputs and stores the
4 |   %   result as its only output.
5 | 
6 |   properties (Transient)
7 |     numInputs
8 |   end
9 | 
10 |   methods
11 |     function outputs = forward(obj, inputs, params)
12 |       obj.numInputs = numel(inputs) ;
13 |       outputs{1} = inputs{1} ;
14 |       for k = 2:obj.numInputs
15 |         outputs{1} = outputs{1} + inputs{k} ;
16 |       end
17 |     end
18 | 
19 |     function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 |       for k = 1:obj.numInputs
21 |         derInputs{k} = derOutputs{1} ;
22 |       end
23 |       derParams = {} ;
24 |     end
25 | 
26 |     function outputSizes = getOutputSizes(obj, inputSizes)
27 |       outputSizes{1} = inputSizes{1} ;
28 |       for k = 2:numel(inputSizes)
29 |         if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1}))
30 |           if ~isequal(inputSizes{k}, outputSizes{1})
31 |             warning('Sum layer: the dimensions of the input variables are not the same.') ;
32 |           end
33 |         end
34 |       end
35 |     end
36 | 
37 |     function rfs = getReceptiveFields(obj)
38 |       numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
39 |       rfs.size = [1 1] ;
40 |       rfs.stride = [1 1] ;
41 |       rfs.offset = [1 1] ;
42 |       rfs = repmat(rfs, numInputs, 1) ;
43 |     end
44 | 
45 |     function obj = Sum(varargin)
46 |       obj.load(varargin) ;
47 |     end
48 |   end
49 | end
50 | 
--------------------------------------------------------------------------------
/matlab/compatibility/parallel/gather.m:
--------------------------------------------------------------------------------
1 | function x=gather(x)
2 | % GATHER Compatibility stub for the GATHER() function
3 | %   GATHER() is a function in the Parallel Computing Toolbox. MatConvNet
4 | %   can work without it.
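%
%   Editor's note: the stub simply returns its input unchanged, so code
%   written for gpuArray values also runs on plain CPU arrays, e.g.:
%
%     x = rand(3) ;
%     y = gather(x) ;   % y is identical to x; no toolbox required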
5 | -------------------------------------------------------------------------------- /matlab/compatibility/parallel/labindex.m: -------------------------------------------------------------------------------- 1 | function i = labindex() 2 | i = 1 ; 3 | -------------------------------------------------------------------------------- /matlab/compatibility/parallel/numlabs.m: -------------------------------------------------------------------------------- 1 | function n = numlabs() 2 | n = 1 ; 3 | -------------------------------------------------------------------------------- /matlab/mex/.build/bits/data.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/data.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/datacu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/datacu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/datamex.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/datamex.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/bilinearsampler_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/bilinearsampler_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/bilinearsampler_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/bilinearsampler_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/bnorm_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/bnorm_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/bnorm_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/bnorm_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/copy_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/copy_cpu.o -------------------------------------------------------------------------------- 
/matlab/mex/.build/bits/impl/copy_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/copy_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/im2row_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/im2row_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/im2row_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/im2row_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/imread_libjpeg.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/imread_libjpeg.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/normalize_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/normalize_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/normalize_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/normalize_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/pooling_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/pooling_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/pooling_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/pooling_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/subsample_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/subsample_cpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/subsample_gpu.o: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/subsample_gpu.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/impl/tinythread.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/impl/tinythread.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/imread.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/imread.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnbias.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnbias.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnbilinearsampler.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnbilinearsampler.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnbnorm.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnbnorm.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnconv.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnconv.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnfullyconnected.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnfullyconnected.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnnormalize.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnnormalize.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnpooling.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnpooling.o -------------------------------------------------------------------------------- /matlab/mex/.build/bits/nnsubsample.o: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/bits/nnsubsample.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_cudatool.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_cudatool.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_imreadjpeg.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_imreadjpeg.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_imreadjpeg_old.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_imreadjpeg_old.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnbilinearsampler.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnbilinearsampler.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnbnorm.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnbnorm.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnconv.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnconv.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnconvt.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnconvt.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnnormalize.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnnormalize.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_nnpool.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_nnpool.o -------------------------------------------------------------------------------- 
/matlab/mex/.build/vl_taccummex.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_taccummex.o -------------------------------------------------------------------------------- /matlab/mex/.build/vl_tmove.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/.build/vl_tmove.o -------------------------------------------------------------------------------- /matlab/mex/vl_cudatool.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_cudatool.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_imreadjpeg.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_imreadjpeg.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_imreadjpeg_old.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_imreadjpeg_old.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_nnbilinearsampler.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnbilinearsampler.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_nnbnorm.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnbnorm.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_nnconv.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnconv.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_nnconvt.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnconvt.mexa64 -------------------------------------------------------------------------------- /matlab/mex/vl_nnnormalize.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnnormalize.mexa64 -------------------------------------------------------------------------------- 
/matlab/mex/vl_nnpool.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_nnpool.mexa64
--------------------------------------------------------------------------------
/matlab/mex/vl_taccummex.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_taccummex.mexa64
--------------------------------------------------------------------------------
/matlab/mex/vl_tmove.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/mex/vl_tmove.mexa64
--------------------------------------------------------------------------------
/matlab/simplenn/vl_simplenn_move.m:
--------------------------------------------------------------------------------
1 | function net = vl_simplenn_move(net, destination)
2 | %VL_SIMPLENN_MOVE Move a SimpleNN network between CPU and GPU.
3 | %   NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network to the
4 | %   current GPU device. NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the
5 | %   network to the CPU.
6 | %
7 | %   See also: VL_SIMPLENN().
8 | 
9 | % Copyright (C) 2014-15 Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 | 
15 | switch destination
16 |   case 'gpu', moveop = @(x) gpuArray(x) ;
17 |   case 'cpu', moveop = @(x) gather(x) ;
18 |   otherwise, error('Unknown destination ''%s''.', destination) ;
19 | end
20 | for l=1:numel(net.layers)
21 |   switch net.layers{l}.type
22 |     case {'conv', 'convt', 'bnorm', 'sc_layer'}
23 |       for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'}
24 |         f = char(f) ;
25 |         if isfield(net.layers{l}, f)
26 |           net.layers{l}.(f) = moveop(net.layers{l}.(f)) ;
27 |         end
28 |       end
29 |       for f = {'weights', 'momentum'}
30 |         f = char(f) ;
31 |         if isfield(net.layers{l}, f)
32 |           for j=1:numel(net.layers{l}.(f))
33 |             net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ;
34 |           end
35 |         end
36 |       end
37 |     otherwise
38 |       % nothing to do ?
39 |   end
40 | end
41 | 
--------------------------------------------------------------------------------
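Editor's note: a minimal usage sketch for vl_simplenn_move above (assumes a SimpleNN struct `net`, an input image `im`, and a CUDA-capable GPU; all hypothetical):

  net = vl_simplenn_move(net, 'gpu') ;            % push weights to the current GPU
  res = vl_simplenn(net, gpuArray(single(im))) ;  % forward pass on the device
  net = vl_simplenn_move(net, 'cpu') ;            % bring the weights back to host memory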
/matlab/simplenn/vl_simplenn_start_parserv.m:
--------------------------------------------------------------------------------
1 | function vl_simplenn_start_parserv(net, ps)
2 | %VL_SIMPLENN_START_PARSERV Set up a parameter server for this network
3 | %   VL_SIMPLENN_START_PARSERV(NET, PS) registers the network
4 | %   parameter derivatives with the specified ParameterServer instance
5 | %   PS and then starts the server.
6 | 
7 | for i = 1:numel(net.layers)
8 |   for j = 1:numel(net.layers{i}.weights)
9 |     value = net.layers{i}.weights{j} ;
10 |     name = sprintf('l%d_%d',i,j) ;
11 |     if strcmp(class(value),'gpuArray')
12 |       deviceType = 'gpu' ;
13 |       dataType = classUnderlying(value) ;
14 |     else
15 |       deviceType = 'cpu' ;
16 |       dataType = class(value) ;
17 |     end
18 |     ps.register(...
19 |       name, ...
20 |       size(value), ...
21 |       dataType, ...
22 |       deviceType) ;
23 |   end
24 | end
25 | ps.start() ;
26 | 
--------------------------------------------------------------------------------
/matlab/sparse_coding_layer/backward_sparse_coding_layer.m:
--------------------------------------------------------------------------------
1 | function [ grad_input, grad_A, grad_lambda ] = backward_sparse_coding_layer( Y, X, A, lambda1, grad_output, stride, num_slice )
2 | %backward_sparse_coding_layer Perform backpropagation of the sparse coding layer
3 | % Input:
4 | %   Y: input image, height x width x num_channel x batch_size
5 | %   grad_output: \partial L / \partial X, height x width x N x
6 | %       batch_size
7 | %   X: computed sparse code, height x width x N x batch_size
8 | %   A: dictionary (weights), ksize x ksize x nchannel x N
9 | % Output:
10 | %   grad_A: gradient for updating the dictionary
11 | %   grad_input: gradient of the loss w.r.t. the input feature,
12 | %       height x width x nchannel x batch_size
13 | 
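% Editor's note on shapes (read off the code below): A is flattened to an
% M x N matrix with M = ksize^2*nchannel; mexIm2col turns Y into M-row
% columns; the gradients are then accumulated over num_slice slices of
% the batch (presumably to bound GPU memory use), giving grad_A (M x N,
% reshaped back to ksize x ksize x nchannel x N at the end),
% grad_lambda (N x 1) and grad_input (height x width x nchannel x
% batch_size).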
14 | 
15 | 
16 | [height, width, ~, batch_size] = size(Y);
17 | [ksize, ~, nchannel, N] = size(A);
18 | 
19 | M = ksize^2*nchannel;
20 | A = reshape(A, M, N);
21 | grad_output = permute(grad_output, [3, 1, 2, 4]); % N x height x width x batch_size
22 | tmp_height = floor((height+2*floor(ksize/2) - ksize) / stride) + 1;
23 | tmp_width = floor((width+2*floor(ksize/2) - ksize) / stride) + 1;
24 | Y_col = mexIm2col((Y), ksize, stride);
25 | grad_output = reshape(grad_output, N, tmp_height*tmp_width*batch_size);
26 | X_col = reshape(permute(X, [3, 1, 2, 4]), N, tmp_height*tmp_width*batch_size);
27 | P = size(X_col, 2);
28 | clear X;
29 | 
30 | % start computing the gradient
31 | % param_grad.K = K;
32 | param_grad.height = height;
33 | param_grad.width = width;
34 | param_grad.nchannel = nchannel;
35 | param_grad.kernel_size = ksize;
36 | param_grad.stride = stride;
37 | 
38 | % num_slice = 3;
39 | size_slice = ceil(batch_size/num_slice);
40 | 
41 | grad_A = zeros(M, N, 'single', 'gpuArray');
42 | grad_lambda = zeros(N, 1, 'single', 'gpuArray');
43 | grad_input = zeros(height, width, nchannel, batch_size, 'single', 'gpuArray');
44 | 
45 | for s = 1:num_slice
46 | 
47 |   idx = (s-1)*tmp_height*tmp_width*size_slice+1:min(s*tmp_height*tmp_width*size_slice, P);
48 |   idx_batch = (s-1)*size_slice+1:min(s*size_slice, batch_size);
49 |   if numel(idx_batch)==0
50 |     break;
51 |   else
52 |     param_grad.batch_size = numel(idx_batch);
53 | 
54 |     [tmp_grad_A, tmp_grad_lambda, tmp_grad_input] = get_gradient_batch( X_col(:, idx), Y_col(:, idx), A, lambda1, grad_output(:, idx), param_grad );
55 |     grad_A = grad_A + tmp_grad_A;
56 |     grad_lambda = grad_lambda + tmp_grad_lambda;
57 |     grad_input(:, :, :, idx_batch) = tmp_grad_input;
58 |   end
59 | 
60 | end
61 | 
62 | 
63 | 
64 | grad_A = reshape(grad_A, [ksize, ksize, nchannel, N]);
65 | 
66 | 
67 | end
68 | 
--------------------------------------------------------------------------------
/matlab/sparse_coding_layer/euclideanloss.m:
--------------------------------------------------------------------------------
1 | function Y = euclideanloss(X, c, dzdy)
2 | %EUCLIDEANLOSS Euclidean (squared L2) loss: Y = 0.5 * sum((X - c).^2).
3 | %   With DZDY supplied, returns the derivative DZDY * (X - c) instead.
4 | 
5 | assert(numel(X) == numel(c));
6 | 
7 | d = size(X);
8 | 
9 | assert(all(d == size(c)));
10 | 
11 | if nargin == 2 || (nargin == 3 && isempty(dzdy))
12 | 
13 |   Y = 1 / 2 * sum(subsref((X - c) .^ 2, substruct('()', {':'}))); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
14 |   % Y = 1 / (2 * prod(d(1 : 3))) * sum(subsref((X - c) .^ 2, substruct('()', {':'}))); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
15 | 
16 | elseif nargin == 3 && ~isempty(dzdy)
17 | 
18 |   assert(numel(dzdy) == 1);
19 | 
20 |   Y = dzdy * (X - c); % Y is divided by d(4) in cnn_train.m / cnn_train_mgpu.m.
21 |   % Y = dzdy / prod(d(1 : 3)) * (X - c); % Should Y be divided by prod(d(1 : 3))? It depends on the learning rate.
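  % Editor's sketch of the two call forms (toy values, assumed):
  %   euclideanloss([1 2], [0 0])      % forward: 0.5*(1 + 4) = 2.5
  %   euclideanloss([1 2], [0 0], 1)   % backward: 1 * ([1 2] - [0 0]) = [1 2]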
22 | 
23 | end
24 | 
25 | end
26 | 
--------------------------------------------------------------------------------
/matlab/sparse_coding_layer/fista_nonnegative_l1_gpu.m:
--------------------------------------------------------------------------------
1 | function x_old = fista_nonnegative_l1_gpu(D, Y, lambda)
2 | 
3 | DtD = D'*D;
4 | DtY = D'*Y;
5 | 
6 | maxIter = 50;
7 | L = max(eig(DtD));   % Lipschitz constant of the quadratic term
8 | Linv = 1/L;
9 | lambdaLinv = lambda*Linv;
10 | [M, N] = size(D);
11 | 
12 | P = size(Y, 2);
13 | x_old = zeros(N, P, 'double', 'gpuArray');
14 | y_old = x_old;
15 | t_old = 1;
16 | 
17 | %% MAIN LOOP
18 | A = eye(N, 'double', 'gpuArray') - Linv*(DtD);   % I - (1/L) D'D, linear part of the gradient step
19 | const_x = Linv*DtY - lambdaLinv;                 % constant part, including the l1 shift lambda/L
20 | 
21 | for iter = 1:maxIter
22 |   x_new = A*y_old + const_x;               % gradient step on the smooth term
23 |   x_new = max(x_new, 0);                   % projection onto the non-negative orthant
24 |   t_new = 0.5*(1 + sqrt(1 + 4*t_old^2));   % FISTA momentum schedule
25 |   y_new = (1 + (t_old - 1)/t_new) * x_new - (t_old - 1)/t_new *x_old;
26 |   %% update
27 |   x_old = x_new;
28 |   t_old = t_new;
29 |   y_old = y_new;
30 | end
31 | 
32 | end
33 | 
34 | 
35 | 
--------------------------------------------------------------------------------
/matlab/sparse_coding_layer/forward_sparse_coding_layer.m:
--------------------------------------------------------------------------------
1 | function X = forward_sparse_coding_layer( Y, A, lambda, stride, testMode )
2 | %forward_sparse_coding_layer Perform the forward pass of the sparse coding layer
3 | % input:
4 | %   A: dictionary (weights), ksize x ksize x nchannel x N
5 | %   Y: input image, height x width x num_channel x batch_size
6 | % output: X: sparse codes, h_out x w_out x N x batch_size
7 | 
8 | % all data must be single format
9 | % if ~isa(Y, 'single')
10 | %     error('Input image must be single');
11 | % end
12 | %
13 | % if ~isa(A, 'single')
14 | %     error('Input dictionary must be single');
15 | % end
16 | 
17 | % kernel_size = param.kernel_size;
18 | % K = param.K;
19 | % error_rate = 0;
20 | % nonzero_rate = 0;
21 | 
22 | [height, width, ~, batch_size] = size(Y);
23 | [ksize, ~, nchannel, N] = size(A);
24 | 
25 | A = reshape(A, ksize^2*nchannel, N);
26 | 
27 | % if M~=nchannel*kernel_size^2
28 | %     error('kernel size and dictionary feature number mismatch!');
29 | % end
30 | 
31 | % convert image to column, M x height*width*batch_size (in order)
32 | % Y_col = xx_im2col(Y, ksize, stride);
33 | 
34 | % if height*width==ksize^2
35 | %     Y_col = reshape(Y, height*width*nchannel, batch_size);
36 | % elseif ksize==1
37 | %     Y = permute(Y, [3, 1, 2, 4]);
38 | %     Y_col = reshape(Y, nchannel, height*width*batch_size);
39 | % else
40 | Y_col = mexIm2col((Y), ksize, stride);
41 | % end
42 | % Y_col = double(Y_col);
43 | % X_col is the sparse code
44 | % X_col = batch_omp( Y_col, A, K); % N x height*width*batch_size
45 | % X_col = batch_omp( Y_col, A, 15); % N x height*width*batch_size
46 | % X_col = batch_omp_group_accurate( Y_col, A, K, group_size);
47 | 
48 | % Y_col = Y_col ./ max(sum(Y_col.^2).^(1/2)+1e-4);
49 | % if(testMode)
50 | X_col = fista_nonnegative_l1_gpu(A, Y_col, lambda);
51 | % else
52 | %     X_col = fista_nonnegative_l1_dropout_gpu(A, Y_col, lambda);
53 | % end
54 | % X_col = ista_nonnegative_l1_gpu(A, Y_col, lambda);
55 | % [X_col, error_rate, nonzero_rate] = admm_lasso_gpu(A, Y_col, lambda, 0);
56 | 
57 | % size(Y)
58 | % size(A)
59 | 
60 | % if height*width==ksize^2
61 | %     X = reshape(X_col, [1, 1, N, batch_size]);
62 | %     % size(Y)
63 | %     % size(A)
64 | % elseif ksize==1
65 | %     X = reshape(X_col, [N, height, width, batch_size]);
66 | %     X = permute(X, [2, 3, 1, 4]); % height x width x N x batch_size
67 | % else
68 | % convert sparse codes back to image
69 | h_out =
floor((height+2*floor(ksize/2) - ksize) / stride) + 1; 70 | w_out = floor((width+2*floor(ksize/2) - ksize) / stride) + 1; 71 | X = reshape(X_col, [N, h_out, w_out, batch_size]); 72 | X = permute(X, [2, 3, 1, 4]); % height x width x N x batch_size 73 | % end 74 | 75 | 76 | end 77 | 78 | -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/gradient_check_sparse_coding_layer.m: -------------------------------------------------------------------------------- 1 | % clear; clc; close all; 2 | 3 | % gpuDevice(1) 4 | T = 100; 5 | err = zeros(T, 1); 6 | ave = 0; 7 | for t = 1:T 8 | 9 | N = 512; 10 | height = 14; 11 | width = 14; 12 | nchannel = 64; 13 | batch_size = 3; 14 | kernel_size =5; 15 | P = height*width*batch_size; 16 | M = kernel_size^2 * nchannel; 17 | lambda = 0.01; 18 | stride = 2; 19 | 20 | A = gpuArray((randn(M, N, 'single'))); 21 | % A(A<0) = 0; 22 | A = reshape(A, kernel_size, kernel_size, nchannel, N); 23 | % A = randn(kernel_size, kernel_size, nchannel, N, 'single', 'gpuArray'); 24 | Y = (randn(nchannel, height*width*batch_size, 'single', 'gpuArray')); 25 | % Y = abs(Y); 26 | Y = reshape(Y, [nchannel, height, width, batch_size]); 27 | Y = permute(Y, [2, 3, 1, 4]); 28 | % Y = gpuArray(randn(height, width, nchannel, batch_size, 'single')); 29 | eta = 1e-5; 30 | noise = zeros(height, width, nchannel, batch_size, 'single', 'gpuArray'); 31 | i = randi(height); j = randi(width); c = randi(nchannel); b = randi(batch_size); 32 | noise(i, j, c, b) = eta; 33 | 34 | % noise = randn(height, width, nchannel, batch_size, 'single', 'gpuArray'); 35 | 36 | % param.lambda = lambda; 37 | % param.height = height; 38 | % param.width = width; 39 | % param.nchannel = nchannel; 40 | % param.kernel_size = kernel_size; 41 | % param.batch_size = batch_size; 42 | %% 43 | 44 | height = floor((height+2*floor(kernel_size/2) - kernel_size) / stride) + 1; 45 | width = floor((width+2*floor(kernel_size/2) - kernel_size) / stride) + 1; 46 | grad_output = randn(height, width, N, batch_size, 'single', 'gpuArray'); 47 | 48 | %% 49 | % gradient checking for grad_input 50 | 51 | 52 | 53 | %% 54 | X_eta = forward_sparse_coding_layer( Y + noise, A, lambda, stride ); 55 | X = forward_sparse_coding_layer( Y - noise, A, lambda, stride ); 56 | 57 | % X_eta = single(X_eta); 58 | % X = single(X); 59 | 60 | grad_input_empirical = sum(grad_output(:) .* (X_eta(:) - X(:))/(2*eta)); 61 | 62 | 63 | %% 64 | X_clean = forward_sparse_coding_layer_single( Y, A, lambda, stride ); 65 | [ grad_input, grad_A ] = backward_sparse_coding_layer( Y, X_clean, A, lambda, grad_output, stride ); 66 | grad_input_computed = (grad_input(i, j, c, b)); 67 | err(t) = gather(abs(1 - grad_input_empirical/grad_input_computed)*100); 68 | fprintf(... 69 | 'der: empirical: %f, computed: %f, error: %.2f %%\n', ... 70 | grad_input_empirical, grad_input_computed, ... 
71 | abs(1 - grad_input_empirical/grad_input_computed)*100) ; 72 | end 73 | 74 | mean(err) 75 | 76 | %% 77 | -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/gradient_check_sparse_coding_layer_dictionary.m: -------------------------------------------------------------------------------- 1 | clear; clc; close all; 2 | 3 | gpuDevice(1) 4 | 5 | N = 64; 6 | height = 32; 7 | width = 32; 8 | nchannel = 3; 9 | batch_size = 2; 10 | kernel_size = 5; 11 | P = height*width*batch_size; 12 | M = kernel_size^2 * nchannel; 13 | K = 40; 14 | stride = 2; 15 | 16 | A = gpuArray(normc(randn(M, N, 'single'))); 17 | A = reshape(A, kernel_size, kernel_size, nchannel, N); 18 | % A = randn(kernel_size, kernel_size, nchannel, N, 'single', 'gpuArray'); 19 | Y = gpuArray(randn(height, width, nchannel, batch_size, 'single')); 20 | 21 | eta = 1e-4; 22 | noise = zeros(kernel_size, kernel_size, nchannel, N, 'single', 'gpuArray'); 23 | i = 2; j = 3; c = 2; n = 6; 24 | noise(i, j, c, n) = eta; 25 | % noise = randn(height, width, nchannel, batch_size, 'single', 'gpuArray'); 26 | 27 | param.K = K; 28 | param.height = height; 29 | param.width = width; 30 | param.nchannel = nchannel; 31 | param.kernel_size = kernel_size; 32 | param.batch_size = batch_size; 33 | 34 | height = floor((height+2*floor(kernel_size/2) - kernel_size) / stride) + 1; 35 | width = floor((width+2*floor(kernel_size/2) - kernel_size) / stride) + 1; 36 | grad_output = randn(height, width, N, batch_size, 'single', 'gpuArray'); 37 | 38 | % gradient checking for grad_input 39 | 40 | X_eta = forward_sparse_coding_layer( Y, A+noise, K, stride ); 41 | X = forward_sparse_coding_layer( Y, A - noise, K, stride ); 42 | grad_input_empirical = sum(grad_output(:) .* (X_eta(:) - X(:))/(2*eta)); 43 | 44 | 45 | %% 46 | X_clean = forward_sparse_coding_layer( Y, A, K, stride ); 47 | [ grad_input, grad_A ] = backward_sparse_coding_layer( Y, X_clean, A, grad_output, K, stride ); 48 | grad_input_computed = (grad_A(i, j, c, n)); 49 | 50 | fprintf(... 51 | 'der: empirical: %f, computed: %f, error: %.2f %%\n', ... 52 | grad_input_empirical, grad_input_computed, ... 
53 | abs(1 - grad_input_empirical/grad_input_computed)*100) ; 54 | 55 | %% 56 | -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexCol2im.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexCol2im.mexa64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexCol2im.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexCol2im.mexw64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexCol2im.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexCol2im.o -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexGetActiveAtA.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexGetActiveAtA.mexa64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexGetGradA3D.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexGetGradA3D.mexa64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexIm2col.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexIm2col.mexa64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexIm2col.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexIm2col.mexw64 -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/mexIm2col.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/matlab/sparse_coding_layer/mexIm2col.o -------------------------------------------------------------------------------- /matlab/sparse_coding_layer/test_backward_sparse_coding_layer.m: -------------------------------------------------------------------------------- 1 | clear; clc; close all; 2 | 3 | N = 64; 4 | height = 32; 5 | width = 32; 6 | nchannel = 3; 7 | batch_size = 64; 8 | kernel_size = 5; 9 | P = height*width*batch_size; 10 | M = kernel_size^2 * nchannel; 11 | K = 15; 12 | 13 | A = 
gpuArray(normc(randn(M, N, 'single')));
14 | Y = gpuArray(randn(height, width, nchannel, batch_size, 'single'));
15 |
16 | param.K = K;
17 | param.height = height;
18 | param.width = width;
19 | param.nchannel = nchannel;
20 | param.kernel_size = kernel_size;
21 | param.batch_size = batch_size;
22 |
23 | grad_output = randn(N, P, 'single', 'gpuArray');
24 |
25 | tic
26 | [ grad_A, grad_input ] = backward_sparse_coding_layer( Y, A, grad_output, param );
27 | toc
-------------------------------------------------------------------------------- /matlab/sparse_coding_layer/test_mexCol2im.m: --------------------------------------------------------------------------------
1 | clear; clc; close all;
2 | H = 7;
3 | W = 7;
4 | nchannel_in = 3;
5 | num_batch = 5;
6 | ksize = 3;
7 | stride = 1;
8 | nchannel_out = ksize^2 * nchannel_in;
9 |
10 | X = randn(H, W, nchannel_in, num_batch, 'single', 'gpuArray');
11 |
12 |
13 | %%
14 |
15 | % X_col = xx_im2col(X, ksize, stride);
16 | X_col = mexIm2col(X, ksize, stride);
17 | X_rec = mexCol2im(X_col, H, W, nchannel_in, num_batch, ksize, stride);
18 |
19 | % start testing mexCol2im
20 | % X_rec = xx_col2im(X_col, ksize, stride, H, W, nchannel_in, num_batch);
21 | % X_rec = mexCol2im(Y, H, W, nchannel_in, num_batch, ksize);
22 |
23 | %
24 | % X_col1 = xx_im2col(X, ksize);
25 | %
26 | %
27 | % % start testing mexCol2im
28 | % X_rec1 = xx_col2im(X_col1, ksize, H, W, nchannel_in, num_batch);
29 |
-------------------------------------------------------------------------------- /matlab/sparse_coding_layer/test_mexGetGradA3D.m: --------------------------------------------------------------------------------
1 | clear; clc; close all;
2 |
3 | M = 4;
4 | N = 10;
5 | P = 3;
6 |
7 | A = randn(M, 1, P, 'single', 'gpuArray');
8 | X = randn(1, N, P, 'single', 'gpuArray');
9 | E = randn(M, 1, P, 'single', 'gpuArray');
10 | B = randn(1, N, P, 'single', 'gpuArray');
11 |
12 | grad = pagefun(@mtimes, A, X) + pagefun(@mtimes, E, B);
13 |
14 |
15 | A = squeeze(A);
16 | X = squeeze(X);
17 | E = squeeze(E);
18 | B = squeeze(B);
19 | temp = mexGetGradA3D(A, X, E, B);
20 |
21 | norm(grad(:) - temp(:))
-------------------------------------------------------------------------------- /matlab/sparse_coding_layer/test_mexIm2col.m: --------------------------------------------------------------------------------
1 | clear; clc; close all;
2 | H = 32;
3 | W = 32;
4 | stride = 2;
5 | nchannel_in = 3;
6 | num_batch = 1;
7 | ksize = 1;
8 | pad_size = floor(ksize/2);
9 | nchannel_out = ksize^2 * nchannel_in;
10 |
11 | X = randn(H, W, nchannel_in, num_batch, 'single', 'gpuArray');
12 | Xt = permute(X, [2, 1, 3, 4]);
13 |
14 | %%
15 | Y = mexIm2col(Xt, ksize, stride);
16 | H_out = floor((H+2*pad_size)/stride);
17 | W_out = floor((W+2*pad_size)/stride);
18 | % Y = reshape(Y, nchannel_out, H_out*W_out*num_batch);
19 |
20 |
21 | % Y_cpu = im2col_cpu( X, ksize );
22 |
23 |
24 |
-------------------------------------------------------------------------------- /matlab/sparse_coding_layer/vec.m: --------------------------------------------------------------------------------
1 | function [ x ] = vec( x )
2 | %VEC Vectorize an array.
3 | %   Returns the input reshaped as a single column vector.
4 | x = x(:);
5 |
6 | end
7 |
8 |
-------------------------------------------------------------------------------- /matlab/src/bits/data.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 
| #include "data.cu" 5 | -------------------------------------------------------------------------------- /matlab/src/bits/datamex.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "datamex.cu" 5 | -------------------------------------------------------------------------------- /matlab/src/bits/datamex.hpp: -------------------------------------------------------------------------------- 1 | // @file datamex.hpp 2 | // @brief Basic data structures (MEX support) 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__datamex__ 14 | #define __vl__datamex__ 15 | 16 | #include "mex.h" 17 | 18 | #if ENABLE_GPU 19 | #include "gpu/mxGPUArray.h" 20 | #endif 21 | 22 | #include "data.hpp" 23 | 24 | namespace vl { 25 | 26 | class MexTensor ; 27 | 28 | class MexContext : public Context 29 | { 30 | public: 31 | MexContext() ; 32 | ~MexContext() ; 33 | 34 | protected: 35 | #if ENABLE_GPU 36 | vl::ErrorCode initGpu() ; 37 | vl::ErrorCode validateGpu() ; 38 | mxArray * canary ; // if it breathes, the GPU state is valid 39 | bool gpuIsInitialized ; 40 | #endif 41 | 42 | friend class MexTensor ; 43 | } ; 44 | 45 | class MexTensor : public Tensor 46 | { 47 | public: 48 | MexTensor(MexContext & context) ; 49 | vl::ErrorCode init(mxArray const * array) ; 50 | vl::ErrorCode init(DeviceType deviceType, DataType dataType, TensorShape const & shape) ; 51 | vl::ErrorCode initWithZeros(DeviceType deviceType, DataType dataType, TensorShape const & shape) ; 52 | vl::ErrorCode initWithValue(DeviceType deviceType, DataType dataType, TensorShape const & shape, double value) ; 53 | 54 | void makePersistent() ; 55 | mxArray * relinquish() ; 56 | void clear() ; 57 | ~MexTensor() ; 58 | 59 | size_t getMemorySize() const ; 60 | 61 | protected: 62 | MexContext & context ; 63 | mxArray const * array ; 64 | #ifdef ENABLE_GPU 65 | mxGPUArray const * gpuArray ; 66 | #endif 67 | bool isArrayOwner ; 68 | 69 | private: // prevention 70 | MexTensor(MexTensor const &) ; 71 | MexTensor & operator= (MexTensor & tensor) ; 72 | vl::ErrorCode initHelper(DeviceType deviceType, DataType dataType, TensorShape const & shape, bool fillWithZeros = false) ; 73 | } ; 74 | 75 | void print(char const * str, MexTensor const & tensor) ; 76 | 77 | void mexThrowError(Context const& context, vl::ErrorCode error) ; 78 | } 79 | 80 | 81 | #endif /* defined(__vl__datamex__) */ 82 | -------------------------------------------------------------------------------- /matlab/src/bits/impl/bilinearsampler.hpp: -------------------------------------------------------------------------------- 1 | // @file bilinearsampler.hpp 2 | // @brief Bilinear sampler implementation 3 | // @author Ankush Gupta 4 | // @author Andrea Vedaldi 5 | 6 | /* 7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi. 8 | All rights reserved. 9 | 10 | This file is part of the VLFeat library and is made available under 11 | the terms of the BSD license (see the COPYING file). 
12 | */
13 |
14 | #ifndef VL_BILINEARSAMPLER_H
15 | #define VL_BILINEARSAMPLER_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | // defines the dispatcher for CUDA kernels:
21 | namespace vl { namespace impl {
22 |
23 | template <vl::DeviceType deviceType, typename type>
24 | struct bilinearsampler {
25 |
26 | static vl::ErrorCode
27 | forward(Context& context,
28 | type* output,
29 | type const* data,
30 | type const* grid,
31 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
32 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
33 |
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | type* derData,
38 | type* derGrid,
39 | type const* data,
40 | type const* grid,
41 | type const* derOutput,
42 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
43 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
44 | } ;
45 |
46 | } }
47 |
48 | #endif /* defined(VL_BILINEARSAMPLER_H) */
49 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/bnorm.hpp: --------------------------------------------------------------------------------
1 | // @file bnorm.hpp
2 | // @brief Batch Normalization block implementation
3 | // @author Sebastien Ehrhardt
4 |
5 | /*
6 | Copyright (C) 2015-16 Sebastien Ehrhardt.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm__
14 | #define __vl__bnorm__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DeviceType deviceType, typename type>
22 | struct bnorm
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | type* output,
27 | type* moments, // can be null and it will be allocated internally
28 | type const* data,
29 | type const* multipliers,
30 | type const* biases,
31 | size_t height, size_t width, size_t depth, size_t size,
32 | type epsilon) ;
33 |
34 | static vl::ErrorCode
35 | forward_given_moments(Context& context,
36 | type* output,
37 | type const* moments,
38 | type const* data,
39 | type const* multipliers,
40 | type const* biases,
41 | size_t height, size_t width, size_t depth, size_t size) ;
42 |
43 | static vl::ErrorCode
44 | backward(Context& context,
45 | type* derData,
46 | type* derMultipliers,
47 | type* derBiases,
48 | type* moments, // can be null and it will be allocated internally
49 | type const* data,
50 | type const* multipliers,
51 | type const* biases,
52 | type const* derOutput,
53 | size_t height, size_t width, size_t depth, size_t size,
54 | type epsilon) ;
55 |
56 | static vl::ErrorCode
57 | backward_given_moments(Context& context,
58 | type* derData,
59 | type* derMultipliers,
60 | type* derBiases,
61 | type const* moments,
62 | type const* data,
63 | type const* multipliers,
64 | type const* biases,
65 | type const* derOutput,
66 | size_t height, size_t width, size_t depth, size_t size,
67 | type epsilon) ;
68 | } ;
69 |
70 | } }
71 | #endif /* __vl__bnorm__ */
72 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/copy.hpp: --------------------------------------------------------------------------------
1 | // @file copy.hpp
2 | // @brief Copy and other data operations
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__copy__
14 | #define __vl__copy__
15 |
16 | #include "../data.hpp"
17 |
18 | namespace vl { namespace impl {
19 |
20 | template <vl::DeviceType deviceType, typename type>
21 | struct operations
22 | {
23 | typedef type data_type ;
24 | static vl::ErrorCode copy(data_type * dest, data_type const * src, size_t numElements) ;
25 | static vl::ErrorCode fill(data_type * dest, size_t numElements, data_type value) ;
26 | } ;
27 | } }
28 |
29 | #endif /* defined(__vl__copy__) */
30 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/copy_cpu.cpp: --------------------------------------------------------------------------------
1 | // @file copy_cpu.cpp
2 | // @brief Copy and other data operations (CPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | namespace vl { namespace impl {
17 |
18 | template <typename type>
19 | struct operations<vl::VLDT_CPU, type>
20 | {
21 | typedef type data_type ;
22 |
23 | static vl::ErrorCode
24 | copy(data_type * dest,
25 | data_type const * src,
26 | size_t numElements)
27 | {
28 | memcpy(dest, src, numElements * sizeof(data_type)) ;
29 | return VLE_Success ;
30 | }
31 |
32 | static vl::ErrorCode
33 | fill(data_type * dest,
34 | size_t numElements,
35 | data_type value)
36 | {
37 | for (size_t k = 0 ; k < numElements ; ++k) {
38 | dest[k] = value ;
39 | }
40 | return VLE_Success ;
41 | }
42 | } ;
43 |
44 | } }
45 |
46 | template struct vl::impl::operations<vl::VLDT_CPU, float> ;
47 |
48 | #ifdef ENABLE_DOUBLE
49 | template struct vl::impl::operations<vl::VLDT_CPU, double> ;
50 | #endif
51 |
52 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/copy_gpu.cu: --------------------------------------------------------------------------------
1 | // @file copy_gpu.cu
2 | // @brief Copy and other data operations (GPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include "../datacu.hpp"
15 | #include <string.h>
16 |
17 | template <typename type> __global__ void
18 | fill_kernel (type * data, type value, size_t size)
19 | {
20 | int index = threadIdx.x + blockIdx.x * blockDim.x ;
21 | if (index < size) data[index] = value ;
22 | }
23 |
24 | namespace vl { namespace impl {
25 |
26 | template <typename type>
27 | struct operations<vl::VLDT_GPU, type>
28 | {
29 | typedef type data_type ;
30 |
31 | static vl::ErrorCode
32 | copy(data_type * dest,
33 | data_type const * src,
34 | size_t numElements)
35 | {
36 | cudaMemcpy(dest, src, numElements * sizeof(data_type), cudaMemcpyDeviceToDevice) ;
37 | return VLE_Success ;
38 | }
39 |
40 | static vl::ErrorCode
41 | fill(data_type * dest,
42 | size_t numElements,
43 | data_type value)
44 | {
45 | fill_kernel <data_type>
46 | <<< divideAndRoundUp(numElements, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
47 | (dest, value, numElements) ; // argument order must match fill_kernel(data, value, size)
48 |
49 | cudaError_t error = cudaGetLastError() ;
50 | if (error != cudaSuccess) {
51 | return VLE_Cuda ;
52 | }
53 | return VLE_Success ;
54 | }
55 | } ;
56 |
57 | } }
58 |
59 | template struct vl::impl::operations<vl::VLDT_GPU, float> ;
60 |
61 | #ifdef ENABLE_DOUBLE
62 | template struct vl::impl::operations<vl::VLDT_GPU, double> ;
63 | #endif
-------------------------------------------------------------------------------- /matlab/src/bits/impl/im2row.hpp: --------------------------------------------------------------------------------
1 | // @file im2row.hpp
2 | // @brief Stack image patches as matrix rows
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__im2row__
14 | #define __vl__im2row__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DeviceType deviceType, typename type>
22 | struct im2row {
23 |
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | type* stacked,
27 | type const* data,
28 | size_t height, size_t width, size_t depth,
29 | size_t windowHeight, size_t windowWidth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
32 | int dilateY, int dilateX) ;
33 |
34 | static vl::ErrorCode
35 | backward(vl::Context& context,
36 | type* data,
37 | type const* stacked,
38 | size_t height, size_t width, size_t depth,
39 | size_t windowHeight, size_t windowWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
42 | int dilateY, int dilateX) ;
43 | } ;
44 |
45 | } }
46 |
47 | #endif /* defined(__vl__im2row__) */
48 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/nnbias_cudnn.hpp: --------------------------------------------------------------------------------
1 | // @file nnbias_cudnn.hpp
2 | // @brief Bias block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias_cudnn__
14 | #define __vl__nnbias_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | // todo: data type should be handled internally?
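22 | //
23 | // Note (a short summary of the documentation in nnbias.cpp): the forward
24 | // pass computes y(i,j,k,d) <- outputMult * y(i,j,k,d) + dataMult * x(i,j,k,d)
25 | // + biasesMult * b(k) ; the backward pass accumulates the corresponding
26 | // projected derivatives into derData and derBiases.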
27 |
28 | template <vl::DataType dataType>
29 | struct nnbias_cudnn
30 | {
31 | static vl::ErrorCode
32 | forward(vl::Context& context,
33 | vl::Tensor output, double outputMult,
34 | vl::Tensor data, double dataMult,
35 | vl::Tensor biases, double biasesMult) ;
36 |
37 | static vl::ErrorCode
38 | backward(vl::Context& context,
39 | vl::Tensor derData, double derDataMult,
40 | vl::Tensor derBiases, double derBiasesMult,
41 | vl::Tensor derOutput, double derOutputMult) ;
42 | } ;
43 |
44 | } }
45 |
46 | #endif /* defined(__vl__nnbias_cudnn__) */
47 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/nnbilinearsampler_cudnn.hpp: --------------------------------------------------------------------------------
1 | // @file nnbilinearsampler_cudnn.hpp
2 | // @brief BilinearSampler CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bilinearsampler_cudnn__
14 | #define __vl__bilinearsampler_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DataType dataType>
22 | struct nnbilinearsampler_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output,
27 | Tensor data,
28 | Tensor grid) ;
29 |
30 | static vl::ErrorCode
31 | backward(Context& context,
32 | Tensor derData,
33 | Tensor derGrid,
34 | Tensor data,
35 | Tensor grid,
36 | Tensor derOutput) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbilinearsampler_cudnn__) */
42 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/nnbnorm_cudnn.hpp: --------------------------------------------------------------------------------
1 | // @file nnbnorm_cudnn.hpp
2 | // @brief bnorm CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm_cudnn__
14 | #define __vl__bnorm_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DataType dataType>
22 | struct nnbnorm_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | vl::Tensor output,
27 | vl::Tensor moments,
28 | vl::Tensor data,
29 | vl::Tensor multipliers,
30 | vl::Tensor biases,
31 | double epsilon) ;
32 |
33 | static vl::ErrorCode
34 | forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments,
37 | vl::Tensor data,
38 | vl::Tensor multipliers,
39 | vl::Tensor biases) ;
40 |
41 | static vl::ErrorCode
42 | backward(Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derMultipliers,
45 | vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor multipliers,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | double epsilon) ;
52 |
53 | static vl::ErrorCode
54 | backward_given_moments(Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derMultipliers,
57 | vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor multipliers,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | double epsilon) ;
64 | } ;
65 |
66 | } }
67 |
68 | #endif /* defined(__vl__nnbnorm_cudnn__) */
69 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/nnconv_cudnn.hpp: --------------------------------------------------------------------------------
1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DataType dataType>
22 | struct nnconv_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output, double outputMult,
27 | Tensor data, double dataMult,
28 | Tensor filters,
29 | Tensor biases,
30 | int strideX, int strideY,
31 | int padLeft, int padRight,
32 | int padTop, int padBottom,
33 | int dilateX, int dilateY) ;
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | Tensor derData,
38 | Tensor derFilters,
39 | Tensor derBiases,
40 | Tensor data,
41 | Tensor filters,
42 | Tensor derOutput,
43 | int strideX, int strideY,
44 | int padLeft, int padRight,
45 | int padTop, int padBottom,
46 | int dilateX, int dilateY) ;
47 | } ;
48 |
49 | } }
50 | #endif /* defined(__vl__nnconv_cudnn__) */
51 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/nnpooling_cudnn.hpp: --------------------------------------------------------------------------------
1 | // @file nnpooling_cudnn.hpp
2 | // @brief Pooling block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling_cudnn__
14 | #define __vl__nnpooling_cudnn__
15 |
16 | #include "../nnpooling.hpp"
17 | #include "../data.hpp"
18 | #include "cudnn.h"
19 |
20 |
21 | namespace vl { namespace impl {
22 |
23 | // todo: data type should be handled internally?
24 |
25 | template <vl::DataType dataType>
26 | struct nnpooling_cudnn
27 | {
28 | static vl::ErrorCode
29 | forward(Context& context,
30 | Tensor output,
31 | Tensor data,
32 | vl::PoolingMethod method,
33 | int poolHeight, int poolWidth,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 |
38 | static vl::ErrorCode
39 | backward(Context& context,
40 | Tensor derData,
41 | Tensor data,
42 | Tensor output,
43 | Tensor derOutput,
44 | vl::PoolingMethod method,
45 | int poolHeight, int poolWidth,
46 | int strideY, int strideX,
47 | int padTop, int padBottom,
48 | int padLeft, int padRight) ;
49 | };
50 |
51 | } }
52 |
53 | #endif /* defined(__vl__nnpooling_cudnn__) */
54 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/normalize.hpp: --------------------------------------------------------------------------------
1 | // @file normalize.hpp
2 | // @brief Normalize block implementation
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__normalize__
14 | #define __vl__normalize__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::DeviceType deviceType, typename type>
22 | struct lrn
23 | {
24 | static vl::ErrorCode
25 | forward(type* output,
26 | type const* data,
27 | size_t height, size_t width, size_t depth, size_t size,
28 | size_t normDepth,
29 | type kappa, type alpha, type beta) ;
30 |
31 | static vl::ErrorCode
32 | backward(type* derData,
33 | type const* data,
34 | type const* derOutput,
35 | size_t height, size_t width, size_t depth, size_t size,
36 | size_t normDepth,
37 | type kappa, type alpha, type beta) ;
38 | } ;
39 |
40 | } }
41 |
42 | #endif /* __vl__normalize__ */
43 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/pooling.hpp: --------------------------------------------------------------------------------
1 | // @file pooling.hpp
2 | // @brief Pooling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_POOLING_H
15 | #define VL_POOLING_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 | template <vl::DeviceType deviceType, typename type>
23 | struct pooling_max {
24 | typedef type data_type ;
25 |
26 | static vl::ErrorCode
27 | forward(data_type* output,
28 | data_type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t poolHeight, size_t poolWidth,
31 | size_t strideY, size_t strideX,
32 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
33 |
34 | static vl::ErrorCode
35 | backward(data_type* derData,
36 | data_type const* data,
37 | data_type const* derOutput,
38 | size_t height, size_t width, size_t depth,
39 | size_t poolHeight, size_t poolWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
42 | } ;
43 |
44 | template <vl::DeviceType deviceType, typename type>
45 | struct pooling_average {
46 | typedef type data_type ;
47 |
48 | static vl::ErrorCode
49 | forward(data_type* output,
50 | data_type const* data,
51 | size_t height, size_t width, size_t depth,
52 | size_t poolHeight, size_t poolWidth,
53 | size_t strideY, size_t strideX,
54 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
55 |
56 | static vl::ErrorCode
57 | backward(type* derData,
58 | type const* derOutput,
59 | size_t height, size_t width, size_t depth,
60 | size_t poolHeight, size_t poolWidth,
61 | size_t strideY, size_t strideX,
62 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
63 | } ;
64 |
65 | } }
66 |
67 | #endif /* defined(VL_POOLING_H) */
68 |
-------------------------------------------------------------------------------- /matlab/src/bits/impl/subsample.hpp: --------------------------------------------------------------------------------
1 | // @file subsample.hpp
2 | // @brief Subsampling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_NNSUBSAMPLE_H
15 | #define VL_NNSUBSAMPLE_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 | template <vl::DeviceType deviceType, typename type>
23 | struct subsample {
24 |
25 | static vl::ErrorCode
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
32 |
33 | static vl::ErrorCode
34 | backward(vl::Context& context,
35 | type* derData,
36 | type const* derOutput,
37 | size_t height, size_t width, size_t depth,
38 | size_t strideY, size_t strideX,
39 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
40 | } ;
41 |
42 | } }
43 |
44 | #endif /* defined(VL_NNSUBSAMPLE_H) */
45 |
-------------------------------------------------------------------------------- /matlab/src/bits/imread.cpp: --------------------------------------------------------------------------------
1 | // @file imread.cpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "imread.hpp"
14 | #include <string.h>
15 |
16 | vl::ImageShape::ImageShape()
17 | : height(0), width(0), depth(0)
18 | { }
19 |
20 | vl::ImageShape::ImageShape(size_t height, size_t width, size_t depth)
21 | : height(height), width(width), depth(depth)
22 | { }
23 |
24 | vl::ImageShape::ImageShape(ImageShape const & im)
25 | : height(im.height), width(im.width), depth(im.depth)
26 | { }
27 |
28 | vl::ImageShape & vl::ImageShape::operator =(vl::ImageShape const & im)
29 | {
30 | height = im.height ;
31 | width = im.width ;
32 | depth = im.depth ;
33 | return *this ;
34 | }
35 |
36 | bool vl::ImageShape::operator == (vl::ImageShape const & im)
37 | {
38 | return
39 | (height == im.height) &
40 | (width == im.width) &
41 | (depth == im.depth) ;
42 | }
43 |
44 | size_t vl::ImageShape::getNumElements() const
45 | {
46 | return height*width*depth ;
47 | }
48 |
49 | void vl::ImageShape::clear()
50 | {
51 | height = 0 ;
52 | width = 0 ;
53 | depth = 0 ;
54 | }
55 |
56 | vl::Image::Image()
57 | : shape(), memory(NULL)
58 | { }
59 |
60 | vl::Image::Image(Image const & im)
61 | : shape(im.shape), memory(im.memory)
62 | { }
63 |
64 | vl::Image::Image(vl::ImageShape const & shape, float * memory)
65 | : shape(shape), memory(memory)
66 | { }
67 |
68 | vl::ImageShape const & vl::Image::getShape() const { return shape ; }
69 | float * vl::Image::getMemory() const { return memory ; }
70 |
71 | void vl::Image::clear()
72 | {
73 | shape.clear() ;
74 | memory = NULL ;
75 | }
76 |
-------------------------------------------------------------------------------- /matlab/src/bits/imread.hpp: --------------------------------------------------------------------------------
1 | // @file imread.hpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__imread__
14 | #define __vl__imread__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | #define VL_IMAGE_ERROR_MSG_MAX_LENGTH 256
21 |
22 | struct ImageShape
23 | {
24 | size_t height ;
25 | size_t width ;
26 | size_t depth ;
27 |
28 | ImageShape() ;
29 | ImageShape(size_t height, size_t width, size_t depth) ;
30 | ImageShape(ImageShape const & im) ;
31 | ImageShape & operator = (ImageShape const & im) ;
32 | bool operator == (ImageShape const & im) ;
33 |
34 | size_t getNumElements() const ;
35 | void clear() ;
36 | } ;
37 |
38 | class Image
39 | {
40 | public:
41 | Image() ;
42 | Image(Image const & im) ;
43 | Image(ImageShape const & shape, float * memory = NULL) ;
44 | ImageShape const & getShape() const ;
45 | float * getMemory() const ;
46 | void clear() ;
47 |
48 | protected:
49 | ImageShape shape ;
50 | float * memory ;
51 | } ;
52 |
53 | class ImageReader
54 | {
55 | public:
56 | ImageReader() ;
57 | ~ImageReader() ;
58 | vl::ErrorCode readShape(ImageShape & image, char const * fileName) ;
59 | vl::ErrorCode readPixels(float * memory, char const * fileName) ;
60 | char const * getLastErrorMessage() const ;
61 |
62 | private:
63 | class Impl ;
64 | Impl * impl ;
65 | } ;
66 | }
67 |
68 | #endif
69 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbias.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbias.cu should be compiled instead"
3 | #endif
4 | #include "nnbias.cu"
5 |
6 | /**
7 | @brief nnbias_forward
8 | @param context context.
9 | @param output output tensor $\by$ [output].
10 | @param outputMult output tensor multiplier $\alpha$.
11 | @param data data tensor $\bx$.
12 | @param dataMult data tensor multiplier $\beta$.
13 | @param biases biases tensor $\bb$.
14 | @param biasesMult biases tensor multiplier $\gamma$.
15 |
16 | The function computes
17 | @f[
18 |    y_{ijkd} \leftarrow
19 |    \alpha y_{ijkd} +
20 |    \beta x_{ijkd} +
21 |    \gamma b_k.
22 | @f]
23 |
24 | @a data can be the null tensor, in which case this tensor
25 | is dropped in the summation.
26 | */
27 |
28 | /**
29 | @brief nnbias_backward
30 | @param context context.
31 | @param derData data derivative tensor $d\bx$ [output].
32 | @param derDataMult data derivative tensor multiplier $\eta$.
33 | @param derBiases biases derivative tensor $d\bb$ [output].
34 | @param derBiasesMult biases derivative tensor multiplier $\tau$.
35 | @param data data tensor $\bx$.
36 | @param dataMult data tensor multiplier $\beta$.
37 | @param biases biases tensor $\bb$.
38 | @param biasesMult biases tensor multiplier $\gamma$.
39 |
40 | If @a derData is the null tensor, this derivative is not computed and
41 | @a biases can also be null.
42 |
43 | If @a derBiases is the null tensor, this derivative is not computed and
44 | @a data can also be null.
45 | */
46 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbias.hpp: --------------------------------------------------------------------------------
1 | // @file nnbias.hpp
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias__
14 | #define __vl__nnbias__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnbias_forward(vl::Context& context,
22 | vl::Tensor output, double outputMult,
23 | vl::Tensor data, double dataMult,
24 | vl::Tensor biases, double biasesMult) ;
25 |
26 | vl::ErrorCode
27 | nnbias_backward(vl::Context& context,
28 | vl::Tensor derData, double derDataMult,
29 | vl::Tensor derBiases, double derBiasesMult,
30 | vl::Tensor derOutput, double derOutputMult) ;
31 | }
32 |
33 | #endif /* defined(__vl__nnbias__) */
34 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbilinearsampler.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbilinearsampler.cu should be compiled instead"
3 | #endif
4 | #include "nnbilinearsampler.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbilinearsampler.hpp: --------------------------------------------------------------------------------
1 | // @file nnbilinearsampler.hpp
2 | // @brief Bilinear sampler block
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbilinearsampler__
14 | #define __vl__nnbilinearsampler__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 | vl::ErrorCode
21 | nnbilinearsampler_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor grid) ;
25 |
26 | vl::ErrorCode
27 | nnbilinearsampler_backward(vl::Context& context,
28 | vl::Tensor derData,
29 | vl::Tensor derGrid,
30 | vl::Tensor data,
31 | vl::Tensor grid,
32 | vl::Tensor derOutput) ;
33 | }
34 |
35 | #endif /* defined(__vl__nnbilinearsampler__) */
36 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbnorm.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbnorm.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnbnorm.hpp: --------------------------------------------------------------------------------
1 | // @file nnbnorm.hpp
2 | // @brief Batch normalization block
3 | // @author Sebastien Ehrhardt
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2015-16 Sebastien Ehrhardt and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __vl__nnbnorm__
15 | #define __vl__nnbnorm__
16 |
17 | #include "data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl {
21 |
22 | // This version computes mean and sigma
23 | vl::ErrorCode
24 | nnbnorm_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor moments, // [output: can pass null]
27 | vl::Tensor data,
28 | vl::Tensor filters,
29 | vl::Tensor biases,
30 | double epsilon) ;
31 |
32 | // This version uses the mean and sigma specified
33 | vl::ErrorCode
34 | nnbnorm_forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments, // input
37 | vl::Tensor data,
38 | vl::Tensor filters,
39 | vl::Tensor biases) ;
40 |
41 | vl::ErrorCode
42 | nnbnorm_backward(vl::Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derFilters,
45 | vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor filters,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | double epsilon) ;
52 |
53 | vl::ErrorCode
54 | nnbnorm_backward_given_moments(vl::Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derFilters,
57 | vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor filters,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | double epsilon) ;
64 | }
65 |
66 | #endif /* defined(__vl__nnbnorm__) */
67 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnconv.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnconv.cu should be compiled instead"
3 | #endif
4 | #include "nnconv.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnconv.hpp: --------------------------------------------------------------------------------
1 | // @file nnconv.hpp
2 | // @brief Convolution block
3 | // @author Andrea Vedaldi
4 | // @author Max Jaderberg
5 |
6 | /*
7 | Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg
8 | Copyright (C) 2015-16 Andrea Vedaldi.
9 |
10 | All rights reserved.
11 |
12 | This file is part of the VLFeat library and is made available under
13 | the terms of the BSD license (see the COPYING file).
14 | */ 15 | 16 | #ifndef __vl__nnconv__ 17 | #define __vl__nnconv__ 18 | 19 | #include "data.hpp" 20 | 21 | namespace vl { 22 | 23 | vl::ErrorCode 24 | nnconv_forward(vl::Context& context, 25 | vl::Tensor output, double outputMult, 26 | vl::Tensor data, double dataMult, 27 | vl::Tensor filters, 28 | vl::Tensor biases, 29 | int strideY, int strideX, 30 | int padTop, int padBottom, 31 | int padLeft, int padRight, 32 | int dilateY, int dilateX) ; 33 | 34 | vl::ErrorCode 35 | nnconv_backward(vl::Context& context, 36 | vl::Tensor derData, 37 | vl::Tensor derFilters, 38 | vl::Tensor derBiases, 39 | vl::Tensor data, 40 | vl::Tensor filters, 41 | vl::Tensor derOutput, 42 | int strideY, int strideX, 43 | int padTop, int padBottom, 44 | int padLeft, int padRight, 45 | int dilateY, int dilateX) ; 46 | 47 | vl::ErrorCode 48 | nnconvt_forward(vl::Context& context, 49 | vl::Tensor output, 50 | vl::Tensor data, 51 | vl::Tensor filters, 52 | vl::Tensor biases, 53 | int upsampleY, int upsampleX, 54 | int cropTop, int cropBottom, 55 | int cropLeft, int cropRight) ; 56 | 57 | vl::ErrorCode 58 | nnconvt_backward(vl::Context& context, 59 | vl::Tensor derData, 60 | vl::Tensor derFilters, 61 | vl::Tensor derBiases, 62 | vl::Tensor data, 63 | vl::Tensor filters, 64 | vl::Tensor derOutput, 65 | int upsampleY, int upsampleX, 66 | int cropTop, int cropBottom, 67 | int cropLeft, int cropRight) ; 68 | } 69 | 70 | 71 | #endif /* defined(__vl__nnconv__) */ 72 | -------------------------------------------------------------------------------- /matlab/src/bits/nnfullyconnected.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnfullyconnected.cu should be compiled instead" 3 | #endif 4 | #include "nnfullyconnected.cu" 5 | -------------------------------------------------------------------------------- /matlab/src/bits/nnfullyconnected.hpp: -------------------------------------------------------------------------------- 1 | // @file nnfullyconnected.hpp 2 | // @brief Fully-connected block 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */
12 |
13 |
14 | #ifndef __vl__nnfullyconnected__
15 | #define __vl__nnfullyconnected__
16 |
17 | #include "data.hpp"
18 |
19 | namespace vl {
20 |
21 | vl::ErrorCode
22 | nnfullyconnected_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | vl::Tensor filters,
26 | vl::Tensor biases) ;
27 |
28 | vl::ErrorCode
29 | nnfullyconnected_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor derFilters,
32 | vl::Tensor derBiases,
33 | vl::Tensor data,
34 | vl::Tensor filters,
35 | vl::Tensor derOutput) ;
36 | }
37 |
38 |
39 | #endif /* defined(__vl__nnfullyconnected__) */
40 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnnormalize.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalize.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalize.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnnormalize.hpp: --------------------------------------------------------------------------------
1 | // @file nnnormalize.hpp
2 | // @brief Normalization block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnnormalize__
14 | #define __vl__nnnormalize__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 |
21 | vl::ErrorCode
22 | nnlrn_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | size_t normDepth,
26 | double kappa, double alpha, double beta) ;
27 |
28 | vl::ErrorCode
29 | nnlrn_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor data,
32 | vl::Tensor derOutput,
33 | size_t normDepth,
34 | double kappa, double alpha, double beta) ;
35 | }
36 |
37 | #endif /* defined(__vl__nnnormalize__) */
38 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnpooling.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnpooling.cu should be compiled instead"
3 | #endif
4 | #include "nnpooling.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnpooling.hpp: --------------------------------------------------------------------------------
1 | // @file nnpooling.hpp
2 | // @brief Pooling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling__
14 | #define __vl__nnpooling__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 |
21 | enum PoolingMethod { vlPoolingMax, vlPoolingAverage } ;
22 |
23 | vl::ErrorCode
24 | nnpooling_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor data,
27 | PoolingMethod method,
28 | int poolHeight, int poolWidth,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::ErrorCode
34 | nnpooling_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor data,
37 | vl::Tensor derOutput,
38 | PoolingMethod method,
39 | int poolHeight, int poolWidth,
40 | int strideY, int strideX,
41 | int padTop, int padBottom,
42 | int padLeft, int padRight) ;
43 | }
44 |
45 | #endif /* defined(__vl__nnpooling__) */
46 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnsubsample.cpp: --------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnsubsample.cu"
5 |
6 |
-------------------------------------------------------------------------------- /matlab/src/bits/nnsubsample.hpp: --------------------------------------------------------------------------------
1 | // @file nnsubsample.hpp
2 | // @brief Subsampling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnsubsample__
14 | #define __vl__nnsubsample__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnsubsample_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor biases,
25 | int strideY, int strideX,
26 | int padTop, int padBottom,
27 | int padLeft, int padRight) ;
28 |
29 | vl::ErrorCode
30 | nnsubsample_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor derBiases,
33 | vl::Tensor derOutput,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 | }
38 |
39 | #endif /* defined(__vl__nnsubsample__) */
40 |
-------------------------------------------------------------------------------- /matlab/src/vl_cudatool.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_cudatool.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_imreadjpeg.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_imreadjpeg_old.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg_old.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnbilinearsampler.cpp: 
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbilinearsampler.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnbnorm.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbnorm.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnconv.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconv.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnconvt.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconvt.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnnormalize.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalize.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_nnpool.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnpool.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_taccummex.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_taccummex.cu"
5 |
-------------------------------------------------------------------------------- /matlab/src/vl_tmove.cpp: --------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_tmove.cu"
5 |
-------------------------------------------------------------------------------- /matlab/vl_nnabs.m: --------------------------------------------------------------------------------
1 | function y = vl_nnabs(x,dzdy)
2 | %VL_NNABS CNN absolute value unit.
3 | %   Y = VL_NNABS(X) applies the absolute value function to the data
4 | %   X. X can have arbitrary size.
5 | %
6 | %   DZDX = VL_NNABS(X, DZDY) computes the derivative of the block
7 | %   projected onto DZDY. DZDX and DZDY have the same dimensions as
8 | %   X and Y respectively.
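9 | %
10 | %   Example (an illustrative sketch of the forward and backward calls;
11 | %   the tensor sizes here are arbitrary):
12 | %     x = randn(8,8,4,2,'single') ;
13 | %     y = vl_nnabs(x) ;               % y = abs(x)
14 | %     dzdy = ones(size(y),'single') ;
15 | %     dzdx = vl_nnabs(x,dzdy) ;       % dzdx = dzdy .* sign(x)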
16 | %
17 | %   Note that, unlike VL_NNRELU, the buffer X must be retained for the
18 | %   backward pass: the derivative DZDY .* SIGN(X) cannot be recovered
19 | %   from Y alone, since the forward pass discards the sign of X.
20 |
21 | % Copyright (C) 2014-15 Andrea Vedaldi.
22 | % All rights reserved.
23 | %
24 | % This file is part of the VLFeat library and is made available under
25 | % the terms of the BSD license (see the COPYING file).
26 |
27 | if nargin <= 1 || isempty(dzdy)
28 |   y = abs(x) ;
29 | else
30 |   y = dzdy .* sign(x) ;
31 | end
32 |
33 | end
34 |
-------------------------------------------------------------------------------- /matlab/vl_nnbilinearsampler.m: --------------------------------------------------------------------------------
1 | %VL_NNBILINEARSAMPLER CNN spatial bilinear resampling
2 | %   Y = VL_NNBILINEARSAMPLER(X,GRID) resamples image X at the spatial
3 | %   locations specified by GRID using bilinear interpolation.
4 | %
5 | %   X is an array of dimension H x W x C x N, where (H,W) are the
6 | %   height and width of the image, C is the number of feature
7 | %   channels, and N is the number of images in the batch.
8 | %
9 | %   GRID is an array of dimension 2 x Ho x Wo x No, where (Ho,Wo) are
10 | %   the height and width of the output image and No the number of
11 | %   output images in the output batch Y. The output array Y has
12 | %   dimensions Ho x Wo x C x No. The same resampling grid is used for
13 | %   all input feature channels, but each output image in the batch Y
14 | %   uses its own grid.
15 | %
16 | %   For output image n, GRID(1,:,:,n) specifies the vertical location
17 | %   v of a sample in the input image X and GRID(2,:,:,n) the
18 | %   horizontal location u. The convention follows standard
19 | %   implementations of this operator in the literature. Namely:
20 | %
21 | %   1. The grid coordinates are normalized in the range [-1,1]. This
22 | %   means that (-1,-1) is the center of the upper-left pixel in the
23 | %   input image and (+1,+1) the center of the bottom-right pixel.
24 | %
25 | %   2. The V,U coordinate planes are stacked in the first dimension of
26 | %   GRID instead of in the third, as it would be more natural in
27 | %   MatConvNet (as these could be interpreted as 'channels' in
28 | %   GRID).
29 | %
30 | %   Further, No can be a multiple of N; in this case, it is assumed
31 | %   that there are No/N transforms per input image, hence, the
32 | %   transforms [1 ... No/N] are applied to the first image, [No/N+1
33 | %   ... 2*No/N] are applied to the second image, etc.
34 | %
35 | %   [DX, DGRID] = VL_NNBILINEARSAMPLER(X, GRID, DY) computes the
36 | %   derivatives of the block projected onto DY. DX, DGRID, DY have the
37 | %   same dimensions as X, GRID and Y, respectively.
38 | %
39 | %   ## CUDNN SUPPORT
40 | %
41 | %   If compiled in, the function will use cuDNN's
42 | %   implementation. Note, cuDNN v5 or higher is required.
43 | %   You can use the 'NoCudnn' option to disable
44 | %   cuDNN or 'CuDNN' to activate it back again (the
45 | %   choice sticks until MATLAB purges the MEX files for any reason).
46 |
47 | % Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
48 | % All rights reserved.
49 | %
50 | % This file is part of the VLFeat library and is made available under
51 | % the terms of the BSD license (see the COPYING file).
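52 | %
53 | %   Example (a minimal sketch; Ho, Wo and a single-precision input x
54 | %   are assumed to be defined): an identity grid in the normalized
55 | %   [-1,1] convention described above, resampling x onto a Ho x Wo output:
56 | %     [u,v] = meshgrid(linspace(-1,1,Wo), linspace(-1,1,Ho)) ;
57 | %     grid = zeros(2, Ho, Wo, 1, 'single') ;
58 | %     grid(1,:,:,1) = reshape(single(v), 1, Ho, Wo) ; % vertical coordinates
59 | %     grid(2,:,:,1) = reshape(single(u), 1, Ho, Wo) ; % horizontal coordinates
60 | %     y = vl_nnbilinearsampler(x, grid) ;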
52 | -------------------------------------------------------------------------------- /matlab/vl_nnbnorm.m: -------------------------------------------------------------------------------- 1 | %VL_NNBNORM CNN batch normalisation. 2 | % Y = VL_NNBNORM(X,G,B) applies batch normalization to the input 3 | % X. Batch normalization is defined as: 4 | % 5 | % Y(i,j,k,t) = G(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + B(k) 6 | % 7 | % where: 8 | % 9 | % mu(k) = mean_ijt X(i,j,k,t), 10 | % sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2, 11 | % sigma(k) = sqrt(sigma2(k) + EPSILON) 12 | % 13 | % are respectively the per-channel mean, variance, and standard 14 | % deviation of each feature channel in the data X. The parameters 15 | % G(k) and B(k) are multiplicative and additive constants used to 16 | % scale each data channel. 17 | % 18 | % Means and variances are accumulated across all the data items 19 | % (images) stored in the 4D tensor X (from which the name batch 20 | % normalization is derived). The constant EPSILON is used to 21 | % regularize the computation of sigma(k) and to avoid division by 22 | % zero. 23 | % 24 | % [DZDX,DZDG,DZDB] = VL_NNBNORM(X,G,B,DZDY) computes the derivatives 25 | % of the block projected onto DZDY. DZDX, DZDG, DZDB and DZDY have 26 | % the same dimensions as X, G, B, and Y respectively. 27 | % 28 | % Optionally, [Y,MOMENTS] = VL_NNBNORM(...) and 29 | % [DZDX,DZDG,DZDB,MOMENTS] = VL_NNBNORM(...,DZDY) return the values 30 | % of the vectors mu and sigma in the formulas above. Here, MOMENTS 31 | % is a DEPTH x 2 array [MU, SIGMA]. 32 | % 33 | % VL_NNBNORM(..., 'Option', value) takes the following options: 34 | % 35 | % `Epsilon`:: 1e-4 36 | % Specifies the constant EPSILON in the formulas above. 37 | % 38 | % `Moments`:: unspecified 39 | % Specifies an array MOMENTS with the values of mu and sigma to 40 | % use instead of computing them according to the equations 41 | % above. This is useful to disable batch normalization during 42 | % testing. 43 | % 44 | % `CuDNN`:: specified 45 | % If specified, turns on CuDNN. CuDNN is on by default. This 46 | % option can be useful to undo the effect of a previous 47 | % `NoCuDNN` option in the argument list. 48 | % 49 | % `NoCuDNN`:: not specified 50 | % If specified, turns off CuDNN. 51 | % 52 | % See also: VL_NNNORMALIZE(). 53 | 54 | % Copyright (C) 2015 Sébastien Ehrhardt, Karel Lenc and Andrea Vedaldi. 55 | % All rights reserved. 56 | % 57 | % This file is part of the VLFeat library and is made available under 58 | % the terms of the BSD license (see the COPYING file).
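For reference, a minimal numerical check of the formulas above (illustrative only; it assumes compiled MEX files and the default EPSILON of 1e-4, with arbitrary test sizes). With G = 1 and B = 0 the output is just the standardized input:

    x = randn(5, 5, 4, 10, 'single') ;
    g = ones(4, 1, 'single') ; b = zeros(4, 1, 'single') ;
    y = vl_nnbnorm(x, g, b) ;
    mu = mean(mean(mean(x, 1), 2), 4) ;                        % per-channel mean
    sigma = sqrt(mean(mean(mean(bsxfun(@minus, x, mu).^2, 1), 2), 4) + 1e-4) ;
    y_ = bsxfun(@rdivide, bsxfun(@minus, x, mu), sigma) ;      % G = 1, B = 0
    max(abs(y(:) - y_(:)))                                     % ~ single precision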
59 | -------------------------------------------------------------------------------- /matlab/vl_nnconcat.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin) 2 | %VL_NNCONCAT CNN concatenate multiple inputs. 3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell 4 | % array INPUTS along dimension DIM generating an output Y. 5 | % 6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives 7 | % of the block projected onto DZDY. DZDINPUTS has one element for 8 | % each element of INPUTS, each of which is an array that has the same 9 | % dimensions as the corresponding array in INPUTS. 10 | 11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 12 | % All rights reserved. 13 | % 14 | % This file is part of the VLFeat library and is made available under 15 | % the terms of the BSD license (see the COPYING file). 16 | 17 | opts.inputSizes = [] ; 18 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 19 | 20 | if nargin < 2, dim = 3; end 21 | if nargin < 3, dzdy = []; end 22 | 23 | if isempty(dzdy) 24 | y = cat(dim, inputs{:}); 25 | else 26 | if isempty(opts.inputSizes) 27 | opts.inputSizes = cellfun(@(inp) [size(inp,1),size(inp,2),size(inp,3),size(inp,4)], inputs, 'UniformOutput', false) ; 28 | end 29 | start = 1 ; 30 | y = cell(1, numel(opts.inputSizes)) ; 31 | s.type = '()' ; 32 | s.subs = {':', ':', ':', ':'} ; 33 | for i = 1:numel(opts.inputSizes) 34 | stop = start + opts.inputSizes{i}(dim) ; 35 | s.subs{dim} = start:stop-1 ; 36 | y{i} = subsref(dzdy,s) ; 37 | start = stop ; 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /matlab/vl_nncrop.m: -------------------------------------------------------------------------------- 1 | function y = vl_nncrop(x, crop, dzdy, inputSize) 2 | %VL_NNCROP CNN crop. 3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the 4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT]. 5 | % 6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the 7 | % function projected on the output derivative DZDY. DZDX has the same 8 | % dimension as X and DZDY the same dimension as Y. 9 | % 10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to 11 | % the previous call in which X is omitted and its size is passed as 12 | % INPUTSIZE. 13 | 14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi. 15 | % All rights reserved. 16 | % 17 | % This file is part of the VLFeat library and is made available under 18 | % the terms of the BSD license (see the COPYING file). 19 | 20 | if nargin < 4 21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ; 22 | else 23 | sz = inputSize ; 24 | end 25 | 26 | sv = 1 + crop(1) : sz(1) - crop(2) ; 27 | su = 1 + crop(3) : sz(2) - crop(4) ; 28 | 29 | if nargin <= 2 || isempty(dzdy) 30 | y = x(sv, su, :, :) ; 31 | else 32 | if isa(dzdy, 'gpuArray') 33 | y = gpuArray.zeros(sz, classUnderlying(dzdy)) ; 34 | else 35 | y = zeros(sz, class(dzdy)) ; 36 | end 37 | y(sv, su, :, :) = dzdy ; 38 | end 39 | -------------------------------------------------------------------------------- /matlab/vl_nndropout.m: -------------------------------------------------------------------------------- 1 | function [y,mask] = vl_nndropout(x,varargin) 2 | %VL_NNDROPOUT CNN dropout. 3 | % [Y,MASK] = VL_NNDROPOUT(X) applies dropout to the data X. MASK 4 | % is the randomly sampled dropout mask. Both Y and MASK have the 5 | % same size as X. 6 | % 7 | % VL_NNDROPOUT(X, 'rate', R) sets the dropout rate to R, the probability 8 | % that each variable is zeroed. Surviving variables are scaled by 9 | % 1/(1-R), so that the expected value of MASK is one. 10 | % 11 | % [DZDX] = VL_NNDROPOUT(X, DZDY, 'mask', MASK) computes the 12 | % derivatives of the block projected onto DZDY. Note that MASK must 13 | % be specified in order to compute the derivative consistently with 14 | % the MASK randomly sampled in the forward pass. DZDX and DZDY have 15 | % the same dimensions as X and Y respectively. 16 | % 17 | % Note that in the original paper on dropout, at test time the 18 | % network weights for the dropout layers are scaled down to 19 | % compensate for having all the neurons active. In this 20 | % implementation the dropout function itself already does this 21 | % compensation during training. So at test time no alterations are 22 | % required. 23 | 24 | % Copyright (C) 2014-16 Andrea Vedaldi, Karel Lenc. 25 | % All rights reserved. 26 | % 27 | % This file is part of the VLFeat library and is made available under 28 | % the terms of the BSD license (see the COPYING file). 29 | 30 | opts.rate = 0.5 ; 31 | opts.mask = [] ; 32 | 33 | backMode = numel(varargin) > 0 && ~ischar(varargin{1}) ; 34 | if backMode 35 | dzdy = varargin{1} ; 36 | opts = vl_argparse(opts, varargin(2:end)) ; 37 | else 38 | opts = vl_argparse(opts, varargin) ; 39 | end 40 | 41 | % determine mask 42 | scale = 1 / (1 - opts.rate) ; 43 | if isa(x, 'gpuArray') 44 | dataType = classUnderlying(x) ; 45 | else 46 | dataType = class(x) ; 47 | end 48 | switch dataType 49 | case 'single' 50 | scale = single(scale) ; 51 | case 'double' 52 | scale = double(scale) ; 53 | end 54 | 55 | if backMode && isempty(opts.mask) 56 | warning('vl_nndropout: when used in backward mode, the mask should be specified') ; 57 | end 58 | if isempty(opts.mask) 59 | % product determines data type 60 | if isa(x,'gpuArray') 61 | opts.mask = scale * (gpuArray.rand(size(x), 'single') >= opts.rate) ; 62 | else 63 | opts.mask = scale * (rand(size(x), 'single') >= opts.rate) ; 64 | end 65 | end 66 | 67 | % Apply dropout mask. Note that mask is either `single` or `double` 68 | % and a CPU or GPU array like the input argument `x`. 69 | if ~backMode 70 | y = opts.mask .* x ; 71 | else 72 | y = opts.mask .* dzdy ; 73 | end 74 | mask = opts.mask ;
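Usage sketch for the forward/backward pair (illustrative only, mirroring the nndropout case in the xtest suite): the mask returned by the forward call must be passed back when computing derivatives, so that the same units are dropped in both passes.

    x = randn(4, 4, 2, 3, 'single') ;
    [y, mask] = vl_nndropout(x, 'rate', 0.5) ;      % forward: sample the mask
    dzdy = randn(size(y), 'single') ;
    dzdx = vl_nndropout(x, dzdy, 'mask', mask) ;    % backward: reuse the mask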
75 | -------------------------------------------------------------------------------- /matlab/vl_nnnoffset.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnnoffset(x, param, dzdy) 2 | %VL_NNNOFFSET CNN norm-dependent offset. 3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the 4 | % weighted norm of the feature channels: 5 | % 6 | % X(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2) 7 | % 8 | % where 9 | % 10 | % L(i,j) = sum_k X(i,j,k)^2 11 | % 12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the 13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions 14 | % as X and Y respectively. 15 | 16 | % Copyright (C) 2014 Andrea Vedaldi. 17 | % All rights reserved. 18 | % 19 | % This file is part of the VLFeat library and is made available under 20 | % the terms of the BSD license (see the COPYING file). 21 | 22 | L = sum(x.^2,3) ; 23 | L = max(L, 1e-8) ; 24 | 25 | if nargin <= 2 26 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ; 27 | else 28 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ; 29 | end 30 | -------------------------------------------------------------------------------- /matlab/vl_nnnormalize.m: -------------------------------------------------------------------------------- 1 | %VL_NNNORMALIZE CNN Local Response Normalization (LRN) 2 | % Y = VL_NNNORMALIZE(X, PARAM) computes the so-called Local Response 3 | % Normalization (LRN) operator. This operator performs a 4 | % channel-wise sliding window normalization of each column of the 5 | % input array X. The normalized output is given by: 6 | % 7 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA 8 | % 9 | % where the normalization factor is given by 10 | % 11 | % L(i,j,k) = KAPPA + ALPHA * sum_{q in Q(k)} X(i,j,q)^2, 12 | % 13 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The 14 | % window Q(k) is defined as: 15 | % 16 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))], 17 | % 18 | % where D is the number of feature channels in X. Note in particular 19 | % that, by setting N >= 2D, the function can be used to normalize 20 | % all the channels as a single group (useful to achieve L2 21 | % normalization). 22 | % 23 | % DZDX = VL_NNNORMALIZE(X, PARAM, DZDY) computes the derivative of 24 | % the block projected onto DZDY. DZDX and DZDY have the same 25 | % dimensions as X and Y respectively. 26 | % 27 | % **Remark:** Some CNN libraries (e.g. Caffe) use a slightly 28 | % different convention for the parameters of the LRN. Caffe in 29 | % particular uses the convention: 30 | % 31 | % PARAM_CAFFE = [N KAPPA N*ALPHA BETA] 32 | % 33 | % i.e. the ALPHA parameter is multiplied by N. 34 | 35 | % Copyright (C) 2014 Andrea Vedaldi. 36 | % All rights reserved. 37 | % 38 | % This file is part of the VLFeat library and is made available under 39 | % the terms of the BSD license (see the COPYING file).
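A concrete instance of the L2 normalization remark above (illustrative only; it mirrors the l2 case in the xtest suite): with N at least twice the number of channels, KAPPA = 0, ALPHA = 1 and BETA = 0.5, each location is divided by the L2 norm of its feature channels.

    x = randn(1, 1, 10, 1, 'single') ;
    y = vl_nnnormalize(x, [20, 0, 1, .5]) ;
    sum(y(:).^2)                                    % approximately 1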
40 | -------------------------------------------------------------------------------- /matlab/vl_nnnormalizelp.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnnormalizelp(x,dzdy,varargin) 2 | %VL_NNNORMALIZELP CNN Lp normalization 3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial 4 | % location in the array X: 5 | % 6 | % Y(i,j,k) = X(i,j,k) / (sum_q X(i,j,q)^p + epsilon)^(1/p) 7 | % 8 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the 9 | % function with respect to X projected onto DZDY. 10 | % 11 | % VL_NNNORMALIZELP(___, 'opt', value, ...) takes the following options: 12 | % 13 | % `p`:: 2 14 | % The exponent of the Lp norm. Warning: currently only even 15 | % exponents are supported. 16 | % 17 | % `epsilon`:: 0.01 18 | % The constant added to the sum of p-powers before taking the 19 | % 1/p power (see the formula above). 20 | % 21 | % `spatial`:: `false` 22 | % If `true`, sum along the two spatial dimensions instead of 23 | % along the feature channels. 24 | % 25 | % See also: VL_NNNORMALIZE(). 26 | 27 | opts.epsilon = 1e-2 ; 28 | opts.p = 2 ; 29 | opts.spatial = false ; 30 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 31 | 32 | if ~opts.spatial 33 | massp = sum(x.^opts.p,3) + opts.epsilon ; 34 | else 35 | massp = sum(sum(x.^opts.p,1),2) + opts.epsilon ; 36 | end 37 | mass = massp.^(1/opts.p) ; 38 | y = bsxfun(@rdivide, x, mass) ; 39 | 40 | if nargin < 2 || isempty(dzdy) 41 | return ; 42 | else 43 | dzdy = bsxfun(@rdivide, dzdy, mass) ; 44 | if ~opts.spatial 45 | tmp = sum(dzdy .* x, 3) ; 46 | else 47 | tmp = sum(sum(dzdy .* x, 1),2); 48 | end 49 | y = dzdy - bsxfun(@times, tmp, bsxfun(@rdivide, x.^(opts.p-1), massp)) ; 50 | end
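Usage sketch for the default L2 case (illustrative only, with arbitrary test sizes): each spatial location is divided by the Lp norm of its feature channels, so the result has approximately unit norm (exactly unit norm as epsilon tends to zero).

    x = randn(3, 3, 8, 1, 'single') ;
    y = vl_nnnormalizelp(x) ;                       % p = 2, epsilon = 0.01
    squeeze(sum(y.^2, 3))                           % entries close to 1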
51 | -------------------------------------------------------------------------------- /matlab/vl_nnrelu.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnrelu(x,dzdy,varargin) 2 | %VL_NNRELU CNN rectified linear unit. 3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data 4 | % X. X can have arbitrary size. 5 | % 6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block 7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as 8 | % X and Y respectively. 9 | % 10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options: 11 | % 12 | % `Leak`:: 0 13 | % Set the leak factor, a non-negative number. Y is equal to X if 14 | % X is not smaller than zero; otherwise, Y is equal to X 15 | % multiplied by the leak factor. By default, the leak factor is 16 | % zero; for values greater than that one obtains the leaky ReLU 17 | % unit. 18 | % 19 | % ADVANCED USAGE 20 | % 21 | % As a further optimization, in the backward computation it is 22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then 23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY). 24 | % This is useful because it means that the buffer X does not need to 25 | % be remembered in the backward pass. 26 | 27 | % Copyright (C) 2014-15 Andrea Vedaldi. 28 | % All rights reserved. 29 | % 30 | % This file is part of the VLFeat library and is made available under 31 | % the terms of the BSD license (see the COPYING file). 32 | 33 | opts.leak = 0 ; 34 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 35 | 36 | if opts.leak == 0 37 | if nargin <= 1 || isempty(dzdy) 38 | y = max(x, 0) ; 39 | else 40 | y = dzdy .* (x > 0) ; 41 | end 42 | else 43 | if nargin <= 1 || isempty(dzdy) 44 | y = x .* (opts.leak + (1 - opts.leak) * (x > 0)) ; 45 | else 46 | y = dzdy .* (opts.leak + (1 - opts.leak) * (x > 0)) ; 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /matlab/vl_nnsigmoid.m: -------------------------------------------------------------------------------- 1 | function out = vl_nnsigmoid(x,dzdy) 2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit. 3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can 4 | % have an arbitrary size. The sigmoid is defined as follows: 5 | % 6 | % SIGMOID(X) = 1 / (1 + EXP(-X)). 7 | % 8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the 9 | % block projected onto DZDY. DZDX and DZDY have the same 10 | % dimensions as X and Y respectively. 11 | 12 | % Copyright (C) 2015 Karel Lenc. 13 | % All rights reserved. 14 | % 15 | % This file is part of the VLFeat library and is made available under 16 | % the terms of the BSD license (see the COPYING file). 17 | 18 | y = 1 ./ (1 + exp(-x)); 19 | 20 | if nargin <= 1 || isempty(dzdy) 21 | out = y ; 22 | else 23 | out = dzdy .* (y .* (1 - y)) ; 24 | end 25 | -------------------------------------------------------------------------------- /matlab/vl_nnsoftmax.m: -------------------------------------------------------------------------------- 1 | function Y = vl_nnsoftmax(X,dzdY) 2 | %VL_NNSOFTMAX CNN softmax. 3 | % Y = VL_NNSOFTMAX(X) applies the softmax operator to the data X. X 4 | % has dimension H x W x D x N, packing N arrays of H x W 5 | % D-dimensional vectors. 6 | % 7 | % D can be thought of as the number of possible classes and the 8 | % function computes the softmax along the D dimension. Often W=H=1, 9 | % but this is not a requirement, as the operator is applied 10 | % convolutionally at all spatial locations. 11 | % 12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block 13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as 14 | % X and Y respectively. 15 | 16 | % Copyright (C) 2014 Andrea Vedaldi. 17 | % All rights reserved. 18 | % 19 | % This file is part of the VLFeat library and is made available under 20 | % the terms of the BSD license (see the COPYING file). 21 | 22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ; 23 | L = sum(E,3) ; 24 | Y = bsxfun(@rdivide, E, L) ; 25 | 26 | if nargin <= 1, return ; end 27 | 28 | % backward 29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
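A quick sanity check (illustrative only) that the softmax output is a probability distribution along the third dimension at every spatial location; note the implementation above subtracts max(X,[],3) before exponentiating, so it is numerically stable for large inputs.

    x = randn(2, 2, 10, 3, 'single') ;
    y = vl_nnsoftmax(x) ;
    sum(y, 3)                                       % all entries equal 1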
30 | -------------------------------------------------------------------------------- /matlab/vl_nnspnorm.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnspnorm(x, param, dzdy) 2 | %VL_NNSPNORM CNN spatial normalization. 3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of 4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and 5 | % PW define the size of the spatial neighbourhood used for 6 | % normalization. 7 | % 8 | % For each feature channel, the function computes the sum of squares 9 | % of X inside each rectangle, N2(i,j). It then divides each element 10 | % of X as follows: 11 | % 12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA. 13 | % 14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the 15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions 16 | % as X and Y respectively. 17 | 18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 19 | % All rights reserved. 20 | % 21 | % This file is part of the VLFeat library and is made available under 22 | % the terms of the BSD license (see the COPYING file). 23 | 24 | pad = floor((param(1:2)-1)/2) ; 25 | pad = [pad ; param(1:2)-1-pad] ; 26 | 27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ; 28 | f = 1 + param(3) * n2 ; 29 | 30 | if nargin <= 2 || isempty(dzdy) 31 | y = f.^(-param(4)) .* x ; 32 | else 33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ; 34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ; 35 | end -------------------------------------------------------------------------------- /matlab/vl_rootnn.m: -------------------------------------------------------------------------------- 1 | function root = vl_rootnn() 2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox. 3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox. 4 | 5 | % Copyright (C) 2014 Andrea Vedaldi. 6 | % All rights reserved. 7 | % 8 | % This file is part of the VLFeat library and is made available under 9 | % the terms of the BSD license (see the COPYING file). 10 | 11 | root = fileparts(fileparts(mfilename('fullpath'))) ; 12 | -------------------------------------------------------------------------------- /matlab/vl_setupnn.m: -------------------------------------------------------------------------------- 1 | function vl_setupnn() 2 | %VL_SETUPNN Setup the MatConvNet toolbox. 3 | % VL_SETUPNN() adds the MatConvNet toolbox to the MATLAB path. 4 | 5 | % Copyright (C) 2014-15 Andrea Vedaldi. 6 | % All rights reserved. 7 | % 8 | % This file is part of the VLFeat library and is made available under 9 | % the terms of the BSD license (see the COPYING file). 10 | 11 | root = vl_rootnn() ; 12 | addpath(fullfile(root, 'matlab')) ; 13 | addpath(fullfile(root, 'matlab', 'mex')) ; 14 | addpath(fullfile(root, 'matlab', 'simplenn')) ; 15 | addpath(fullfile(root, 'matlab', 'xtest')) ; 16 | addpath(fullfile(root, 'examples')) ; 17 | 18 | if ~exist('gather') 19 | warning('The MATLAB Parallel Toolbox does not seem to be installed.
Activating compatibility functions.') ; 20 | addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ; 21 | end 22 | 23 | if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0 24 | warning('MatConvNet is not compiled. Consider running `vl_compilenn`.'); 25 | end 26 | -------------------------------------------------------------------------------- /matlab/vl_taccum.m: -------------------------------------------------------------------------------- 1 | function a = vl_taccum(alpha, a, beta, b) 2 | %VL_TACCUM Compute A = alpha A + beta B 3 | % A = VL_TACCUM(ALPHA, A, BETA, B) computes efficiently A = alpha A 4 | % + beta B. For GPU arrays, it performs its computation in place, by 5 | % modifying A without creating an additional copy. 6 | 7 | % Copyright (C) 2016 Andrea Vedaldi. 8 | % All rights reserved. 9 | % 10 | % This file is part of the VLFeat library and is made available under 11 | % the terms of the BSD license (see the COPYING file). 12 | 13 | if isscalar(a) 14 | a = alpha * a + beta * b ; 15 | return ; 16 | elseif isa(a, 'gpuArray') 17 | vl_taccummex(alpha, a, beta, b, 'inplace') ; 18 | else 19 | a = vl_taccummex(alpha, a, beta, b) ; 20 | end 21 | -------------------------------------------------------------------------------- /matlab/xtest/suite/Scale.m: -------------------------------------------------------------------------------- 1 | classdef Scale < nntest 2 | properties 3 | x 4 | a 5 | b 6 | end 7 | 8 | properties (TestParameter) 9 | dim = {1 2 3 4} 10 | end 11 | 12 | methods (TestClassSetup) 13 | function data(test,device) 14 | test.x = test.randn(15,14,3,2) ; 15 | test.a = test.randn(15,14,3,2) ; 16 | test.b = test.randn(15,14,3,2) ; 17 | end 18 | end 19 | 20 | methods (Test) 21 | function data_and_parameters(test, dim) 22 | x = test.x ; 23 | a = test.a ; 24 | b = test.b ; 25 | 26 | a = sum(a, dim) ; 27 | b = sum(b, dim) ; 28 | 29 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ; 30 | 31 | output = scale.forward({x}, {a,b}) ; 32 | dzdy = test.randn(size(output{1})) ; 33 | [derInputs, derParams] = scale.backward({x}, {a,b}, {dzdy}) ; 34 | 35 | pick = @(x) x{1} ; 36 | dzdx = derInputs{1} ; 37 | dzda = derParams{1} ; 38 | dzdb = derParams{2} ; 39 | 40 | test.der(@(x) pick(scale.forward({x},{a,b})), x, dzdy, dzdx, 1e-2 * test.range) ; 41 | test.der(@(a) pick(scale.forward({x},{a,b})), a, dzdy, dzda, 1e-2 * test.range) ; 42 | test.der(@(b) pick(scale.forward({x},{a,b})), b, dzdy, dzdb, 1e-2 * test.range) ; 43 | end 44 | 45 | function data_only(test, dim) 46 | x = test.x ; 47 | a = test.a ; 48 | b = test.b ; 49 | 50 | a = sum(a, dim) ; 51 | b = sum(b, dim) ; 52 | 53 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ; 54 | 55 | output = scale.forward({x,a,b}, {}) ; 56 | dzdy = test.randn(size(output{1})) ; 57 | [derInputs, derParams] = scale.backward({x,a,b}, {}, {dzdy}) ; 58 | 59 | pick = @(x) x{1} ; 60 | dzdx = derInputs{1} ; 61 | dzda = derInputs{2} ; 62 | dzdb = derInputs{3} ; 63 | 64 | test.der(@(x) pick(scale.forward({x,a,b},{})), x, dzdy, dzdx, 1e-2 * test.range) ; 65 | test.der(@(a) pick(scale.forward({x,a,b},{})), a, dzdy, dzda, 1e-2 * test.range) ; 66 | test.der(@(b) pick(scale.forward({x,a,b},{})), b, dzdy, dzdb, 1e-2 * test.range) ; 67 | end 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnbnorm.m: -------------------------------------------------------------------------------- 1 | classdef nnbnorm < nntest 2 | properties (TestParameter) 3 | rows = {2 8
13} 4 | cols = {2 8 17} 5 | numDims = {1 3 4} 6 | batchSize = {2 7} 7 | end 8 | methods (Test) 9 | function basic(test, rows, cols, numDims, batchSize) 10 | r = rows ; 11 | c = cols ; 12 | nd = numDims ; 13 | bs = batchSize ; 14 | x = test.randn(r, c, nd, bs) ; 15 | %g = test.randn(1, 1, nd, 1) ; 16 | %b = test.randn(1, 1, nd, 1) ; 17 | g = test.randn(nd, 1) / test.range ; 18 | b = test.randn(nd, 1) / test.range ; 19 | 20 | y = vl_nnbnorm(x,g,b) ; 21 | dzdy = test.randn(size(y)) ; 22 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ; 23 | 24 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-3) ; 25 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, 1e-2) ; 26 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, 1e-3) ; 27 | end 28 | end 29 | end -------------------------------------------------------------------------------- /matlab/xtest/suite/nnconcat.m: -------------------------------------------------------------------------------- 1 | classdef nnconcat < nntest 2 | methods (Test) 3 | function basic(test) 4 | pick = @(i,x) x{i} ; 5 | sz = [4,5,10,3] ; 6 | for dim = 1:3 7 | sz1 = sz ; sz1(dim) = 3 ; 8 | sz2 = sz ; sz2(dim) = 7 ; 9 | sz3 = sz ; sz3(dim) = 2 ; 10 | x1 = test.randn(sz1) ; 11 | x2 = test.randn(sz2) ; 12 | x3 = test.randn(sz3) ; 13 | 14 | y = vl_nnconcat({x1, x2, x3}, dim) ; 15 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ; 16 | dzdy = test.randn(size(y)) ; 17 | dzdx = vl_nnconcat({x1, x2, x3} ,dim, dzdy) ; 18 | 19 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ; 20 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ; 21 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ; 22 | end 23 | end 24 | 25 | function by_size(test) 26 | pick = @(i,x) x{i} ; 27 | sz = [4,5,10,3] ; 28 | for dim = 1:3 29 | sz1 = sz ; sz1(dim) = 3 ; 30 | sz2 = sz ; sz2(dim) = 7 ; 31 | sz3 = sz ; sz3(dim) = 2 ; 32 | x1 = test.randn(sz1) ; 33 | x2 = test.randn(sz2) ; 34 | x3 = test.randn(sz3) ; 35 | 36 | y = vl_nnconcat({x1, x2, x3}, dim) ; 37 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ; 38 | dzdy = test.randn(size(y)) ; 39 | dzdx = vl_nnconcat({}, dim, dzdy, 'inputSizes', {sz1, sz2, sz3}) ; 40 | 41 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ; 42 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ; 43 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ; 44 | end 45 | end 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nndropout.m: -------------------------------------------------------------------------------- 1 | classdef nndropout < nntest 2 | methods (Test) 3 | function basic(test) 4 | x = test.randn(4,5,10,3) ; 5 | [y,mask] = vl_nndropout(x) ; 6 | dzdy = test.randn(size(y)) ; 7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ; 8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ; 9 | end 10 | end 11 | end 12 | 13 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnmnist.m: -------------------------------------------------------------------------------- 1 | classdef nnmnist < nntest 2 | properties (TestParameter) 3 | networkType = {'dagnn', 'simplenn'} 4 | end 5 | 6 | methods (TestClassSetup) 7 | function init(test) 8 | addpath(fullfile(vl_rootnn, 'examples', 'mnist')); 9 | end 10 | end 
11 | 12 | methods (Test) 13 | function valErrorRate(test, networkType) 14 | clear mex ; % will reset GPU, remove MCN to avoid crashing 15 | % MATLAB on exit (BLAS issues?) 16 | if strcmp(test.dataType, 'double'), return ; end 17 | switch test.currentDevice 18 | case 'cpu' 19 | gpus = []; 20 | case 'gpu' 21 | gpus = 1; 22 | end 23 | trainOpts = struct('numEpochs', 1, 'continue', false, 'gpus', gpus, ... 24 | 'plotStatistics', false); 25 | if strcmp(networkType, 'simplenn') 26 | trainOpts.errorLabels = {'error', 'top5err'} ; 27 | end 28 | [~, info] = cnn_mnist('train', trainOpts, 'networkType', networkType); 29 | test.verifyLessThan(info.train.error, 0.08); 30 | test.verifyLessThan(info.val.error, 0.025); 31 | end 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnnormalize.m: -------------------------------------------------------------------------------- 1 | classdef nnnormalize < nntest 2 | properties (TestParameter) 3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17} 4 | sgroup = {2 3 4 5 6 7} 5 | end 6 | 7 | methods (Test) 8 | function basic(test, group) 9 | param = [group, .1, .5, .75] ; 10 | x = test.randn(3,2,10,4) ; 11 | y = vl_nnnormalize(x,param) ; 12 | dzdy = test.rand(size(y))-0.5 ; 13 | dzdx = vl_nnnormalize(x,param,dzdy) ; 14 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ; 15 | end 16 | 17 | function compare_to_naive(test, sgroup) 18 | param = [sgroup, .1, .5, .75] ; 19 | x = test.randn(3,2,10,4) ; 20 | y = vl_nnnormalize(gather(x),param) ; 21 | y_ = test.zeros(size(y)) ; 22 | x_ = gather(x) ; 23 | for i=1:size(x,1) 24 | for j=1:size(x,2) 25 | for n=1:size(x,4) 26 | t = test.zeros(1,1,size(x,3),1) ; 27 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ... 
28 | ones(param(1),1), 'same')).^(-param(4)) ; 29 | y_(i,j,:,n) = x_(i,j,:,n) .* t ; 30 | end 31 | end 32 | end 33 | test.eq(y,y_) ; 34 | end 35 | 36 | function l2(test) 37 | x = test.randn(1,1,10,1) ; 38 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ; 39 | test.eq(sum(y(:).^2), test.toDataType(1), 1e-2) ; 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnnormalizelp.m: -------------------------------------------------------------------------------- 1 | classdef nnnormalizelp < nntest 2 | properties (TestParameter) 3 | h = {1 2 3 4} 4 | w = {1 2 3 4} 5 | d = {2 3 4} 6 | p = {2 4} 7 | end 8 | 9 | methods (Test) 10 | function basicl2(test, h,w,d) 11 | x = test.randn(h,w,d,3) ; 12 | y = vl_nnnormalizelp(x) ; 13 | dzdy = test.rand(size(y))-0.5 ; 14 | dzdx = vl_nnnormalizelp(x,dzdy) ; 15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, 1e-4, 0.3) ; 16 | end 17 | 18 | function lp(test, p) 19 | x = test.randn(2,3,5,3) / test.range ; 20 | y = vl_nnnormalizelp(x, [], 'p', p) ; 21 | dzdy = test.rand(size(y))-0.5 ; 22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ; 23 | test.der(@(x) vl_nnnormalizelp(x,[],'p',p), x, dzdy, dzdx, 1e-4, 0.3) ; 24 | end 25 | 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnoffset.m: -------------------------------------------------------------------------------- 1 | classdef nnoffset < nntest 2 | methods (Test) 3 | function basic(test) 4 | param = [.34, .5] ; 5 | x = test.randn(4,5,10,3) ; 6 | y = vl_nnnoffset(x,param) ; 7 | dzdy = test.randn(size(y)) ; 8 | dzdx = vl_nnnoffset(x,param,dzdy) ; 9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ; 10 | end 11 | end 12 | end -------------------------------------------------------------------------------- /matlab/xtest/suite/nnpdist.m: -------------------------------------------------------------------------------- 1 | classdef nnpdist < nntest 2 | properties (TestParameter) 3 | oneToOne = {false, true} 4 | noRoot = {false, true} 5 | p = {.5 1 2 3} 6 | aggregate = {false, true} 7 | end 8 | methods (Test) 9 | function basic(test,oneToOne, noRoot, p, aggregate) 10 | if aggregate 11 | % make it smaller to avoid numerical derivative issues with 12 | % float 13 | h = 3 ; 14 | w = 2 ; 15 | else 16 | h = 13 ; 17 | w = 17 ; 18 | end 19 | d = 4 ; 20 | n = 5 ; 21 | x = test.randn(h,w,d,n) ; 22 | if oneToOne 23 | x0 = test.randn(h,w,d,n) ; 24 | else 25 | x0 = test.randn(1,1,d,n) ; 26 | end 27 | opts = {'noRoot', noRoot, 'aggregate', aggregate} ; 28 | 29 | y = vl_nnpdist(x, x0, p, opts{:}) ; 30 | 31 | % make sure they are not too close in any dimension as this may be a 32 | % problem for the finite difference derivatives as one could 33 | % approach 0 which is not differentiable for some p-norms 34 | 35 | s = abs(bsxfun(@minus, x, x0)) < test.range*1e-1 ; 36 | x(s) = x(s) + 5*test.range ; 37 | 38 | dzdy = test.rand(size(y)) ; 39 | [dzdx, dzdx0] = vl_nnpdist(x,x0,p,dzdy,opts{:}) ; 40 | test.der(@(x) vl_nnpdist(x,x0,p,opts{:}), x, dzdy, dzdx, test.range * 1e-3) ; 41 | if oneToOne 42 | % Pdist does not implement backprop of the bsxfun 43 | test.der(@(x0) vl_nnpdist(x,x0,p,opts{:}), x0, dzdy, dzdx0, test.range * 1e-3) ; 44 | end 45 | end 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnrelu.m: -------------------------------------------------------------------------------- 1 | classdef
nnrelu < nntest 2 | properties 3 | x 4 | end 5 | 6 | methods (TestClassSetup) 7 | function data(test,device) 8 | % make sure that all elements in x are different. in this way, 9 | % we can compute numerical derivatives reliably by adding a delta < .5. 10 | x = test.randn(15,14,3,2) ; 11 | x(:) = randperm(numel(x))' ; 12 | % avoid non-diff value for test 13 | x(x==0)=1 ; 14 | test.x = x ; 15 | test.range = 10 ; 16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end 17 | end 18 | end 19 | 20 | methods (Test) 21 | function basic(test) 22 | x = test.x ; 23 | y = vl_nnrelu(x) ; 24 | dzdy = test.randn(size(y)) ; 25 | dzdx = vl_nnrelu(x,dzdy) ; 26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ; 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnsigmoid.m: -------------------------------------------------------------------------------- 1 | classdef nnsigmoid < nntest 2 | methods (Test) 3 | function basic(test) 4 | x = test.randn(5,5,1,1)/test.range ; 5 | y = vl_nnsigmoid(x) ; 6 | dzdy = test.randn(size(y)) ; 7 | dzdx = vl_nnsigmoid(x,dzdy) ; 8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ; 9 | end 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnsoftmax.m: -------------------------------------------------------------------------------- 1 | classdef nnsoftmax < nntest 2 | properties (TestParameter) 3 | h = {1 2 3} 4 | w = {1 2} 5 | end 6 | methods (Test) 7 | function basic(test,h,w) 8 | d = 10 ; 9 | n = 3 ; 10 | x = test.randn(h,w,d,n)/test.range ; 11 | y = vl_nnsoftmax(x) ; 12 | dzdy = test.randn(size(y)) ; 13 | dzdx = vl_nnsoftmax(x, dzdy) ; 14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ; 15 | end 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnsoftmaxloss.m: -------------------------------------------------------------------------------- 1 | classdef nnsoftmaxloss < nntest 2 | properties (TestParameter) 3 | weighed = {false true} 4 | multilab = {false true} 5 | end 6 | 7 | methods (Test) 8 | function basic(test, multilab, weighed) 9 | C = 10 ; 10 | n = 3 ; 11 | if multilab 12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ; 13 | else 14 | c = reshape([7 2 1],1,1,1,[]) ; 15 | end 16 | if weighed 17 | c = cat(3, c, test.rand(size(c))) ; 18 | end 19 | 20 | % compare direct and indirect composition; this cannot 21 | % take large test.ranges 22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative 23 | y = vl_nnsoftmaxloss(x,c) ; 24 | if size(c,3) == 1 25 | opts = {'loss','log'} ; 26 | else 27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ; 28 | end 29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ; 30 | dzdy = test.randn(size(y)) ; 31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ; 32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ; 33 | test.eq(y,y_) ; 34 | test.eq(dzdx,dzdx_) ; 35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, dzdy, dzdx, 0.001, -5e1) ; 36 | 37 | % now larger input range 38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative 39 | y = vl_nnsoftmaxloss(x,c) ; 40 | dzdy = test.randn(size(y)) ; 41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ; 42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ... 
43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ; 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /matlab/xtest/suite/nnspnorm.m: -------------------------------------------------------------------------------- 1 | classdef nnspnorm < nntest 2 | methods (Test) 3 | function basic(test) 4 | h = 13 ; 5 | w = 17 ; 6 | d = 4 ; 7 | n = 5 ; 8 | param = [3, 3, 0.1, 0.75] ; 9 | x = test.randn(h,w,d,n) ; 10 | y = vl_nnspnorm(x, param) ; 11 | dzdy = test.rand(h, w, d, n) ; 12 | dzdx = vl_nnspnorm(x, param, dzdy) ; 13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ; 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /matlab/xtest/vl_bench_bnorm.m: -------------------------------------------------------------------------------- 1 | function vl_bench_bnorm(gpu) 2 | if nargin < 1 3 | gpu = false ; 4 | end 5 | 6 | T = 100 ; 7 | x = randn(64,64,32,32,'single') ; 8 | g = randn(32,1,'single') ; 9 | b = randn(32,1,'single') ; 10 | 11 | if gpu 12 | x = gpuArray(x) ; 13 | g = gpuArray(g) ; 14 | b = gpuArray(b) ; 15 | end 16 | 17 | tic 18 | for t=1:T 19 | y = vl_nnbnorm(x,g,b) ; 20 | end 21 | if gpu, wait(gpuDevice) ; end 22 | fprintf('new: %f\n',toc); 23 | 24 | tic 25 | for t=1:T 26 | y_ = vl_nnbnorm_old(x,g,b) ; 27 | end 28 | if gpu, wait(gpuDevice) ; end 29 | fprintf('old: %f\n',toc); 30 | 31 | dzdy = randn(size(y),'single') ; 32 | if gpu 33 | dzdy = gpuArray(dzdy) ; 34 | end 35 | 36 | tic 37 | for t=1:T 38 | [a,b,c] = vl_nnbnorm(x,g,b,dzdy) ; 39 | end 40 | if gpu, wait(gpuDevice) ; end 41 | fprintf('new deriv: %f\n',toc); 42 | 43 | tic 44 | for t=1:T 45 | [a_,b_,c_] = vl_nnbnorm_old(x,g,b,dzdy) ; 46 | end 47 | if gpu, wait(gpuDevice) ; end 48 | fprintf('old deriv: %f\n',toc); 49 | 50 | vl_testsim(y,y_); 51 | vl_testsim(a,a_); 52 | vl_testsim(b,b_); 53 | vl_testsim(c,c_); 54 | end 55 | -------------------------------------------------------------------------------- /matlab/xtest/vl_bench_imreadjpeg.m: -------------------------------------------------------------------------------- 1 | % VL_BENCH_IMREADJPEG Evaluates the speed of imreadjpeg 2 | 3 | numThreads = 4 ; 4 | base = 'data/bench-imreadjpeg' ; 5 | 6 | files = {} ; 7 | files = dir(fullfile(base,'*.jpg')) ; 8 | files = fullfile(base, {files.name}) ; 9 | if numel(files) > 256, files = files(1:256) ; end 10 | 11 | for preallocate = [true, false] 12 | opts={'verbose','verbose', 'preallocate', preallocate} ; 13 | for t=1:4 14 | % simple read 15 | fprintf('direct read single thread\n') ; 16 | clear ims ; 17 | tic ; 18 | ims = vl_imreadjpeg(files, 'numThreads', 1, opts{:}) ; 19 | directSingle(t) = toc ; 20 | fprintf(' done\n') ; 21 | pause(1) ; 22 | 23 | % simple read 24 | fprintf('direct read multi thread\n') ; 25 | clear ims ; 26 | tic ; 27 | ims = vl_imreadjpeg(files, 'numThreads', numThreads, opts{:}) ; 28 | direct(t) = toc ; 29 | fprintf(' done\n') ; 30 | pause(1) ; 31 | 32 | % threaded read 33 | fprintf('issue prefetch\n') ; 34 | tic ; 35 | vl_imreadjpeg(files, 'prefetch', opts{:}) ; 36 | prefetch(t) = toc ; 37 | fprintf(' done [pause 6]\n') ; 38 | pause(6) 39 | 40 | fprintf('prefetched read\n') ; 41 | clear ims_ ; % do not account for the time required to delete this 42 | tic ; 43 | ims_ = vl_imreadjpeg(files, opts{:}) ; 44 | indirect(t) = toc ; 45 | pause(1) ; 46 | end 47 | 48 | n = numel(ims) ; 49 | fprintf('** test results preallocate %d\n', preallocate) ; 50 | fprintf('\tsingle thread: %.1f pm %.1f\n', 
mean(n./directSingle), std(n./directSingle)) ; 51 | fprintf('\t%d threads: %.1f pm %.1f\n', numThreads, mean(n./direct), std(n./direct)) ; 52 | fprintf('\tissue prefetch: %.1f pm %.1f\n', mean(n./prefetch), std(n./prefetch)) ; 53 | fprintf('\tretrieve prefetched: %.1f pm %.1f\n', mean(n./indirect), std(n./indirect)) ; 54 | fprintf('\n\n') ; 55 | end 56 | 57 | return 58 | -------------------------------------------------------------------------------- /matlab/xtest/vl_test_bnorm.m: -------------------------------------------------------------------------------- 1 | %% 2 | % Test function to compare vl_nnbnorm with its old implementation 3 | % (vl_nnbnorm_old) on the CPU or GPU using vl_testsim 4 | %% 5 | 6 | gpu = true ; % set to false to test on the CPU 7 | 8 | T = 1 ; 9 | x = randn(64,64,32,32,'single') ; 10 | g = randn(32,1,'single') ; 11 | b = randn(32,1,'single') ; 12 | 13 | if gpu 14 | x = gpuArray(x) ; 15 | g = gpuArray(g) ; 16 | b = gpuArray(b) ; 17 | end 18 | 19 | a=vl_nnbnorm(x,g,b); 20 | a_=vl_nnbnorm_old(x,g,b); 21 | 22 | vl_testsim(a,a_) 23 | -------------------------------------------------------------------------------- /matlab/xtest/vl_test_economic_relu.m: -------------------------------------------------------------------------------- 1 | % VL_TEST_ECONOMIC_RELU 2 | function vl_test_economic_relu() 3 | 4 | x = randn(11,12,8,'single'); 5 | w = randn(5,6,8,9,'single'); 6 | b = randn(1,9,'single') ; 7 | 8 | net.layers{1} = struct('type', 'conv', ... 9 | 'filters', w, ... 10 | 'biases', b, ... 11 | 'stride', 1, ... 12 | 'pad', 0); 13 | net.layers{2} = struct('type', 'relu') ; 14 | 15 | res = vl_simplenn(net, x) ; 16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ; 17 | clear res ; 18 | 19 | res_ = vl_simplenn(net, x, dzdy) ; 20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ; 21 | 22 | a=whos('res_') ; 23 | b=whos('res__') ; 24 | assert(a.bytes > b.bytes) ; 25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ; 26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ; 27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ; 28 | -------------------------------------------------------------------------------- /matlab/xtest/vl_test_gpureset.m: -------------------------------------------------------------------------------- 1 | for explicitMexReset = [false] 2 | 3 | % reset the same GPU device 4 | for t = 1:6 5 | if explicitMexReset, clear mex ; end 6 | if mod(t-1,2) == 0 7 | disp('vl_test_gpureset: resetting GPU') ; 8 | gpuDevice(1) ; 9 | else 10 | disp('vl_test_gpureset: not resetting GPU') ; 11 | end 12 | if t > 1, disp(a) ; end 13 | a = gpuArray(single(ones(10))) ; 14 | b = gpuArray(single(ones(5))) ; 15 | c = vl_nnconv(a,b,[],'nocudnn') ; 16 | end 17 | 18 | % passing stale GPU arrays to a MEX file after a device reset should fail properly 19 | a = gpuArray(single(ones(10))) ; 20 | b = gpuArray(single(ones(5))) ; 21 | c = vl_nnconv(a,b,[],'nocudnn') ; 22 | 23 | gpuDevice(1) ; 24 | disp(a) ; 25 | try 26 | c = vl_nnconv(a,b,[],'nocudnn') ; 27 | catch e 28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ; 29 | end 30 | 31 | % switch GPU devices 32 | if gpuDeviceCount > 1 33 | disp('vl_test_gpureset: test switching GPU device') ; 34 | for t = 1:gpuDeviceCount 35 | if explicitMexReset, clear mex ; end 36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ; 37 | gpuDevice(t) ; 38 | a = gpuArray(single(ones(10))) ; 39 | b = gpuArray(single(ones(5))) ; 40 | c = vl_nnconv(a,b,[],'nocudnn') ; 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- 
/matlab/xtest/vl_test_imreadjpeg.m: -------------------------------------------------------------------------------- 1 | function vl_test_imreadjpeg 2 | % VL_TEST_IMREADJPEG 3 | 4 | % Test basic file reading capability 5 | for t=1:6 6 | files{t} = which(sprintf('office_%d.jpg', t)) ; 7 | end 8 | ims = vl_imreadjpeg(files) ; 9 | 10 | % Test inserting a non-image file 11 | files_ = files ; 12 | files_{3} = [mfilename('fullpath') '.m']; 13 | ims_ = vl_imreadjpeg(files_) ; 14 | for t=setdiff(1:6,3) 15 | assert(isequal(ims{t},ims_{t})) ; 16 | end 17 | 18 | % Test inserting a non-existing file 19 | files__ = files_ ; 20 | files__{4} = 'idontexist.jpg' ; 21 | ims__ = vl_imreadjpeg(files__) ; 22 | for t=setdiff(1:6,[3 4]) 23 | assert(isequal(ims{t},ims__{t})) ; 24 | end 25 | 26 | for n = 1:4 27 | % Test prefetching 28 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ; 29 | ims___ = vl_imreadjpeg(files) ; 30 | assert(isequal(ims,ims___)) ; 31 | 32 | % Hardening: test prefetching, clearing mex, fetching 33 | vl_imreadjpeg(files,'prefetch') ; 34 | clear mex ; 35 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ; 36 | assert(isequal(ims,ims___)) ; 37 | end 38 | -------------------------------------------------------------------------------- /matlab/xtest/vl_testnn.m: -------------------------------------------------------------------------------- 1 | function vl_testnn(varargin) 2 | %VL_TESTNN Run MatConvNet test suite 3 | % VL_TESTNN('option', value, ...) takes the following options: 4 | % `cpu`:: true 5 | % Run the CPU tests. 6 | % 7 | % `gpu`:: false 8 | % Run the GPU tests. 9 | % 10 | % `single`:: true 11 | % Perform tests in single precision. 12 | % 13 | % `double`:: false 14 | % Perform tests in double precision. 15 | % 16 | % `command`:: `'nn'` 17 | % Run only tests whose name starts with the specified substring. 18 | % E.g. `vl_testnn('command', 'nnloss')` would run only the nnloss tests. 19 | % 20 | % `break`:: false 21 | % Stop tests in case of error. 22 | % 23 | % `tapFile`:: '' 24 | % Output the test results to a file. If the specified file already 25 | % exists, it is overwritten. 26 | % 27 | % This function uses the Matlab unit testing framework which was 28 | % introduced in Matlab R2013a (v8.1). 29 | 30 | % Copyright (C) 2015-16 Andrea Vedaldi, Karel Lenc. 31 | % All rights reserved. 32 | % 33 | % This file is part of the VLFeat library and is made available under 34 | % the terms of the BSD license (see the COPYING file).
35 | 36 | opts.cpu = true ; 37 | opts.gpu = false ; 38 | opts.single = true ; 39 | opts.double = false ; 40 | opts.command = 'nn' ; 41 | opts.break = false ; 42 | opts.tapFile = ''; 43 | opts = vl_argparse(opts, varargin) ; 44 | 45 | import matlab.unittest.constraints.* ; 46 | import matlab.unittest.selectors.* ; 47 | import matlab.unittest.plugins.TAPPlugin; 48 | import matlab.unittest.plugins.ToFile; 49 | 50 | % Choose which tests to run 51 | sel = HasName(StartsWithSubstring(opts.command)) ; 52 | if ~opts.gpu 53 | sel = sel & ~HasName(ContainsSubstring('device=gpu')) ; 54 | end 55 | if ~opts.cpu 56 | sel = sel & ~HasName(ContainsSubstring('device=cpu')) ; 57 | end 58 | if ~opts.double 59 | sel = sel & ~HasName(ContainsSubstring('dataType=double')) ; 60 | end 61 | if ~opts.single 62 | sel = sel & ~HasName(ContainsSubstring('dataType=single')) ; 63 | end 64 | 65 | % Run tests 66 | root = fileparts(mfilename('fullpath')) ; 67 | suite = matlab.unittest.TestSuite.fromFolder(fullfile(root, 'suite'), sel) ; 68 | runner = matlab.unittest.TestRunner.withTextOutput('Verbosity',3); 69 | if opts.break 70 | runner.addPlugin(matlab.unittest.plugins.StopOnFailuresPlugin) ; 71 | end 72 | if ~isempty(opts.tapFile) 73 | if exist(opts.tapFile, 'file') 74 | delete(opts.tapFile); 75 | end 76 | runner.addPlugin(TAPPlugin.producingOriginalFormat(ToFile(opts.tapFile))); 77 | end 78 | result = runner.run(suite); 79 | display(result) 80 | -------------------------------------------------------------------------------- /utils/get-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | local_dir="$1" 4 | url="$2" 5 | 6 | function get_filename_from_url() { 7 | regexp='^([^\/]*\/)+' 8 | echo -n "$1" | sed -r "s/$regexp//g" 9 | } 10 | 11 | function get_remote_file_size() { 12 | curl -sI "$1" | grep Content-Length | grep -o '[0-9][0-9]*' 13 | } 14 | 15 | filename=$(get_filename_from_url "$url") 16 | local_path="$local_dir/$filename" 17 | remote_size=$(get_remote_file_size "$url") 18 | 19 | echo "Getting: $url" 20 | echo " File: $filename" 21 | echo " Local file path: $local_path" 22 | echo " Remote file size: $remote_size" 23 | 24 | if [ -e "$local_path" ] 25 | then 26 | local_size=$(stat -c%s "$local_path") 27 | echo " Local file size: $local_size" 28 | if [[ "$local_size" -eq "$remote_size" ]] 29 | then 30 | echo " Local and remote file sizes match: not downloading" 31 | exit 0 32 | else 33 | echo " Trying to resume partial download" 34 | if curl -f -C - -o "$local_path" "$url" 35 | then 36 | echo " Download completed successfully" 37 | exit 0 38 | else 39 | echo " Could not resume" 40 | fi 41 | fi 42 | fi 43 | 44 | echo " Downloading the whole file" 45 | curl -f -o "$local_path" "$url" 46 | exit $? 47 | -------------------------------------------------------------------------------- /utils/import-googlenet.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # brief: Import various CNN models from the web 3 | # author: Karel Lenc and Andrea Vedaldi 4 | 5 | # Models are written to /data/models 6 | # You can delete /data/tmp after conversion 7 | 8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?) 
9 | 10 | overwrite=yes 11 | 12 | CAFFE_URL=http://dl.caffe.berkeleyvision.org/ 13 | GOOGLENET_PROTO_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/train_val_googlenet.prototxt 14 | GOOGLENET_MODEL_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_googlenet.caffemodel 15 | GOOGLENET_MEAN_URL=http://vision.princeton.edu/pvt/GoogLeNet/ImageNet/imagenet_mean.binaryproto 16 | 17 | # Obtain the path of this script 18 | pushd `dirname $0` > /dev/null 19 | SCRIPTPATH=`pwd` 20 | popd > /dev/null 21 | 22 | #converter="python -m pdb $SCRIPTPATH/import-caffe.py" 23 | converter="python $SCRIPTPATH/import-caffe.py" 24 | data="$SCRIPTPATH/../data/models-import" 25 | 26 | mkdir -pv "$data/tmp/googlenet" 27 | 28 | function get() 29 | { 30 | "$SCRIPTPATH/get-file.sh" "$data/tmp/googlenet" "$1" 31 | } 32 | 33 | # -------------------------------------------------------------------- 34 | # GoogLeNet 35 | # -------------------------------------------------------------------- 36 | 37 | get "$CAFFE_URL/caffe_ilsvrc12.tar.gz" 38 | (cd "$data/tmp/googlenet" ; tar xzvf caffe_ilsvrc12.tar.gz) 39 | 40 | get "$GOOGLENET_PROTO_URL" 41 | get "$GOOGLENET_MODEL_URL" 42 | get "$GOOGLENET_MEAN_URL" 43 | 44 | ( 45 | cd "$data/tmp/googlenet" ; 46 | cp -v train_val_googlenet.prototxt train_val_googlenet_patched.prototxt 47 | patch -Np0 < "$SCRIPTPATH/proto/googlenet_prototxt_patch.diff" 48 | ) 49 | 50 | base="$data/tmp/googlenet" 51 | out="$data/imagenet-googlenet-dag.mat" 52 | 53 | if test -f "$out" -a -z "$overwrite" 54 | then 55 | echo "$out exists; skipping." 56 | else 57 | $converter \ 58 | --caffe-variant=caffe_0115 \ 59 | --preproc=vgg-caffe \ 60 | --remove-dropout \ 61 | --remove-loss \ 62 | --append-softmax="cls3_fc" \ 63 | --average-image="$base/imagenet_mean.binaryproto" \ 64 | --synsets="$base/synset_words.txt" \ 65 | --caffe-data="$base/imagenet_googlenet.caffemodel" \ 66 | "$base/train_val_googlenet_patched.prototxt" \ 67 | "$out" 68 | fi 69 | -------------------------------------------------------------------------------- /utils/import-resnet.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # brief: Import various CNN models from the web 3 | # author: Karel Lenc and Andrea Vedaldi 4 | 5 | # Models are written to /data/models 6 | # You can delete /data/tmp after conversion 7 | 8 | # TODO apply patch to prototxt which will resize the outputs of cls layers from 205 -> 1000 (maybe sed?) 
9 | 10 | overwrite=yes 11 | 12 | CAFFE_URL=http://dl.caffe.berkeleyvision.org/ 13 | RESNET_URL=http://research.microsoft.com/en-us/um/people/kahe/resnet/models.zip 14 | 15 | # Obtain the path of this script 16 | pushd `dirname $0` > /dev/null 17 | SCRIPTPATH=`pwd` 18 | popd > /dev/null 19 | 20 | converter="python $SCRIPTPATH/import-caffe.py" 21 | data="$SCRIPTPATH/../data/models-import" 22 | 23 | mkdir -pv "$data/tmp/resnet" 24 | 25 | function get() 26 | { 27 | "$SCRIPTPATH/get-file.sh" "$data/tmp/resnet" "$1" 28 | } 29 | 30 | # -------------------------------------------------------------------- 31 | # Resnet 32 | # -------------------------------------------------------------------- 33 | 34 | get "$CAFFE_URL/caffe_ilsvrc12.tar.gz" 35 | (cd "$data/tmp/resnet" ; tar xzvf caffe_ilsvrc12.tar.gz) 36 | 37 | get "$RESNET_URL" 38 | (cd "$data/tmp/resnet" ; unzip -n models.zip) 39 | 40 | for t in 50 101 152 41 | do 42 | base="$data/tmp/resnet" 43 | out="$data/imagenet-resnet-$t-dag.mat" 44 | cdata=--caffe-data="$base/ResNet-$t-model.caffemodel" 45 | 46 | if test -f "$out" -a -z "$overwrite" 47 | then 48 | echo "$out exists; skipping." 49 | else 50 | $converter \ 51 | --caffe-variant=caffe_b590f1d \ 52 | --preproc=vgg-caffe \ 53 | --remove-dropout \ 54 | --remove-loss \ 55 | --average-image="$base/ResNet_mean.binaryproto" \ 56 | --synsets="$base/synset_words.txt" \ 57 | $cdata \ 58 | "$base/ResNet-$t-deploy.prototxt" \ 59 | "$out" 60 | fi 61 | done 62 | -------------------------------------------------------------------------------- /utils/model2dot.m: -------------------------------------------------------------------------------- 1 | function model2dot(modelPath, outPath, varargin) 2 | %MODEL2DOT Convert a model to Graphviz dot 3 | % MODEL2DOT(MODEL_PATH, OUT_PATH) generates a Graphviz dot file OUT_PATH 4 | % from the MatConvNet model MODEL_PATH. 5 | % 6 | % By default, the script attempts to guess the input sizes based on the 7 | % network normalization options and the parameter `batchSize`. However, if 8 | % the network has multiple inputs, the parameter `inputs` should be specified; 9 | % without it, the output dot graph does not contain the variable sizes. 10 | % 11 | % MODEL2DOT(..., 'Option', value) takes the following options: 12 | % 13 | % `BatchSize`:: 256 14 | % Default batch size used when the input size is guessed from the net normalization. 15 | % 16 | % `inputs`:: [] 17 | % When specified, passed to `dagnn.DagNN.print` as inputs. 18 | 19 | % Copyright (C) 2015 Karel Lenc. 20 | % All rights reserved. 21 | % 22 | % This file is part of the VLFeat library and is made available under 23 | % the terms of the BSD license (see the COPYING file). 24 | run(fullfile(fileparts(fileparts(mfilename('fullpath'))), 'matlab', 'vl_setupnn.m')); 25 | 26 | opts.batchSize = 256; 27 | opts.inputs = []; 28 | opts = vl_argparse(opts, varargin); 29 | 30 | if ~exist(modelPath, 'file') 31 | error('Model %s does not exist.', modelPath); 32 | end 33 | fprintf('Loading %s.\n', modelPath); 34 | obj = load(modelPath); 35 | 36 | if isstruct(obj.layers) % DAGnn format 37 | net = dagnn.DagNN.loadobj(obj); 38 | elseif iscell(obj.layers) 39 | net = dagnn.DagNN.fromSimpleNN(obj); 40 | else 41 | error('Invalid model.'); 42 | end 43 | 44 | inputs = opts.inputs; 45 | inputNames = net.getInputs(); 46 | if isempty(inputs) && numel(inputNames) == 1 ... 47 | && isfield(obj, 'meta') && isfield(obj.meta, 'normalization') ... 
48 | && isfield(obj.meta.normalization, 'imageSize') 49 | inputSize = [obj.meta.normalization.imageSize(1:3), opts.batchSize]; 50 | fprintf('Input %s guessed to be: %s.\n', inputNames{1}, mat2str(inputSize)); 51 | inputs = {inputNames{1}, inputSize}; 52 | end 53 | 54 | if isempty(inputs) 55 | warning('Input sizes not specified.'); 56 | dot_c = net.print('format', 'dot'); 57 | else 58 | dot_c = net.print(inputs, 'format', 'dot'); 59 | end 60 | 61 | out_f = fopen(outPath, 'w'); 62 | if out_f == -1, error('Unable to open %s.', outPath); end 63 | fprintf(out_f, '%s', dot_c); 64 | fclose(out_f); 65 | fprintf('Model %s exported to %s.\n', modelPath, outPath); 66 | -------------------------------------------------------------------------------- /utils/proto/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XiaoxiaSun/supervised-deep-sparse-coding-networks/cd87093d26f1dff522c544d812118792855d3986/utils/proto/__init__.py -------------------------------------------------------------------------------- /utils/proto/get-protos.sh: -------------------------------------------------------------------------------- 1 | # FCN 2 | wget -nc "https://raw.githubusercontent.com/longjon/caffe/6e3916766c6b63bff07e2cfadf210ee5e46af807/src/caffe/proto/caffe.proto" --output-document=./caffe_6e3916.proto 3 | protoc ./caffe_6e3916.proto --python_out=./ 4 | 5 | # b590f1d (ResNet) 6 | wget -nc "https://raw.githubusercontent.com/BVLC/caffe/b590f1d27eb5cbd9bc7b9157d447706407c68682/src/caffe/proto/caffe.proto" --output-document=./caffe_b590f1d.proto 7 | protoc ./caffe_b590f1d.proto --python_out=./ 8 | -------------------------------------------------------------------------------- /utils/test_examples.m: -------------------------------------------------------------------------------- 1 | function test_examples() 2 | %TEST_EXAMPLES Test some of the examples in the `examples/` directory 3 | 4 | addpath examples/mnist ; 5 | addpath examples/cifar ; 6 | 7 | trainOpts.gpus = [] ; 8 | trainOpts.continue = true ; 9 | num = 1 ; 10 | 11 | exps = {} ; 12 | for networkType = {'dagnn', 'simplenn'} 13 | for index = 1:4 14 | clear ex ; 15 | ex.trainOpts = trainOpts ; 16 | ex.networkType = char(networkType) ; 17 | ex.index = index ; 18 | exps{end+1} = ex ; 19 | end 20 | end 21 | 22 | if num > 1 23 | if isempty(gcp('nocreate')), 24 | parpool('local',num) ; 25 | end 26 | parfor e = 1:numel(exps) 27 | test_one(exps{e}) ; 28 | end 29 | else 30 | for e = 1:numel(exps) 31 | test_one(exps{e}) ; 32 | end 33 | end 34 | 35 | % ------------------------------------------------------------------------ 36 | function test_one(ex) 37 | % ------------------------------------------------------------------------- 38 | 39 | suffix = ['-' ex.networkType] ; 40 | switch ex.index 41 | case 1 42 | cnn_mnist(... 43 | 'expDir', ['data/test-mnist' suffix], ... 44 | 'batchNormalization', false, ... 45 | 'networkType', ex.networkType, ... 46 | 'train', ex.trainOpts) ; 47 | 48 | case 2 49 | cnn_mnist(... 50 | 'expDir', ['data/test-mnist-bnorm' suffix], ... 51 | 'batchNormalization', true, ... 52 | 'networkType', ex.networkType, ... 53 | 'train', ex.trainOpts) ; 54 | 55 | case 3 56 | cnn_cifar(... 57 | 'expDir', ['data/test-cifar-lenet' suffix], ... 58 | 'modelType', 'lenet', ... 59 | 'networkType', ex.networkType, ... 60 | 'train', ex.trainOpts) ; 61 | 62 | case 4 63 | cnn_cifar(... 64 | 'expDir', ['data/test-cifar-nin' suffix], ... 65 | 'modelType', 'nin', ... 
66 | 'networkType', ex.networkType, ... 67 | 'train', ex.trainOpts) ; 68 | end 69 | -------------------------------------------------------------------------------- /utils/tidy_ref_models.m: -------------------------------------------------------------------------------- 1 | function tidy_ref_models() 2 | % Update reference models to latest MatConvNet version 3 | 4 | run(fullfile(fileparts(mfilename('fullpath')), '..', 'matlab', 'vl_setupnn.m')) ; 5 | 6 | models = {... 7 | 'imagenet-resnet-152-dag', ... 8 | 'imagenet-resnet-101-dag', ... 9 | 'imagenet-resnet-50-dag', ... 10 | 'imagenet-matconvnet-alex', ... 11 | 'imagenet-matconvnet-vgg-f', ... 12 | 'imagenet-matconvnet-vgg-m', ... 13 | 'imagenet-matconvnet-vgg-m', ... 14 | 'imagenet-matconvnet-vgg-s', ... 15 | 'imagenet-matconvnet-vgg-verydeep-16', ... 16 | 'imagenet-caffe-ref', ... 17 | 'imagenet-caffe-alex', ... 18 | 'imagenet-vgg-s', ... 19 | 'imagenet-vgg-m', ... 20 | 'imagenet-vgg-f', ... 21 | 'imagenet-vgg-m-128', ... 22 | 'imagenet-vgg-m-1024', ... 23 | 'imagenet-vgg-m-2048', ... 24 | 'imagenet-vgg-verydeep-19', ... 25 | 'imagenet-vgg-verydeep-16', ... 26 | 'imagenet-googlenet-dag', ... 27 | 'pascal-fcn16s-dag', ... 28 | 'pascal-fcn32s-dag', ... 29 | 'pascal-fcn8s-dag', ... 30 | 'pascal-fcn8s-tvg-dag', ... 31 | 'vgg-face', ... 32 | } ; 33 | 34 | mkdir(fullfile('data', 'models')) ; 35 | 36 | for i = 1:numel(models) 37 | inPath = fullfile('data', 'models-import', [models{i} '.mat']) ; 38 | outPath = fullfile('data', 'models', [models{i} '.mat']) ; 39 | if exist(outPath), continue ; end 40 | 41 | fprintf('%s: loading ''%s''\n', mfilename, inPath) ; 42 | net = load(inPath) ; 43 | % Cannot use isa('dagnn.DagNN') because it is not an object yet 44 | isDag = isfield(net, 'params') ; 45 | 46 | if isDag 47 | net = dagnn.DagNN.loadobj(net) ; 48 | net = net.saveobj() ; 49 | else 50 | net = vl_simplenn_tidy(net) ; 51 | end 52 | 53 | fprintf('%s: saving ''%s''\n', mfilename, outPath) ; 54 | save(fullfile('data', 'models', [models{i} '.mat']), '-struct', 'net') ; 55 | end 56 | --------------------------------------------------------------------------------