├── GM-SOP_arch.png
├── README.md
├── examples
│   ├── GM
│   │   ├── cnn_imagenet64.m
│   │   ├── cnn_imagenet64_init_resnet.m
│   │   ├── cnn_imagenet_init_resnet.m
│   │   ├── cnn_init_WRN_GM.m
│   │   ├── cnn_init_WRN_baseline.m
│   │   ├── cnn_init_resnet_GM.m
│   │   ├── cnn_init_resnet_baseline.m
│   │   ├── dDiag.m
│   │   ├── diagInv.m
│   │   ├── diag_fun.m
│   │   ├── diag_fun_deri.m
│   │   ├── getImageBatch.m
│   │   ├── getImageStats.m
│   │   ├── imdb.mat
│   │   ├── symmetric.m
│   │   └── xmode2vector.m
│   └── cnn_train_GM_dag.m
└── matlab
    ├── +dagnn
    │   ├── @DagNN
    │   │   ├── DagNN.m
    │   │   ├── ExecutionOrder_GM.m
    │   │   ├── addLayer.m
    │   │   ├── eval.m
    │   │   ├── fromSimpleNN.m
    │   │   ├── getVarReceptiveFields.m
    │   │   ├── getVarSizes.m
    │   │   ├── initParams.m
    │   │   ├── loadobj.m
    │   │   ├── move.m
    │   │   ├── print.m
    │   │   ├── rebuild.m
    │   │   ├── removeLayer.m
    │   │   ├── renameLayer.m
    │   │   ├── renameParam.m
    │   │   ├── renameVar.m
    │   │   ├── reset.m
    │   │   ├── saveobj.m
    │   │   ├── setLayerInputs.m
    │   │   ├── setLayerOutputs.m
    │   │   └── setLayerParams.m
    │   ├── AffineGridGenerator.m
    │   ├── Balance_loss.m
    │   ├── BatchNorm.m
    │   ├── BilinearSampler.m
    │   ├── CM_out.m
    │   ├── Concat.m
    │   ├── Conv.m
    │   ├── ConvTranspose.m
    │   ├── Crop.m
    │   ├── DropOut.m
    │   ├── ElementWise.m
    │   ├── Filter.m
    │   ├── H_x.m
    │   ├── LRN.m
    │   ├── Layer.m
    │   ├── Layer_old.m
    │   ├── Loss.m
    │   ├── MPN_COV_Pool.m
    │   ├── MPN_COV_Pool_C.m
    │   ├── NormOffset.m
    │   ├── OBJ_ConvNet_COV_Pool.m
    │   ├── OBJ_ConvNet_COV_ScaleTr.m
    │   ├── OBJ_ConvNet_Cov_Sqrtm.m
    │   ├── OBJ_ConvNet_Cov_TraceNorm.m
    │   ├── PDist.m
    │   ├── Pooling.m
    │   ├── ROIPooling.m
    │   ├── ReLU.m
    │   ├── Scale.m
    │   ├── Sigmoid.m
    │   ├── SoftMax.m
    │   ├── SpatialNorm.m
    │   ├── Sum.m
    │   ├── UniformScalingGridGenerator.m
    │   └── gating.m
    ├── EIG.m
    ├── ParameterServer.m
    ├── compatibility
    │   └── parallel
    │       ├── gather.m
    │       ├── labindex.m
    │       └── numlabs.m
    ├── functionSignatures.json
    ├── simplenn
    │   ├── vl_simplenn.m
    │   ├── vl_simplenn_diagnose.m
    │   ├── vl_simplenn_display.m
    │   ├── vl_simplenn_move.m
    │   ├── vl_simplenn_start_parserv.m
    │   └── vl_simplenn_tidy.m
    ├── src
    │   ├── bits
    │   │   ├── config
    │   │   │   ├── mex_CUDA_glnxa64.sh
    │   │   │   ├── mex_CUDA_glnxa64.xml
    │   │   │   ├── mex_CUDA_maci64.sh
    │   │   │   └── mex_CUDA_maci64.xml
    │   │   ├── data.cpp
    │   │   ├── data.cu
    │   │   ├── data.hpp
    │   │   ├── datacu.cu
    │   │   ├── datacu.hpp
    │   │   ├── datamex.cpp
    │   │   ├── datamex.cu
    │   │   ├── datamex.hpp
    │   │   ├── impl
    │   │   │   ├── bilinearsampler.hpp
    │   │   │   ├── bilinearsampler_cpu.cpp
    │   │   │   ├── bilinearsampler_gpu.cu
    │   │   │   ├── blashelper.hpp
    │   │   │   ├── blashelper_cpu.hpp
    │   │   │   ├── blashelper_gpu.hpp
    │   │   │   ├── bnorm.hpp
    │   │   │   ├── bnorm_cpu.cpp
    │   │   │   ├── bnorm_gpu.cu
    │   │   │   ├── compat.h
    │   │   │   ├── copy.hpp
    │   │   │   ├── copy_cpu.cpp
    │   │   │   ├── copy_gpu.cu
    │   │   │   ├── cov_pool_cpu.cpp
    │   │   │   ├── cov_pool_gpu.cu
    │   │   │   ├── cov_sqrtm_cpu.cpp
    │   │   │   ├── cov_sqrtm_gpu.cu
    │   │   │   ├── cov_traceNorm_cpu.cpp
    │   │   │   ├── cov_traceNorm_gpu.cu
    │   │   │   ├── cudnnhelper.hpp
    │   │   │   ├── dispatcher.hpp
    │   │   │   ├── fast_mutex.h
    │   │   │   ├── im2row.hpp
    │   │   │   ├── im2row_cpu.cpp
    │   │   │   ├── im2row_gpu.cu
    │   │   │   ├── imread_gdiplus.cpp
    │   │   │   ├── imread_helpers.hpp
    │   │   │   ├── imread_libjpeg.cpp
    │   │   │   ├── imread_quartz.cpp
    │   │   │   ├── imsn_cov_cpu.cpp
    │   │   │   ├── imsn_cov_gpu.cu
    │   │   │   ├── mpn_cov_cpu.cpp
    │   │   │   ├── mpn_cov_gpu.cu
    │   │   │   ├── nnbias_blas.hpp
    │   │   │   ├── nnbias_cudnn.cu
    │   │   │   ├── nnbias_cudnn.hpp
    │   │   │   ├── nnbilinearsampler_cudnn.cu
    │   │   │   ├── nnbilinearsampler_cudnn.hpp
    │   │   │   ├── nnbnorm_cudnn.cu
    │   │   │   ├── nnbnorm_cudnn.hpp
    │   │   │   ├── nnconv_blas.hpp
    │   │   │   ├── nnconv_cudnn.cu
    │   │   │   ├── nnconv_cudnn.hpp
    │   │   │   ├── nncov_pool_blas.hpp
    │   │   │   ├── nncov_sqrtm_blas.hpp
    │   │   │   ├── nncov_traceNorm_blas.hpp
    │   │   │   ├── nnimsn_cov_blas.hpp
    │   │   │   ├── nnmpn_cov_blas.hpp
    │   │   │   ├── nnpooling_cudnn.cu
    │   │   │   ├── nnpooling_cudnn.hpp
    │   │   │   ├── normalize.hpp
    │   │   │   ├── normalize_cpu.cpp
    │   │   │   ├── normalize_gpu.cu
    │   │   │   ├── pooling.hpp
    │   │   │   ├── pooling_cpu.cpp
    │   │   │   ├── pooling_gpu.cu
    │   │   │   ├── sharedmem.cuh
    │   │   │   ├── subsample.hpp
    │   │   │   ├── subsample_cpu.cpp
    │   │   │   ├── subsample_gpu.cu
    │   │   │   ├── tinythread.cpp
    │   │   │   └── tinythread.h
    │   │   ├── imread.cpp
    │   │   ├── imread.hpp
    │   │   ├── mexutils.h
    │   │   ├── nnbias.cpp
    │   │   ├── nnbias.cu
    │   │   ├── nnbias.hpp
    │   │   ├── nnbias_cudnn.cu
    │   │   ├── nnbilinearsampler.cpp
    │   │   ├── nnbilinearsampler.cu
    │   │   ├── nnbilinearsampler.hpp
    │   │   ├── nnbilinearsampler_cudnn.cu
    │   │   ├── nnbilinearsampler_gpu.cu
    │   │   ├── nnbnorm.cpp
    │   │   ├── nnbnorm.cu
    │   │   ├── nnbnorm.hpp
    │   │   ├── nnbnorm_cudnn.cu
    │   │   ├── nnbnorm_gpu.cu
    │   │   ├── nnconv.cpp
    │   │   ├── nnconv.cu
    │   │   ├── nnconv.hpp
    │   │   ├── nnconv_blas.hpp
    │   │   ├── nnconv_cudnn.cu
    │   │   ├── nnconv_cudnn.hpp
    │   │   ├── nncov_pool.cpp
    │   │   ├── nncov_pool.cu
    │   │   ├── nncov_pool.hpp
    │   │   ├── nncov_sqrtm.cpp
    │   │   ├── nncov_sqrtm.cu
    │   │   ├── nncov_sqrtm.hpp
    │   │   ├── nncov_traceNorm.cpp
    │   │   ├── nncov_traceNorm.cu
    │   │   ├── nncov_traceNorm.hpp
    │   │   ├── nnfullyconnected.cpp
    │   │   ├── nnfullyconnected.cu
    │   │   ├── nnfullyconnected.hpp
    │   │   ├── nnimsn_cov.cpp
    │   │   ├── nnimsn_cov.cu
    │   │   ├── nnimsn_cov.hpp
    │   │   ├── nnmpn_cov.cpp
    │   │   ├── nnmpn_cov.cu
    │   │   ├── nnmpn_cov.hpp
    │   │   ├── nnnormalize.cpp
    │   │   ├── nnnormalize.cu
    │   │   ├── nnnormalize.hpp
    │   │   ├── nnnormalize_gpu.cu
    │   │   ├── nnnormalizelp.cpp
    │   │   ├── nnnormalizelp.cu
    │   │   ├── nnnormalizelp.hpp
    │   │   ├── nnnormalizelp_gpu.cu
    │   │   ├── nnpooling.cpp
    │   │   ├── nnpooling.cu
    │   │   ├── nnpooling.hpp
    │   │   ├── nnpooling_cudnn.cu
    │   │   ├── nnpooling_gpu.cu
    │   │   ├── nnroipooling.cpp
    │   │   ├── nnroipooling.cu
    │   │   ├── nnroipooling.hpp
    │   │   ├── nnroipooling_gpu.cu
    │   │   ├── nnsubsample.cpp
    │   │   ├── nnsubsample.cu
    │   │   ├── nnsubsample.hpp
    │   │   └── nnsubsample_gpu.cu
    │   ├── config
    │   │   ├── mex_CUDA_glnxa64.sh
    │   │   ├── mex_CUDA_glnxa64.xml
    │   │   ├── mex_CUDA_maci64.sh
    │   │   └── mex_CUDA_maci64.xml
    │   ├── vl_cudatool.cpp
    │   ├── vl_cudatool.cu
    │   ├── vl_imreadjpeg.cpp
    │   ├── vl_imreadjpeg.cu
    │   ├── vl_imreadjpeg_old.cpp
    │   ├── vl_imreadjpeg_old.cu
    │   ├── vl_nnbilinearsampler.cpp
    │   ├── vl_nnbilinearsampler.cu
    │   ├── vl_nnbnorm.cpp
    │   ├── vl_nnbnorm.cu
    │   ├── vl_nnconv.cpp
    │   ├── vl_nnconv.cu
    │   ├── vl_nnconvt.cpp
    │   ├── vl_nnconvt.cu
    │   ├── vl_nncov_pool.cpp
    │   ├── vl_nncov_pool.cu
    │   ├── vl_nncov_sqrtm.cpp
    │   ├── vl_nncov_sqrtm.cu
    │   ├── vl_nncov_traceNorm.cpp
    │   ├── vl_nncov_traceNorm.cu
    │   ├── vl_nnimsn_cov.cpp
    │   ├── vl_nnimsn_cov.cu
    │   ├── vl_nnmpn_cov.cpp
    │   ├── vl_nnmpn_cov.cu
    │   ├── vl_nnnormalize.cpp
    │   ├── vl_nnnormalize.cu
    │   ├── vl_nnnormalizelp.cpp
    │   ├── vl_nnnormalizelp.cu
    │   ├── vl_nnpool.cpp
    │   ├── vl_nnpool.cu
    │   ├── vl_nnroipool.cpp
    │   ├── vl_nnroipool.cu
    │   ├── vl_taccummex.cpp
    │   ├── vl_taccummex.cu
    │   ├── vl_tmove.cpp
    │   └── vl_tmove.cu
    ├── vl_argparse.m
    ├── vl_compilenn.m
    ├── vl_compilenn_beta22_isqrt.m
    ├── vl_compilenn_beta25_old.m
    ├── vl_contrib.m
    ├── vl_imreadjpeg.m
    ├── vl_nnbilinearsampler.m
    ├── vl_nnbnorm.m
    ├── vl_nnconcat.m
    ├── vl_nnconv.m
    ├── vl_nnconvt.m
    ├── vl_nncrop.m
    ├── vl_nndropout.m
    ├── vl_nnloss.m
    ├── vl_nnnoffset.m
    ├── vl_nnnormalize.m
    ├── vl_nnnormalizelp.m
    ├── vl_nnnotfound.m
    ├── vl_nnpdist.m
    ├── vl_nnpool.m
    ├── vl_nnrelu.m
    ├── vl_nnroipool.m
    ├── vl_nnsigmoid.m
    ├── vl_nnsoftmax.m
    ├── vl_nnsoftmaxloss.m
    ├── vl_nnspnorm.m
    ├── vl_rootnn.m
    ├── vl_setupnn.m
    ├── vl_taccum.m
    ├── vl_tmove.m
    ├── vl_tshow.m
    └── xtest
        ├── cmyk.jpg
        ├── suite
        │   ├── Scale.m
        │   ├── nnbilinearsampler.m
        │   ├── nnbnorm.m
        │   ├── nnconcat.m
        │   ├── nnconv.m
        │   ├── nnconvt.m
        │   ├── nndagnn.m
        │   ├── nndropout.m
        │   ├── nnloss.m
        │   ├── nnmnist.m
        │   ├── nnnormalize.m
        │   ├── nnnormalizelp.m
        │   ├── nnoffset.m
        │   ├── nnpdist.m
        │   ├── nnpool.m
        │   ├── nnrelu.m
        │   ├── nnroipool.m
        │   ├── nnsigmoid.m
        │   ├── nnsimplenn.m
        │   ├── nnsoftmax.m
        │   ├── nnsoftmaxloss.m
        │   ├── nnsolvers.m
        │   ├── nnspnorm.m
        │   ├── nntest.m
        │   └── tmovemex.m
        ├── vl_bench_bnorm.m
        ├── vl_bench_imreadjpeg.m
        ├── vl_nnbnorm_old.m
        ├── vl_nnnormalizelp_old.m
        ├── vl_test_bnorm.m
        ├── vl_test_economic_relu.m
        ├── vl_test_gpureset.m
        ├── vl_test_imreadjpeg.m
        ├── vl_test_print.m
        └── vl_testnn.m
/GM-SOP_arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZilinGao/GM-SOP/5922792067a4d6e5ca057b5429a221f2ea101a19/GM-SOP_arch.png
--------------------------------------------------------------------------------
/examples/GM/dDiag.m:
--------------------------------------------------------------------------------
1 | function M = dDiag(M)
2 | % double diag function, i.e. return a matrix that keeps only the
3 | % diagonal of M, with all off-diagonal elements set to zero
4 |
5 | if isa(M,'gpuArray')
6 | I = eye(size(M),'single','gpuArray');
7 | else
8 | I = eye(size(M), class(M));
9 | end
10 |
11 | M = I .* M;
--------------------------------------------------------------------------------
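A quick check of what dDiag computes (illustrative values, not a repository file): the result keeps only the diagonal of M.

    M = magic(3) ;                      % [8 1 6; 3 5 7; 4 9 2]
    D = dDiag(M) ;                      % [8 0 0; 0 5 0; 0 0 2]
    assert(isequal(D, diag(diag(M))))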
/examples/GM/diagInv.m:
--------------------------------------------------------------------------------
1 | function invX = diagInv(X)
2 | % compute the inverse of a diagonal matrix
3 | % no verification for speed
4 | % FIXME add some checks for zeros on the diagonal
5 |
6 | diagX = diag(X);
7 | invX = diag(1./diagX);
8 | end
--------------------------------------------------------------------------------
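For a diagonal matrix with a nonzero diagonal, diagInv agrees with the general inverse while skipping its cost; a small sanity check (illustrative):

    X = diag([2 4 5]) ;
    assert(norm(diagInv(X) - inv(X)) < 1e-12)   % diag(1./[2 4 5])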
/examples/GM/diag_fun.m:
--------------------------------------------------------------------------------
1 | % function out_S = diag_fun(S, epsilon, type, par) %lph 2016/11/11 5:50
2 | function out_S = diag_fun(S, opts)
3 |
4 | switch opts.regu_method
5 | case {'power', 'power+1st_order', 'AF', 'power+mat-fro'}
6 | if isfield(opts, 'alpha')
7 | out_S = (S + opts.epsilon) .^ opts.alpha;
8 | else
9 | out_S = sqrt(S + opts.epsilon);
10 | end
11 |
12 | case {'power+mat-l2', 'mat-l2'}
13 | out_S = (S + opts.epsilon) .^ opts.alpha;
14 | out_S = out_S / out_S(1);
15 |
16 | case {'log'}
17 | % out_S = log(S+epsilon);
18 | out_S = log(S + opts.epsilon);
19 |
20 | case {'Burg'}
21 | k = length(S);
22 | beta = (1 - opts.alpha) / (2 * opts.alpha);
23 | P = exp(opts.weights{1}(1:k));
24 | out_S = sqrt(beta ^ 2 .* P .^ 2 + P .* S + opts.epsilon) - beta .* P;
25 |
26 | otherwise
27 | error('diagonal matrix function not supported!');
28 | end
29 | end
--------------------------------------------------------------------------------
/examples/GM/diag_fun_deri.m:
--------------------------------------------------------------------------------
1 | % function out_S = diag_fun_deri(S, epsilon, type, par) %lph 2016/11/11 5:50
2 | function out_S = diag_fun_deri(S, opts, wrt_which)
3 |
4 | switch opts.regu_method
5 |
6 | case {'power', 'power+1st_order', 'AF', 'power+mat-fro'}
7 | if isfield(opts, 'alpha')
8 | out_S = opts.alpha .* ((S + opts.epsilon) .^ (opts.alpha - 1));
9 | else
10 | out_S = 0.5 .* ( 1 ./ sqrt(S + opts.epsilon) );
11 | end
12 |
13 | case {'power+mat-l2', 'mat-l2'} % cannot be here
14 | error('Should not be here !');
15 |
16 | case {'log'}
17 | % out_S = 1 ./ (S + epsilon);
18 | out_S = 1 ./ (S + opts.epsilon);
19 |
20 | case {'Burg'}
21 | k = length(S);
22 | beta = (1 - opts.alpha) / (2 * opts.alpha);
23 | P = exp(opts.weights{1}(1:k));
24 | if strcmp(wrt_which, 'wrt_S')
25 | out_S = 0.5 * P ./ sqrt(beta^2 .* P .^ 2 + P .* S + opts.epsilon);
26 | else
27 | out_S = 0.5 * (2 * beta^2 .* P .^ 2 + P .* S) ./ sqrt(beta^2 .* P .^ 2 + P .* S + opts.epsilon) - beta .* P;
28 | end
29 |
30 | otherwise
31 | error('derivative of diagonal matrix function not supported!');
32 | end
33 | end
34 |
--------------------------------------------------------------------------------
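diag_fun_deri is the element-wise derivative of diag_fun with respect to S; a hedged finite-difference consistency check for the 'power' branch (opts values illustrative):

    opts = struct('regu_method', 'power', 'epsilon', 1e-3, 'alpha', 0.5) ;
    S = rand(5,1) ; h = 1e-6 ;
    num = (diag_fun(S + h, opts) - diag_fun(S - h, opts)) / (2*h) ;  % central difference
    ana = diag_fun_deri(S, opts, 'wrt_S') ;  % third argument only matters for 'Burg'
    assert(max(abs(num - ana)) < 1e-5)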
/examples/GM/getImageBatch.m:
--------------------------------------------------------------------------------
1 | function data = getImageBatch(imagePaths, varargin)
2 | % GETIMAGEBATCH Load and jitter a batch of images
3 |
4 | opts.useGpu = false ;
5 | opts.prefetch = false ;
6 | opts.numThreads = 1 ;
7 |
8 | opts.imageSize = [227, 227] ;
9 | opts.cropSize = 227 / 256 ;
10 | opts.keepAspect = true ;
11 | opts.subtractAverage = [] ;
12 |
13 | opts.jitterFlip = false ;
14 | opts.jitterLocation = false ;
15 | opts.jitterAspect = 1 ;
16 | opts.jitterScale = 1 ;
17 | opts.jitterBrightness = 0 ;
18 | opts.jitterContrast = 0 ;
19 | opts.jitterSaturation = 0 ;
20 |
21 | opts = vl_argparse(opts, varargin);
22 |
23 | args{1} = {imagePaths, ...
24 | 'NumThreads', opts.numThreads, ...
25 | 'Pack', ...
26 | 'Interpolation', 'bicubic', ...
27 | 'Resize', opts.imageSize(1:2), ...
28 | 'CropSize', opts.cropSize * opts.jitterScale, ...
29 | 'CropAnisotropy', opts.jitterAspect, ...
30 | 'Brightness', opts.jitterBrightness, ...
31 | 'Contrast', opts.jitterContrast, ...
32 | 'Saturation', opts.jitterSaturation} ;
33 |
34 | if ~opts.keepAspect
35 | % Squashing effect
36 | args{end+1} = {'CropAnisotropy', 0} ;
37 | end
38 |
39 | if opts.jitterFlip
40 | args{end+1} = {'Flip'} ;
41 | end
42 |
43 | if opts.jitterLocation
44 | args{end+1} = {'CropLocation', 'random'} ;
45 | else
46 | args{end+1} = {'CropLocation', 'center'} ;
47 | end
48 |
49 | if opts.useGpu
50 | args{end+1} = {'Gpu'} ;
51 | end
52 |
53 | if ~isempty(opts.subtractAverage)
54 | args{end+1} = {'SubtractAverage', opts.subtractAverage} ;
55 | end
56 |
57 | args = horzcat(args{:}) ;
58 |
59 | if opts.prefetch
60 | vl_imreadjpeg(args{:}, 'prefetch') ;
61 | data = [] ;
62 | else
63 | data = vl_imreadjpeg(args{:}) ;
64 | data = data{1} ;
65 | end
66 |
--------------------------------------------------------------------------------
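A minimal call sketch (assumes the vl_imreadjpeg MEX file is compiled; the image paths are hypothetical):

    paths = {'images/im1.jpg', 'images/im2.jpg'} ;
    data = getImageBatch(paths, ...
        'imageSize', [224 224], ...
        'numThreads', 4, ...
        'jitterFlip', true, ...
        'jitterLocation', true) ;   % H x W x 3 x numel(paths) single array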
/examples/GM/getImageStats.m:
--------------------------------------------------------------------------------
1 | function [averageImage, rgbMean, rgbCovariance] = getImageStats(images, varargin)
2 | %GETIMAGESTATS Get image statistics
3 |
4 | opts.gpus = [] ;
5 | opts.batchSize = 256 ;
6 | opts.imageSize = [256 256] ;
7 | opts.numThreads = 6 ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | avg = {} ;
11 | rgbm1 = {} ;
12 | rgbm2 = {} ;
13 |
14 | numGpus = numel(opts.gpus) ;
15 | if numGpus > 0
16 | fprintf('%s: resetting GPU device\n', mfilename) ;
17 | clear mex ;
18 | gpuDevice(opts.gpus(1))
19 | end
20 |
21 | for t=1:opts.batchSize:numel(images)
22 | time = tic ;
23 | batch = t : min(t+opts.batchSize-1, numel(images)) ;
24 | fprintf('collecting image stats: batch starting with image %d ...', batch(1)) ;
25 |
26 | data = getImageBatch(images(batch), ...
27 | 'numThreads', opts.numThreads, ...
28 | 'imageSize', opts.imageSize, ...
29 | 'useGpu', numGpus > 0) ;
30 |
31 | z = reshape(shiftdim(data,2),3,[]) ;
32 | rgbm1{end+1} = mean(z,2) ;
33 | rgbm2{end+1} = z*z'/size(z,2) ;
34 | avg{end+1} = mean(data, 4) ;
35 | time = toc(time) ;
36 | fprintf(' %.1f Hz\n', numel(batch) / time) ;
37 | end
38 |
39 | averageImage = gather(mean(cat(4,avg{:}),4)) ;
40 | rgbm1 = gather(mean(cat(2,rgbm1{:}),2)) ;
41 | rgbm2 = gather(mean(cat(3,rgbm2{:}),3)) ;
42 | rgbMean = rgbm1 ;
43 | rgbCovariance = rgbm2 - rgbm1*rgbm1' ;
44 |
45 | if numGpus > 0
46 | fprintf('%s: finished with GPU device, resetting again\n', mfilename) ;
47 | clear mex ;
48 | gpuDevice(opts.gpus(1)) ;
49 | end
50 | fprintf('%s: all done\n', mfilename) ;
51 |
--------------------------------------------------------------------------------
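Typical call (imagePaths is a hypothetical cell array of file names; this relies on getImageBatch above):

    [averageImage, rgbMean, rgbCovariance] = getImageStats(imagePaths, ...
        'imageSize', [256 256], 'gpus', []) ;
    % rgbCovariance = E[zz'] - E[z]E[z]' over all pixels z in R^3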
/examples/GM/imdb.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZilinGao/GM-SOP/5922792067a4d6e5ca057b5429a221f2ea101a19/examples/GM/imdb.mat
--------------------------------------------------------------------------------
/examples/GM/symmetric.m:
--------------------------------------------------------------------------------
1 | function a_sym = symmetric(a)
2 | if size(a,1) ~= size(a,2)
3 | error('symmetric requires a square matrix')
4 | end
5 | a_sym = 0.5* (a + a');
--------------------------------------------------------------------------------
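Usage sketch: symmetric is the usual symmetrization applied before an eigendecomposition, and its output is exactly symmetric:

    A = rand(4) ;
    S = symmetric(A) ;          % 0.5 * (A + A')
    assert(isequal(S, S'))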
/examples/GM/xmode2vector.m:
--------------------------------------------------------------------------------
1 | dim = 6;
2 | % index triples whose three entries are all different
3 | index = nchoosek(1:dim, 3);
4 | % index triples where one or two entries coincide
5 | x = (1:dim*dim)';
6 | y = ceil(x ./ dim);
7 | y(:,2) = y;
8 | pp = mod(x, dim);
9 | y(:,3) = (pp == 0) .* dim + pp;
10 | index(end+1:end+size(y,1), :) = y;
11 |
12 | % gather the indexed entries from a sample 3-mode tensor
13 | data = rand(9,9,9) * 100;
14 | vals = data(sub2ind(size(data), index(:,1), index(:,2), index(:,3)));
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/ExecutionOrder_GM.m:
--------------------------------------------------------------------------------
1 | function ExecutionOrder_GM(obj, varargin)
2 | % Re-rank the executionOrder for the multi-branch module
3 | opts.sectionLen = 2;
4 | opts = vl_argparse(opts, varargin) ;
5 |
6 | b5 = obj.getLayerIndex(['conv5_' num2str(opts.sectionLen) '_relu']);
7 | if isnan(b5)
8 | error('please check the name of the last layer before the gating module')
9 | end
10 | obj.executionOrder(b5+1:end) = [b5+1 : numel(obj.executionOrder) ];
11 |
12 |
13 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/addLayer.m:
--------------------------------------------------------------------------------
1 | function addLayer(obj, name, block, inputs, outputs, params, varargin)
2 | %ADDLAYER Adds a layer to a DagNN
3 | % ADDLAYER(NAME, LAYER, INPUTS, OUTPUTS, PARAMS) adds the
4 | % specified layer to the network. NAME is a string with the layer
5 | % name, used as a unique identifier. BLOCK is the object
6 | % implementing the layer, which should be a subclass of
7 | % dagnn.Layer. INPUTS, OUTPUTS are cell arrays of variable names, and
8 | % PARAMS of parameter names.
9 | %
10 | % See Also REMOVELAYER().
11 | opts.skipRebuild = false;
12 | opts = vl_argparse(opts, varargin);
13 |
14 | index = find(strcmp(name, {obj.layers.name})) ;
15 | if ~isempty(index), error('There is already a layer with name ''%s''.', name), end
16 | index = numel(obj.layers) + 1 ;
17 |
18 | if nargin < 6, params = {} ; end
19 | if ischar(inputs), inputs = {inputs} ; end
20 | if ischar(outputs), outputs = {outputs} ; end
21 | if ischar(params), params = {params} ; end
22 |
23 | obj.layers(index) = struct(...
24 | 'name', {name}, ...
25 | 'inputs', {inputs}, ...
26 | 'outputs', {outputs}, ...
27 | 'params', {params}, ...
28 | 'inputIndexes', {[]}, ...
29 | 'outputIndexes', {[]}, ...
30 | 'paramIndexes', {[]}, ...
31 | 'forwardTime', {[]}, ...
32 | 'backwardTime', {[]}, ...
33 | 'block', {block}) ;
34 | obj.layers(index).block.attach(obj, index) ;
35 | if ~opts.skipRebuild, obj.rebuild() ; end ;
36 |
--------------------------------------------------------------------------------
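For context, the typical calling pattern (a sketch; layer, variable, and parameter names are illustrative):

    net = dagnn.DagNN() ;
    net.addLayer('conv1', dagnn.Conv('size', [3 3 3 16], 'hasBias', true), ...
                 {'input'}, {'conv1_out'}, {'conv1_f', 'conv1_b'}) ;
    net.initParams() ;   % randomly initializes conv1_f and conv1_b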
/matlab/+dagnn/@DagNN/getVarSizes.m:
--------------------------------------------------------------------------------
1 | function sizes = getVarSizes(obj, inputSizes)
2 | %GETVARSIZES Get the size of the variables
3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the
4 | % DagNN variables given the size of the inputs. `inputSizes` is
5 | % a cell array of the type `{'inputName', inputSize, ...}`
6 | % Returns a cell array with sizes of all network variables.
7 | %
8 | % Example: compute the storage needed for a batch size of 256 for an
9 | % imagenet-like network:
10 | % ```
11 | % batch_size = 256; single_num_bytes = 4;
12 | % input_size = [net.meta.normalization.imageSize, batch_size];
13 | % var_sizes = net.getVarSizes({'data', input_size});
14 | % fprintf('Network activations will take %.2fMiB in single.\n', ...
15 | % sum(cellfun(@prod, var_sizes)) * single_num_bytes ./ 1024^2);
16 | % ```
17 |
18 | % Copyright (C) 2015 Andrea Vedaldi, Karel Lenc.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | nv = numel(obj.vars) ;
25 | sizes = num2cell(NaN(nv, 4),2)' ;
26 |
27 | for i = 1:2:numel(inputSizes)
28 | v = obj.getVarIndex(inputSizes{i}) ;
29 | if isnan(v)
30 | error('Variable `%s` not found in the network.', inputSizes{i});
31 | end;
32 | if isempty(inputSizes{i+1})
33 | sizes{v} = [0 0 0 0] ;
34 | else
35 | sizes{v} = [inputSizes{i+1}(:)' ones(1, 4 - numel(inputSizes{i+1}))] ;
36 | end
37 | end
38 |
39 | for layer = obj.layers(obj.executionOrder)
40 | in = layer.inputIndexes ;
41 | out = layer.outputIndexes ;
42 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ;
43 | end
44 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/initParams.m:
--------------------------------------------------------------------------------
1 | function initParams(obj)
2 | % INITPARAM Initialize the parameters of the DagNN
3 | % OBJ.INITPARAM() uses the INIT() method of each layer to initialize
4 | % the corresponding parameters (usually randomly).
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | for l = 1:numel(obj.layers)
13 | p = obj.getParamIndex(obj.layers(l).params) ;
14 | params = obj.layers(l).block.initParams() ;
15 | switch obj.device
16 | case 'cpu'
17 | params = cellfun(@gather, params, 'UniformOutput', false) ;
18 | case 'gpu'
19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ;
20 | end
21 | [obj.params(p).value] = deal(params{:}) ;
22 | end
23 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/loadobj.m:
--------------------------------------------------------------------------------
1 | function obj = loadobj(s)
2 | % LOADOBJ Initialize a DagNN object from a structure.
3 | % OBJ = LOADOBJ(S) initializes a DagNN object from the structure
4 | % S. It is the opposite of S = OBJ.SAVEOBJ().
5 | % If S is a string, initializes the DagNN object with data
6 | % from a mat-file S. Otherwise, if S is an instance of `dagnn.DagNN`,
7 | % returns S.
8 |
9 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | if ischar(s), s = load(s); end
16 | if isstruct(s)
17 | assert(isfield(s, 'layers'), 'Invalid model.');
18 | if ~isstruct(s.layers)
19 | warning('The model appears to be a `simplenn` model. Using `fromSimpleNN` instead.');
20 | obj = dagnn.DagNN.fromSimpleNN(s);
21 | return;
22 | end
23 | obj = dagnn.DagNN() ;
24 | for l = 1:numel(s.layers)
25 | constr = str2func(s.layers(l).type) ;
26 | block = constr() ;
27 | block.load(struct(s.layers(l).block)) ;
28 | obj.addLayer(...
29 | s.layers(l).name, ...
30 | block, ...
31 | s.layers(l).inputs, ...
32 | s.layers(l).outputs, ...
33 | s.layers(l).params,...
34 | 'skipRebuild', true) ;
35 | end
36 | obj.rebuild();
37 | if isfield(s, 'params')
38 | for f = setdiff(fieldnames(s.params)','name')
39 | f = char(f) ;
40 | for i = 1:numel(s.params)
41 | p = obj.getParamIndex(s.params(i).name) ;
42 | obj.params(p).(f) = s.params(i).(f) ;
43 | end
44 | end
45 | end
46 | if isfield(s, 'vars')
47 | for f = setdiff(fieldnames(s.vars)','name')
48 | f = char(f) ;
49 | for i = 1:numel(s.vars)
50 | p = obj.getVarIndex(s.vars(i).name) ;
51 | obj.vars(p).(f) = s.vars(i).(f) ;
52 | end
53 | end
54 | end
55 | for f = setdiff(fieldnames(s)', {'vars','params','layers'})
56 | f = char(f) ;
57 | obj.(f) = s.(f) ;
58 | end
59 | elseif isa(s, 'dagnn.DagNN')
60 | obj = s ;
61 | else
62 | error('Unknown data type %s for `loadobj`.', class(s));
63 | end
64 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/move.m:
--------------------------------------------------------------------------------
1 | function move(obj, device)
2 | %MOVE Move the DagNN to either CPU or GPU
3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU.
4 | %
5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU.
6 |
7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | obj.reset() ;
14 | obj.device = device ;
15 | switch device
16 | case 'gpu'
17 | for i=1:numel(obj.params)
18 | obj.params(i).value = gpuArray(obj.params(i).value) ;
19 | end
20 | case 'cpu'
21 | for i=1:numel(obj.params)
22 | obj.params(i).value = gather(obj.params(i).value) ;
23 | end
24 | otherwise
25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ;
26 | end
27 | for l = 1:numel(obj.layers)
28 | obj.layers(l).block.move(device) ;
29 | end
30 |
--------------------------------------------------------------------------------
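Typical usage (the 'gpu' case assumes a CUDA-capable device is visible to MATLAB):

    net.move('gpu') ;   % parameters become gpuArray; each block's move() hook runs
    % ... train or evaluate ...
    net.move('cpu') ;   % gather everything back before saving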
/matlab/+dagnn/@DagNN/removeLayer.m:
--------------------------------------------------------------------------------
1 | function removeLayer(obj, layerName)
2 | %REMOVELAYER Remove a layer from the network
3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object
4 | % OBJ. NAME can be a string or a cell array of strings.
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | if ischar(layerName), layerName = {layerName}; end;
13 | idxs = obj.getLayerIndex(layerName);
14 | if any(isnan(idxs))
15 | error('Invalid layer name `%s`', ...
16 | strjoin(layerName(isnan(idxs)), ', '));
17 | end
18 | obj.layers(idxs) = [] ;
19 | obj.rebuild() ;
20 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/renameLayer.m:
--------------------------------------------------------------------------------
1 | function renameLayer(obj, oldName, newName, varargin)
2 | %RENAMELAYER Rename a layer
3 | % RENAMELAYER(OLDNAME, NEWNAME) changes the name of the layer
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing layer.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the layer to rename
11 | v = obj.getLayerIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such layer, nothing to do
14 | if ~opts.quiet
15 | warning('There is no layer ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing layer
21 | newNameExists = any(strcmp(newName, {obj.layers.name})) ;
22 | if newNameExists
23 | error('There is already a layer ''%s''.', newName) ;
24 | end
25 |
26 | % Replace oldName with newName in all the layers
27 | obj.layers(v).name = newName ;
28 | obj.rebuild() ;
29 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/renameParam.m:
--------------------------------------------------------------------------------
1 | function renameParam(obj, oldName, newName, varargin)
2 | %RENAMEPARAM Rename a parameter
3 | % RENAMEPARAM(OLDNAME, NEWNAME) changes the name of the parameter
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing parameter.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the param to rename
11 | v = obj.getParamIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such param, nothing to do
14 | if ~opts.quiet
15 | warning('There is no parameter ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing param
21 | newNameExists = any(strcmp(newName, {obj.params.name})) ;
22 | if newNameExists
23 | error('There is already a parameter ''%s''.', newName) ;
24 | end
25 |
26 | % Replace oldName with newName in all the layers
27 | for l = 1:numel(obj.layers)
28 | sel = find(strcmp(oldName, obj.layers(l).params));
29 | [obj.layers(l).params{sel}] = deal(newName) ;
30 | end
31 |
32 | if ~newNameExists
33 | obj.params(v).name = newName ;
34 | obj.paramNames.(newName) = v ;
35 | end
36 |
37 | obj.rebuild() ;
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/renameVar.m:
--------------------------------------------------------------------------------
1 | function renameVar(obj, oldName, newName, varargin)
2 | %RENAMEVAR Rename a variable
3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing variable.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the variable to rename
11 | v = obj.getVarIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such a variable, nothing to do
14 | if ~opts.quiet
15 | warning('There is no variable ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing variable
21 | newNameExists = any(strcmp(newName, {obj.vars.name})) ;
22 |
23 | % Replace oldName with newName in all the layers
24 | for l = 1:numel(obj.layers)
25 | for f = {'inputs', 'outputs'}
26 | f = char(f) ;
27 | sel = find(strcmp(oldName, obj.layers(l).(f))) ;
28 | [obj.layers(l).(f){sel}] = deal(newName) ;
29 | end
30 | end
31 |
32 | % If newVariable is a variable in the graph, then there is not
33 | % anything else to do. obj.rebuild() will remove the slot
34 | % in obj.vars() for oldName as that variable becomes unused.
35 | %
36 | % If, however, newVariable is not in the graph already, then
37 | % the slot in obj.vars() is preserved and only the variable name
38 | % is changed.
39 |
40 | if ~newNameExists
41 | obj.vars(v).name = newName ;
42 | % update variable name hash otherwise rebuild() won't find this var
43 | % correctly
44 | obj.varNames.(newName) = v ;
45 | end
46 |
47 | obj.rebuild() ;
48 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/reset.m:
--------------------------------------------------------------------------------
1 | function reset(obj)
2 | %RESET Reset the DagNN
3 | % RESET(obj) resets the DagNN obj. The function clears any intermediate value stored in the DagNN
4 | % object, including parameter gradients. It also calls the reset
5 | % function of every layer.
6 |
7 | obj.clearParameterServer() ;
8 | [obj.vars.value] = deal([]) ;
9 | [obj.vars.der] = deal([]) ;
10 | [obj.params.der] = deal([]) ;
11 | for l = 1:numel(obj.layers)
12 | obj.layers(l).block.reset() ;
13 | end
14 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/saveobj.m:
--------------------------------------------------------------------------------
1 | function s = saveobj(obj)
2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure
3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB
4 | % structure S. This is particularly convenient to preserve future
5 | % compatibility and to ship networks that are pure structures,
6 | % instead of embedding dependencies to code.
7 | %
8 | % The object can be reconstructed by `obj = DagNN.loadobj(s)`.
9 | %
10 | % As a side-effect the network is reset (all variables are cleared)
11 | % and is transferred to the CPU.
12 | %
13 | % See Also: dagnn.DagNN.loadobj, dagnn.DagNN.reset
14 |
15 | % Copyright (C) 2015-2016 Karel Lenc and Andrea Vedaldi.
16 | % All rights reserved.
17 | %
18 | % This file is part of the VLFeat library and is made available under
19 | % the terms of the BSD license (see the COPYING file).
20 |
21 | device = obj.device ;
22 | obj.move('cpu') ;
23 | s.vars = struct(...
24 | 'name', {obj.vars.name}, ...
25 | 'precious', {obj.vars.precious}) ;
26 | s.params = struct(...
27 | 'name', {obj.params.name}, ...
28 | 'value', {obj.params.value}, ...
29 | 'learningRate', {obj.params.learningRate}, ...
30 | 'weightDecay', {obj.params.weightDecay}) ;
31 | s.layers = struct(...
32 | 'name', {obj.layers.name}, ...
33 | 'type', {[]}, ...
34 | 'inputs', {obj.layers.inputs}, ...
35 | 'outputs', {obj.layers.outputs}, ...
36 | 'params', {obj.layers.params}, ...
37 | 'block', {[]}) ;
38 | s.meta = obj.meta ;
39 |
40 | for l = 1:numel(obj.layers)
41 | block = obj.layers(l).block ;
42 | slayer = block.save() ;
43 | s.layers(l).type = class(block) ;
44 | s.layers(l).block = slayer ;
45 | end
46 |
--------------------------------------------------------------------------------
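The save/load round trip this enables (the standard MatConvNet pattern; the file name is illustrative):

    net_ = net.saveobj() ;                        % plain struct, moved to the CPU
    save('net-deployed.mat', '-struct', 'net_') ;
    net2 = dagnn.DagNN.loadobj(load('net-deployed.mat')) ;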
/matlab/+dagnn/@DagNN/setLayerInputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerInputs(obj, layer, inputs)
2 | %SETLAYERINPUTS Set or change the inputs to a layer
3 | % Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for input = inputs
8 | v(end+1) = obj.addVar(char(input)) ;
9 | end
10 | obj.layers(l).inputs = inputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/setLayerOutputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerOutputs(obj, layer, outputs)
2 | %SETLAYEROUTPUTS Set or change the outputs of a layer
3 | % Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for output = outputs
8 | v(end+1) = obj.addVar(char(output)) ;
9 | end
10 | obj.layers(l).outputs = outputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matlab/+dagnn/@DagNN/setLayerParams.m:
--------------------------------------------------------------------------------
1 | function v = setLayerParams(obj, layer, params)
2 | %SETLAYERPARAMS Set or change the parameters of a layer
3 | % Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for param = params
8 | v(end+1) = obj.addParam(char(param)) ;
9 | end
10 | obj.layers(l).params = params ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Balance_loss.m:
--------------------------------------------------------------------------------
1 | classdef Balance_loss < dagnn.ElementWise
2 | %Extra loss to encourage balanced module selection within one mini-batch
3 | %created by Zilin Gao
4 | properties
5 | loss_weight = 100;
6 | CM_num = 16 ;
7 | end
8 |
9 | methods
10 |
11 | function forwardAdvanced(obj, layer)
12 | net = obj.net ;
13 | in = layer.inputIndexes ;
14 | out = layer.outputIndexes ;
15 | input = net.vars(in).value;
16 | if iscell(input)
17 | input = input{1};
18 | end
19 |
20 | w_sum = sum(input,2);
21 | net.vars(out).value{3} = w_sum;
22 | RSD = std(w_sum) / mean(w_sum) ;%relative standard deviation
23 | net.vars(out).value{1} = obj.loss_weight * RSD^2 ;
24 | net.vars(out).value{2} = RSD ;
25 |
26 | assert(~any(isnan( net.vars(out).value{1}(:))))
27 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) - 1;
28 | % clear inputs if not needed anymore
29 | if net.numPendingVarRefs(in) == 0
30 | if ~net.vars(in).precious & ~net.computingDerivative & net.conserveMemory
31 | net.vars(in).value = [] ;
32 | end
33 | end
34 | end
35 |
36 | function backwardAdvanced(obj, layer)
37 | if ~obj.net.conserveMemory
38 | backwardAdvanced@dagnn.Layer(obj, layer) ;
39 | return ;
40 | end
41 | net = obj.net ;
42 | in = layer.inputIndexes ;
43 | out = layer.outputIndexes ;
44 |
45 | w_sum = net.vars(out).value{3};
46 | m = mean(w_sum);
47 | N = obj.CM_num;
48 | w = obj.loss_weight;
49 | RSD = net.vars(out).value{2};
50 | in_data = net.vars(in).value;
51 | if iscell(in_data)
52 | in_data = in_data{1};
53 | end
54 |
55 | der_sfm = w_sum * 2 * w / ((N-1) * m^2) - ...
56 | 2* w * ( 1 / (N-1) + RSD ^2 /N) / m;
57 | derInput = der_sfm * ones(1,size(in_data ,2 ));
58 |
59 | if ~net.vars(out).precious & net.conserveMemory
60 | net.vars(out).value = [] ;
61 | end
62 |
63 | if net.numPendingVarRefs(in) == 0
64 | net.vars(in).der = derInput ;
65 | else
66 | net.vars(in).der = net.vars(in).der + derInput ;
67 | end
68 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ;
69 | assert(~any(isnan(net.vars(in).der(:))))
70 | end
71 |
72 | function obj = Balance_loss(varargin)
73 | obj.load(varargin) ;
74 | end
75 | end
76 | end
77 |
--------------------------------------------------------------------------------
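To make the forward pass concrete, a small numeric sketch of the loss value (loss_weight = 100 as in the defaults; w_sum illustrative):

    w_sum = [6; 2; 4; 4] ;             % per-module selection weights in a batch
    RSD  = std(w_sum) / mean(w_sum) ;  % relative standard deviation, ~0.4082 here
    loss = 100 * RSD^2 ;               % ~16.67; zero for a perfectly balanced selection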
/matlab/+dagnn/BatchNorm.m:
--------------------------------------------------------------------------------
1 | classdef BatchNorm < dagnn.ElementWise
2 | properties
3 | numChannels
4 | epsilon = 1e-5
5 | opts = {'NoCuDNN'} % ours seems slightly faster
6 | end
7 |
8 | properties (Transient)
9 | moments
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | if strcmp(obj.net.mode, 'test')
15 | outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
16 | 'moments', params{3}, ...
17 | 'epsilon', obj.epsilon, ...
18 | obj.opts{:}) ;
19 | else
20 | [outputs{1},obj.moments] = ...
21 | vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
22 | 'epsilon', obj.epsilon, ...
23 | obj.opts{:}) ;
24 | end
25 | end
26 |
27 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
28 | [derInputs{1}, derParams{1}, derParams{2}, derParams{3}] = ...
29 | vl_nnbnorm(inputs{1}, params{1}, params{2}, derOutputs{1}, ...
30 | 'epsilon', obj.epsilon, ...
31 | 'moments', obj.moments, ...
32 | obj.opts{:}) ;
33 | obj.moments = [] ;
34 | % multiply the moments update by the number of images in the batch
35 | % this is required to make the update additive for subbatches
36 | % and will eventually be normalized away
37 | derParams{3} = derParams{3} * size(inputs{1},4) ;
38 | end
39 |
40 | % ---------------------------------------------------------------------
41 | function obj = BatchNorm(varargin)
42 | obj.load(varargin{:}) ;
43 | end
44 |
45 | function params = initParams(obj)
46 | params{1} = ones(obj.numChannels,1,'single') ;
47 | params{2} = zeros(obj.numChannels,1,'single') ;
48 | params{3} = zeros(obj.numChannels,2,'single') ;
49 | end
50 |
51 | function attach(obj, net, index)
52 | attach@dagnn.ElementWise(obj, net, index) ;
53 | p = net.getParamIndex(net.layers(index).params{3}) ;
54 | net.params(p).trainMethod = 'average' ;
55 | net.params(p).learningRate = 0.1 ;
56 | end
57 | end
58 | end
59 |
--------------------------------------------------------------------------------
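A construction sketch (channel count and names illustrative); note that attach() above sets the moments parameter to trainMethod 'average' with learning rate 0.1:

    bn = dagnn.BatchNorm('numChannels', 64, 'epsilon', 1e-5) ;
    net.addLayer('bn1', bn, {'conv1_out'}, {'bn1_out'}, ...
                 {'bn1_mult', 'bn1_bias', 'bn1_moments'}) ;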
/matlab/+dagnn/BilinearSampler.m:
--------------------------------------------------------------------------------
1 | % Wrapper for BilinearSampler block:
2 | % (c) 2016 Ankush Gupta
3 |
4 | classdef BilinearSampler < dagnn.Layer
5 | methods
6 | function outputs = forward(obj, inputs, params)
7 | outputs = vl_nnbilinearsampler(inputs{1}, inputs{2});
8 | outputs = {outputs};
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | [dX,dG] = vl_nnbilinearsampler(inputs{1}, inputs{2}, derOutputs{1});
13 | derInputs = {dX,dG};
14 | derParams = {};
15 | end
16 |
17 | function outputSizes = getOutputSizes(obj, inputSizes)
18 | xSize = inputSizes{1};
19 | gSize = inputSizes{2};
20 | outputSizes = {[gSize(2), gSize(3), xSize(3), xSize(4)]};
21 | end
22 |
23 | function obj = BilinearSampler(varargin)
24 | obj.load(varargin);
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matlab/+dagnn/CM_out.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZilinGao/GM-SOP/5922792067a4d6e5ca057b5429a221f2ea101a19/matlab/+dagnn/CM_out.m
--------------------------------------------------------------------------------
/matlab/+dagnn/Concat.m:
--------------------------------------------------------------------------------
1 | classdef Concat < dagnn.ElementWise
2 | properties
3 | dim = 3
4 | end
5 |
6 | properties (Transient)
7 | inputSizes = {}
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | outputs{1} = vl_nnconcat(inputs, obj.dim) ;
13 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
14 | end
15 |
16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
17 | derInputs = vl_nnconcat(inputs, obj.dim, derOutputs{1}, 'inputSizes', obj.inputSizes) ;
18 | derParams = {} ;
19 | end
20 |
21 | function reset(obj)
22 | obj.inputSizes = {} ;
23 | end
24 |
25 | function outputSizes = getOutputSizes(obj, inputSizes)
26 | sz = inputSizes{1} ;
27 | for k = 2:numel(inputSizes)
28 | sz(obj.dim) = sz(obj.dim) + inputSizes{k}(obj.dim) ;
29 | end
30 | outputSizes{1} = sz ;
31 | end
32 |
33 | function rfs = getReceptiveFields(obj)
34 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
35 | if obj.dim == 3 || obj.dim == 4
36 | rfs = getReceptiveFields@dagnn.ElementWise(obj) ;
37 | rfs = repmat(rfs, numInputs, 1) ;
38 | else
39 | for i = 1:numInputs
40 | rfs(i,1).size = [NaN NaN] ;
41 | rfs(i,1).stride = [NaN NaN] ;
42 | rfs(i,1).offset = [NaN NaN] ;
43 | end
44 | end
45 | end
46 |
47 | function load(obj, varargin)
48 | s = dagnn.Layer.argsToStruct(varargin{:}) ;
49 | % backward file compatibility
50 | if isfield(s, 'numInputs'), s = rmfield(s, 'numInputs') ; end
51 | load@dagnn.Layer(obj, s) ;
52 | end
53 |
54 | function obj = Concat(varargin)
55 | obj.load(varargin{:}) ;
56 | end
57 | end
58 | end
59 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Conv.m:
--------------------------------------------------------------------------------
1 | classdef Conv < dagnn.Filter
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | if ~obj.hasBias, params{2} = [] ; end
11 | outputs{1} = vl_nnconv(...
12 | inputs{1}, params{1}, params{2}, ...
13 | 'pad', obj.pad, ...
14 | 'stride', obj.stride, ...
15 | 'dilate', obj.dilate, ...
16 | obj.opts{:}) ;
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | if ~obj.hasBias, params{2} = [] ; end
21 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconv(...
22 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
23 | 'pad', obj.pad, ...
24 | 'stride', obj.stride, ...
25 | 'dilate', obj.dilate, ...
26 | obj.opts{:}) ;
27 | end
28 |
29 | function kernelSize = getKernelSize(obj)
30 | kernelSize = obj.size(1:2) ;
31 | end
32 |
33 | function outputSizes = getOutputSizes(obj, inputSizes)
34 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
35 | outputSizes{1}(3) = obj.size(4) ;
36 | end
37 |
38 | function params = initParams(obj)
39 | % Xavier improved
40 | sc = sqrt(2 / prod(obj.size(1:3))) ;
41 | %sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
42 | params{1} = randn(obj.size,'single') * sc ;
43 | if obj.hasBias
44 | params{2} = zeros(obj.size(4),1,'single') ;
45 | end
46 | end
47 |
48 | function set.size(obj, ksize)
49 | % make sure that ksize has 4 dimensions
50 | ksize = [ksize(:)' 1 1 1 1] ;
51 | obj.size = ksize(1:4) ;
52 | end
53 |
54 | function obj = Conv(varargin)
55 | obj.load(varargin) ;
56 | % normalize field by implicitly calling setters defined in
57 | % dagnn.Filter and here
58 | obj.size = obj.size ;
59 | obj.stride = obj.stride ;
60 | obj.pad = obj.pad ;
61 | end
62 | end
63 | end
64 |
--------------------------------------------------------------------------------
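The initParams scaling above is the "Xavier improved" (He) rule, sqrt(2 / fan-in); worked numerically for an illustrative 3x3x64x128 filter bank:

    sz = [3 3 64 128] ;
    sc = sqrt(2 / prod(sz(1:3))) ;   % fan-in = 3*3*64 = 576, so sc ~ 0.0589
    f  = randn(sz, 'single') * sc ;  % filters; biases start at zero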
/matlab/+dagnn/ConvTranspose.m:
--------------------------------------------------------------------------------
1 | classdef ConvTranspose < dagnn.Layer
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | upsample = [1 1]
6 | crop = [0 0 0 0]
7 | numGroups = 1
8 | opts = {'cuDNN'}
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if ~obj.hasBias, params{2} = [] ; end
14 | outputs{1} = vl_nnconvt(...
15 | inputs{1}, params{1}, params{2}, ...
16 | 'upsample', obj.upsample, ...
17 | 'crop', obj.crop, ...
18 | 'numGroups', obj.numGroups, ...
19 | obj.opts{:}) ;
20 | end
21 |
22 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
23 | if ~obj.hasBias, params{2} = [] ; end
24 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconvt(...
25 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
26 | 'upsample', obj.upsample, ...
27 | 'crop', obj.crop, ...
28 | 'numGroups', obj.numGroups, ...
29 | obj.opts{:}) ;
30 | end
31 |
32 | function outputSizes = getOutputSizes(obj, inputSizes)
33 | outputSizes{1} = [...
34 | obj.upsample(1) * (inputSizes{1}(1) - 1) + obj.size(1) - obj.crop(1) - obj.crop(2), ...
35 | obj.upsample(2) * (inputSizes{1}(2) - 1) + obj.size(2) - obj.crop(3) - obj.crop(4), ...
36 | obj.size(3), ...
37 | inputSizes{1}(4)] ;
38 | end
39 |
40 | function rfs = getReceptiveFields(obj)
41 | rfs.size = (obj.size(1:2) - 1) ./ obj.upsample + 1 ;
42 | rfs.stride = 1 ./ [obj.upsample] ;
43 | rfs.offset = (2*obj.crop([1 3]) - obj.size(1:2) + 1) ...
44 | ./ (2*obj.upsample) + 1 ;
45 | end
46 |
47 | function params = initParams(obj)
48 | % todo: test this initialization method
49 | sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
50 | params{1} = randn(obj.size,'single') * sc ;
51 | if obj.hasBias
52 | params{2} = zeros(obj.size(3),1,'single') ;
53 | end
54 | end
55 |
56 | function set.size(obj, ksize)
57 | % make sure that ksize has 4 dimensions
58 | ksize = [ksize(:)' 1 1 1 1] ;
59 | obj.size = ksize(1:4) ;
60 | end
61 |
62 | function set.crop(obj, crop)
63 | if numel(crop) == 1
64 | obj.crop = [crop crop crop crop] ;
65 | elseif numel(crop) == 2
66 | obj.crop = crop([1 1 2 2]) ;
67 | else
68 | obj.crop = crop ;
69 | end
70 | end
71 |
72 | function set.upsample(obj, upsample)
73 | if numel(upsample) == 1
74 | obj.upsample = [upsample upsample] ;
75 | else
76 | obj.upsample = upsample ;
77 | end
78 | end
79 |
80 | function obj = ConvTranspose(varargin)
81 | obj.load(varargin) ;
82 | % normalize field by implicitly calling setters defined in
83 | % dagnn.Filter and here
84 | obj.size = obj.size ;
85 | obj.upsample = obj.upsample ;
86 | obj.crop = obj.crop ;
87 | end
88 | end
89 | end
90 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Crop.m:
--------------------------------------------------------------------------------
1 | classdef Crop < dagnn.ElementWise
2 | %CROP DagNN cropping layer.
3 | % This is a peculiar layer from FCN. It crops inputs{1} to
4 | % match the size of inputs{2} (starting with a base crop amount).
5 |
6 |
7 | properties
8 | crop = [0 0]
9 | end
10 |
11 | properties (Transient)
12 | inputSizes = {}
13 | end
14 |
15 | methods
16 | function crop = getAdaptedCrops(obj)
17 | cropv = obj.inputSizes{1}(1) - obj.inputSizes{2}(1) ;
18 | cropu = obj.inputSizes{1}(2) - obj.inputSizes{2}(2) ;
19 | cropv1 = max(0, cropv - obj.crop(1)) ;
20 | cropu1 = max(0, cropu - obj.crop(2)) ;
21 | crop = [cropv - cropv1, cropv1, cropu - cropu1, cropu1] ;
22 | end
23 |
24 | function outputs = forward(obj, inputs, params)
25 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
26 | adjCrop = obj.getAdaptedCrops() ;
27 | outputs{1} = vl_nncrop(inputs{1}, adjCrop) ;
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | adjCrop = obj.getAdaptedCrops() ;
32 | derInputs{1} = vl_nncrop(inputs{1}, adjCrop, derOutputs{1}, obj.inputSizes{1}) ;
33 | derInputs{2} = [] ;
34 | derParams = {} ;
35 | end
36 |
37 | function reset(obj)
38 | obj.inputSizes = {} ;
39 | end
40 |
41 | function outputSizes = getOutputSizes(obj, inputSizes)
42 | obj.inputSizes = inputSizes ;
43 | crop = obj.getAdaptedCrops() ;
44 | outputSizes{1} = inputSizes{1} - [crop(1)+crop(2), crop(3)+crop(4), 0, 0] ;
45 | end
46 |
47 | function rfs = getReceptiveFields(obj)
48 | rfs(1,1).size = [1 1] ;
49 | rfs(1,1).stride = [1 1] ;
50 | rfs(1,1).offset = 1 + obj.crop ;
51 | rfs(2,1).size = [] ;
52 | rfs(2,1).stride = [] ;
53 | rfs(2,1).offset = [] ;
54 | end
55 |
56 | function obj = Crop(varargin)
57 | obj.load(varargin) ;
58 | end
59 | end
60 | end
61 |
--------------------------------------------------------------------------------
/matlab/+dagnn/DropOut.m:
--------------------------------------------------------------------------------
1 | classdef DropOut < dagnn.ElementWise
2 | properties
3 | rate = 0.5
4 | frozen = false
5 | end
6 |
7 | properties (Transient)
8 | mask
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if strcmp(obj.net.mode, 'test')
14 | outputs = inputs ;
15 | return ;
16 | end
17 | if obj.frozen & ~isempty(obj.mask)
18 | outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ;
19 | else
20 | [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ;
21 | end
22 | end
23 |
24 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
25 | if strcmp(obj.net.mode, 'test')
26 | derInputs = derOutputs ;
27 | derParams = {} ;
28 | return ;
29 | end
30 | derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ;
31 | derParams = {} ;
32 | end
33 |
34 | % ---------------------------------------------------------------------
35 | function obj = DropOut(varargin)
36 | obj.load(varargin{:}) ;
37 | end
38 |
39 | function obj = reset(obj)
40 | reset@dagnn.ElementWise(obj) ;
41 | obj.mask = [] ;
42 | obj.frozen = false ;
43 | end
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/matlab/+dagnn/ElementWise.m:
--------------------------------------------------------------------------------
1 | classdef ElementWise < dagnn.Layer
2 | %ELEMENTWISE DagNN layers that operate at individual spatial locations
3 | methods
4 | function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes)
5 | outputSizes = inputSizes ;
6 | transforms = {eye(6)} ;
7 | end
8 |
9 | function rfs = getReceptiveFields(obj)
10 | rfs.size = [1 1] ;
11 | rfs.stride = [1 1] ;
12 | rfs.offset = [1 1] ;
13 | end
14 |
15 | function outputSizes = getOutputSizes(obj, inputSizes)
16 | outputSizes = inputSizes ;
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Filter.m:
--------------------------------------------------------------------------------
1 | classdef Filter < dagnn.Layer
2 | properties
3 | pad = [0 0 0 0]
4 | stride = [1 1]
5 | dilate = [1 1]
6 | end
7 | methods
8 | function set.pad(obj, pad)
9 | if numel(pad) == 1
10 | obj.pad = [pad pad pad pad] ;
11 | elseif numel(pad) == 2
12 | obj.pad = pad([1 1 2 2]) ;
13 | else
14 | obj.pad = pad ;
15 | end
16 | end
17 |
18 | function set.stride(obj, stride)
19 | if numel(stride) == 1
20 | obj.stride = [stride stride] ;
21 | else
22 | obj.stride = stride ;
23 | end
24 | end
25 |
26 | function set.dilate(obj, dilate)
27 | if numel(dilate) == 1
28 | obj.dilate = [dilate dilate] ;
29 | else
30 | obj.dilate = dilate ;
31 | end
32 | end
33 |
34 | function kernelSize = getKernelSize(obj)
35 | kernelSize = [1 1] ;
36 | end
37 |
38 | function outputSizes = getOutputSizes(obj, inputSizes)
39 | ks = obj.getKernelSize() ;
40 | ke = (ks - 1) .* obj.dilate + 1 ;
41 | outputSizes{1} = [...
42 | fix((inputSizes{1}(1) + obj.pad(1) + obj.pad(2) - ke(1)) / obj.stride(1)) + 1, ...
43 | fix((inputSizes{1}(2) + obj.pad(3) + obj.pad(4) - ke(2)) / obj.stride(2)) + 1, ...
44 | 1, ...
45 | inputSizes{1}(4)] ;
46 | end
47 |
48 | function rfs = getReceptiveFields(obj)
49 | ks = obj.getKernelSize() ;
50 | ke = (ks - 1) .* obj.dilate + 1 ;
51 | y1 = 1 - obj.pad(1) ;
52 | y2 = 1 - obj.pad(1) + ke(1) - 1 ;
53 | x1 = 1 - obj.pad(3) ;
54 | x2 = 1 - obj.pad(3) + ke(2) - 1 ;
55 | h = y2 - y1 + 1 ;
56 | w = x2 - x1 + 1 ;
57 | rfs.size = [h, w] ;
58 | rfs.stride = obj.stride ;
59 | rfs.offset = [y1+y2, x1+x2]/2 ;
60 | end
61 | end
62 | end
63 |
--------------------------------------------------------------------------------
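A worked instance of the getOutputSizes formula above (3x3 kernel, pad 1 on each side, stride 2, dilation 1, 224x224 input):

    ks = 3 ; pad = 1 ; stride = 2 ; dilate = 1 ;
    ke  = (ks - 1) * dilate + 1 ;                 % effective kernel size: 3
    out = fix((224 + 2*pad - ke) / stride) + 1 ;  % fix(223/2) + 1 = 112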
/matlab/+dagnn/LRN.m:
--------------------------------------------------------------------------------
1 | classdef LRN < dagnn.ElementWise
2 | properties
3 | param = [5 1 0.0001/5 0.75]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = LRN(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Loss.m:
--------------------------------------------------------------------------------
1 | classdef Loss < dagnn.ElementWise
2 | properties
3 | loss = 'softmaxlog'
4 | ignoreAverage = false
5 | opts = {}
6 | end
7 |
8 | properties (Transient)
9 | average = 0
10 | numAveraged = 0
11 | end
12 |
13 | methods
14 | function outputs = forward(obj, inputs, params)
15 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss, obj.opts{:}) ;
16 | obj.accumulateAverage(inputs, outputs);
17 | end
18 |
19 | function accumulateAverage(obj, inputs, outputs)
20 | if obj.ignoreAverage, return; end;
21 | n = obj.numAveraged ;
22 | m = n + size(inputs{1}, 1) * size(inputs{1}, 2) * size(inputs{1}, 4);
23 | obj.average = bsxfun(@plus, n * obj.average, gather(outputs{1})) / m ;
24 | obj.numAveraged = m ;
25 | end
26 |
27 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
28 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss, obj.opts{:}) ;
29 | derInputs{2} = [] ;
30 | derParams = {} ;
31 | end
32 |
33 | function reset(obj)
34 | obj.average = 0 ;
35 | obj.numAveraged = 0 ;
36 | end
37 |
38 | function outputSizes = getOutputSizes(obj, inputSizes, paramSizes)
39 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ;
40 | end
41 |
42 | function rfs = getReceptiveFields(obj)
43 | % the receptive field depends on the dimension of the variables
44 | % which is not known until the network is run
45 | rfs(1,1).size = [NaN NaN] ;
46 | rfs(1,1).stride = [NaN NaN] ;
47 | rfs(1,1).offset = [NaN NaN] ;
48 | rfs(2,1) = rfs(1,1) ;
49 | end
50 |
51 | function obj = Loss(varargin)
52 | obj.load(varargin) ;
53 | end
54 | end
55 | end
56 |
--------------------------------------------------------------------------------
/matlab/+dagnn/NormOffset.m:
--------------------------------------------------------------------------------
1 | classdef NormOffset < dagnn.ElementWise
2 | properties
3 | param = [1 0.5]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = NormOffset(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matlab/+dagnn/OBJ_ConvNet_COV_Pool.m:
--------------------------------------------------------------------------------
1 | classdef OBJ_ConvNet_COV_Pool < dagnn.Layer
2 | properties
3 | opts = {'cuDNN'}
4 | cudnn = {'CuDNN'}
5 | end
6 |
7 |
8 |
9 | methods
10 | function outputs = forward(self, inputs, params)
11 | %[outputs{1}] = ConvNet_Cov_Pool(inputs{1}); % lph 2017/10/14 11:37
12 |
13 | % %res(i+1).x = vl_nncov_pool(res(i).x, cudnn{:});
14 | [outputs{1}] = vl_nncov_pool(inputs{1}, self.cudnn{:});
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs, outputs)
18 | % [~, derInputs{1}]= ConvNet_Cov_Pool(inputs{1}, derOutputs{1}); % lph 2017/10/14 11:37
19 |
20 | % % res(i).dzdx = vl_nncov_pool(res(i).x, res(i+1).dzdx, cudnn{:});
21 | [derInputs{1}] = vl_nncov_pool(inputs{1}, derOutputs{1}, self.cudnn{:});
22 | derParams = {} ;
23 | end
24 |
25 | function obj = OBJ_ConvNet_COV_Pool(varargin)
26 | obj.load(varargin) ;
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/matlab/+dagnn/OBJ_ConvNet_COV_ScaleTr.m:
--------------------------------------------------------------------------------
1 | classdef OBJ_ConvNet_COV_ScaleTr < dagnn.Layer
2 | properties
3 | opts = {'cuDNN'}
4 | cudnn = {'CuDNN'}
5 | end
6 |
7 |
8 |
9 | methods
10 |
11 | % [x_next, dLdX, dLdX_tr] = ConvNet_Cov_ScaleTr(x, x_prior_tr, dzdy)
12 |
13 | function outputs = forward(self, inputs, params)
14 | % [outputs{1}] = ConvNet_Cov_ScaleTr(inputs{1}, inputs{2});
15 | [outputs{1}] = vl_nncov_traceNorm(inputs{1},inputs{2},self.cudnn{:}); % xjt 2017/10/18 12:09
16 | end
17 |
18 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs, outputs)
19 | % [~, derInputs{1}, derInputs{2}]= ConvNet_Cov_ScaleTr(inputs{1}, inputs{2}, derOutputs{1}); % matlab code
20 | [derInputs{1}, derInputs{2}] = vl_nncov_traceNorm(inputs{1},...
21 | inputs{2},...
22 | derOutputs{1},self.cudnn{:}); % xjt 2017/10/18 12:08
23 | derParams = {} ;
24 | end
25 |
26 | function obj = OBJ_ConvNet_COV_ScaleTr(varargin)
27 | obj.load(varargin) ;
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/matlab/+dagnn/PDist.m:
--------------------------------------------------------------------------------
1 | %PDIST vl_nnpdist dagnn wrapper
2 | % Accepts 2 or 3 inputs, where the third input is used as the variable
3 | % 'instanceWeights' parameter. Derivatives for the 3rd input are not
4 | % computed.
5 | % By default aggregates the element-wise loss.
6 | classdef PDist < dagnn.Loss
7 | properties
8 | p = 2;
9 | aggregate = true;
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | switch numel(inputs)
15 | case 2
16 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
17 | 'aggregate', obj.aggregate, obj.opts{:}) ;
18 | case 3
19 | outputs{1} = vl_nnpdist(inputs{1}, inputs{2}, obj.p, [], ...
20 | 'aggregate', obj.aggregate, 'instanceWeights', inputs{3}, ...
21 | obj.opts{:}) ;
22 | otherwise
23 | error('Invalid number of inputs');
24 | end
25 | obj.accumulateAverage(inputs, outputs);
26 | end
27 |
28 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
29 | derInputs = cell(1, numel(inputs));
30 | switch numel(inputs)
31 | case 2
32 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
33 | obj.p, derOutputs{1}, 'aggregate', obj.aggregate, obj.opts{:}) ;
34 | case 3
35 | [derInputs{1}, derInputs{2}] = vl_nnpdist(inputs{1}, inputs{2}, ...
36 | obj.p, derOutputs{1}, 'aggregate', obj.aggregate, ...
37 | 'instanceWeights', inputs{3}, obj.opts{:}) ;
38 | otherwise
39 | error('Invalid number of inputs');
40 | end
41 | derParams = {} ;
42 | end
43 |
44 | function obj = PDist(varargin)
45 | obj.load(varargin) ;
46 | obj.loss = 'pdist';
47 | end
48 | end
49 | end
50 |
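
A usage sketch with hypothetical variable names; the optional third input supplies the per-instance weights described in the header comment:

    net.addLayer('pdist', dagnn.PDist('p', 2), ...
                 {'prediction', 'target'}, {'objective'}) ;
    % with per-instance weights as a third input:
    net.addLayer('pdist_w', dagnn.PDist('p', 2), ...
                 {'prediction', 'target', 'weights'}, {'objective_w'}) ;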
--------------------------------------------------------------------------------
/matlab/+dagnn/Pooling.m:
--------------------------------------------------------------------------------
1 | classdef Pooling < dagnn.Filter
2 | properties
3 | method = 'max'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = Pooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
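
A construction sketch (parameter values are illustrative); 'pad' and 'stride' are inherited from dagnn.Filter and picked up by obj.load:

    poolBlock = dagnn.Pooling('poolSize', [3 3], 'stride', 2, ...
                              'pad', [0 1 0 1], 'method', 'max') ;
    net.addLayer('pool1', poolBlock, {'x1'}, {'x2'}) ;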
--------------------------------------------------------------------------------
/matlab/+dagnn/ROIPooling.m:
--------------------------------------------------------------------------------
1 | classdef ROIPooling < dagnn.Layer
2 | % DAGNN.ROIPOOLING Region of interest pooling layer
3 |
4 | % Copyright (C) 2016 Hakan Bilen.
5 | % All rights reserved.
6 | %
7 | % This file is part of the VLFeat library and is made available under
8 | % the terms of the BSD license (see the COPYING file).
9 |
10 | properties
11 | method = 'max'
12 | subdivisions = [6 6]
13 | transform = 1
14 | flatten = false
15 | end
16 |
17 | methods
18 | function outputs = forward(obj, inputs, params)
19 | numROIs = numel(inputs{2}) / 5 ;
20 | outputs{1} = vl_nnroipool(...
21 | inputs{1}, inputs{2}, ...
22 | 'subdivisions', obj.subdivisions, ...
23 | 'transform', obj.transform, ...
24 | 'method', obj.method) ;
25 | if obj.flatten
26 | outputs{1} = reshape(outputs{1},1,1,[],numROIs) ;
27 | end
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | numROIs = numel(inputs{2}) / 5 ;
32 | if obj.flatten
33 | % unflatten
34 | derOutputs{1} = reshape(...
35 | derOutputs{1},obj.subdivisions(1),obj.subdivisions(2),[],numROIs) ;
36 | end
37 | derInputs{1} = vl_nnroipool(...
38 | inputs{1}, inputs{2}, derOutputs{1}, ...
39 | 'subdivisions', obj.subdivisions, ...
40 | 'transform', obj.transform, ...
41 | 'method', obj.method) ;
42 | derInputs{2} = [];
43 | derParams = {} ;
44 | end
45 |
46 | function outputSizes = getOutputSizes(obj, inputSizes)
47 |       if isempty(inputSizes{2})
48 | n = 0 ;
49 | else
50 | n = prod(inputSizes{2})/5 ;
51 | end
52 | outputSizes{1} = [obj.subdivisions, inputSizes{1}(3), n] ;
53 | end
54 |
55 | function obj = ROIPooling(varargin)
56 | obj.load(varargin) ;
57 | end
58 | end
59 | end
60 |
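
The second input is expected to hold five numbers per region (an image index followed by the box coordinates), which is why numROIs is numel(inputs{2}) / 5. A wiring sketch with hypothetical names:

    net.addLayer('roipool', ...
                 dagnn.ROIPooling('subdivisions', [7 7], 'flatten', true), ...
                 {'features', 'rois'}, {'pooled'}) ;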
--------------------------------------------------------------------------------
/matlab/+dagnn/ReLU.m:
--------------------------------------------------------------------------------
1 | classdef ReLU < dagnn.ElementWise
2 | properties
3 | useShortCircuit = true
4 | leak = 0
5 | opts = {}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | outputs{1} = vl_nnrelu(inputs{1}, [], ...
11 | 'leak', obj.leak, obj.opts{:}) ;
12 | end
13 |
14 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
15 | derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}, ...
16 | 'leak', obj.leak, ...
17 | obj.opts{:}) ;
18 | derParams = {} ;
19 | end
20 |
21 | function forwardAdvanced(obj, layer)
22 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
23 | forwardAdvanced@dagnn.Layer(obj, layer) ;
24 | return ;
25 | end
26 | net = obj.net ;
27 | in = layer.inputIndexes ;
28 | out = layer.outputIndexes ;
29 | net.vars(out).value = vl_nnrelu(net.vars(in).value, [], ...
30 | 'leak', obj.leak, ...
31 | obj.opts{:}) ;
32 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) - 1;
33 |       if ~net.vars(in).precious && net.numPendingVarRefs(in) == 0
34 | net.vars(in).value = [] ;
35 | end
36 | end
37 |
38 | function backwardAdvanced(obj, layer)
39 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
40 | backwardAdvanced@dagnn.Layer(obj, layer) ;
41 | return ;
42 | end
43 | net = obj.net ;
44 | in = layer.inputIndexes ;
45 | out = layer.outputIndexes ;
46 |
47 | if isempty(net.vars(out).der), return ; end
48 |
49 | derInput = vl_nnrelu(net.vars(out).value, net.vars(out).der, ...
50 | 'leak', obj.leak, obj.opts{:}) ;
51 |
52 | if ~net.vars(out).precious
53 | net.vars(out).der = [] ;
54 | net.vars(out).value = [] ;
55 | end
56 |
57 | if net.numPendingVarRefs(in) == 0
58 | net.vars(in).der = derInput ;
59 | else
60 | net.vars(in).der = net.vars(in).der + derInput ;
61 | end
62 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ;
63 | end
64 |
65 | function obj = ReLU(varargin)
66 | obj.load(varargin) ;
67 | end
68 | end
69 | end
70 |
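
The short-circuit paths above rely on the fact that the ReLU derivative can be computed from the output alone: the mask (y > 0) equals (x > 0), so the input variable can be freed as soon as the output exists. A quick check, assuming a compiled MatConvNet is on the path:

    x  = randn(4,4,8,'single') ;
    y  = vl_nnrelu(x) ;
    dy = randn(size(y), 'single') ;
    dx_from_input  = vl_nnrelu(x, dy) ;
    dx_from_output = vl_nnrelu(y, dy) ;  % identical, since y preserves the sign of x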
--------------------------------------------------------------------------------
/matlab/+dagnn/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < dagnn.ElementWise
2 | properties
3 | size
4 | hasBias = true
5 | end
6 |
7 | methods
8 |
9 | function outputs = forward(obj, inputs, params)
10 | args = horzcat(inputs, params) ;
11 | outputs{1} = bsxfun(@times, args{1}, args{2}) ;
12 | if obj.hasBias
13 | outputs{1} = bsxfun(@plus, outputs{1}, args{3}) ;
14 | end
15 | end
16 |
17 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
18 | args = horzcat(inputs, params) ;
19 | sz = [size(args{2}) 1 1 1 1] ;
20 | sz = sz(1:4) ;
21 | dargs{1} = bsxfun(@times, derOutputs{1}, args{2}) ;
22 | dargs{2} = derOutputs{1} .* args{1} ;
23 | for k = find(sz == 1)
24 | dargs{2} = sum(dargs{2}, k) ;
25 | end
26 | if obj.hasBias
27 | dargs{3} = derOutputs{1} ;
28 | for k = find(sz == 1)
29 | dargs{3} = sum(dargs{3}, k) ;
30 | end
31 | end
32 | derInputs = dargs(1:numel(inputs)) ;
33 | derParams = dargs(numel(inputs)+(1:numel(params))) ;
34 | end
35 |
36 | function obj = Scale(varargin)
37 | obj.load(varargin) ;
38 | end
39 | end
40 | end
41 |
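
In backward, the derivative of a broadcast multiply must be summed over every dimension in which the parameter is singleton, which is what the loops over find(sz == 1) do. A standalone sketch with illustrative shapes:

    blk = dagnn.Scale('size', [1 1 64], 'hasBias', true) ;
    x = randn(7,7,64,4,'single') ;
    w = randn(1,1,64,'single') ;
    b = randn(1,1,64,'single') ;
    y = blk.forward({x}, {w, b}) ;            % y{1} is 7x7x64x4
    [dx, dp] = blk.backward({x}, {w, b}, y) ;
    % dp{1} and dp{2} are 1x1x64, like the parameters they correspond to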
--------------------------------------------------------------------------------
/matlab/+dagnn/Sigmoid.m:
--------------------------------------------------------------------------------
1 | classdef Sigmoid < dagnn.ElementWise
2 | methods
3 | function outputs = forward(obj, inputs, params)
4 | outputs{1} = vl_nnsigmoid(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/matlab/+dagnn/SoftMax.m:
--------------------------------------------------------------------------------
1 | classdef SoftMax < dagnn.ElementWise
2 | methods
3 | function outputs = forward(self, inputs, params)
4 | outputs{1} = vl_nnsoftmax(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 |
12 | function obj = SoftMax(varargin)
13 | obj.load(varargin) ;
14 | end
15 | end
16 | end
17 |
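
vl_nnsoftmax normalizes along the third (channel) dimension, independently at each spatial location and for each image. A quick sketch:

    x = randn(1,1,10,4,'single') ;
    y = vl_nnsoftmax(x) ;
    % sum(y,3) is all ones: each 10-way distribution sums to 1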
--------------------------------------------------------------------------------
/matlab/+dagnn/SpatialNorm.m:
--------------------------------------------------------------------------------
1 | classdef SpatialNorm < dagnn.ElementWise
2 | properties
3 | param = [2 2 10 2]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = SpatialNorm(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matlab/+dagnn/Sum.m:
--------------------------------------------------------------------------------
1 | classdef Sum < dagnn.ElementWise
2 | %SUM DagNN sum layer
3 | %   The SUM layer takes the sum of all its inputs and stores the result
4 | % as its only output.
5 |
6 | properties (Transient)
7 | numInputs
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | obj.numInputs = numel(inputs) ;
13 | outputs{1} = inputs{1} ;
14 | for k = 2:obj.numInputs
15 | outputs{1} = outputs{1} + inputs{k} ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | for k = 1:obj.numInputs
21 | derInputs{k} = derOutputs{1} ;
22 | end
23 | derParams = {} ;
24 | end
25 |
26 | function outputSizes = getOutputSizes(obj, inputSizes)
27 | outputSizes{1} = inputSizes{1} ;
28 | for k = 2:numel(inputSizes)
29 | if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1}))
30 | if ~isequal(inputSizes{k}, outputSizes{1})
31 |             warning('Sum layer: the dimensions of the input variables are not the same.') ;
32 | end
33 | end
34 | end
35 | end
36 |
37 | function rfs = getReceptiveFields(obj)
38 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
39 | rfs.size = [1 1] ;
40 | rfs.stride = [1 1] ;
41 | rfs.offset = [1 1] ;
42 | rfs = repmat(rfs, numInputs, 1) ;
43 | end
44 |
45 | function obj = Sum(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
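
This is the layer behind the identity shortcuts of the ResNet-style models in this repository; a wiring sketch with hypothetical variable names:

    net.addLayer('sum1', dagnn.Sum(), {'branch_out', 'shortcut'}, {'block_out'}) ;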
--------------------------------------------------------------------------------
/matlab/EIG.m:
--------------------------------------------------------------------------------
1 | function [V,S,D] = EIG(x)
2 | % EIG
3 | % eigenvalue decomposition of a batch of covariance matrices
4 | % created by Jiangtao Xie for MPN-COV
5 | type = class(x);
6 | L = size(x,3);
7 | n = size(x,1);
8 | V = zeros(n,n,L,type);
9 | S = zeros(n,1,L,type);
10 | D = zeros(L,1,type);
11 | idx = n : -1 : 1;
12 | for i = 1 : L
13 | [v,diag_S] = eig(x(:,:,i),'vector');
14 | diag_S = diag_S(idx);
15 | V(:,:,i) = v(:, idx);
16 | ind = diag_S > ( eps(max(diag_S)));
17 |         Dmin = min(find(ind, 1, 'last'), n); % number of significant eigenvalues of the covariance matrix of each image in the batch
18 | D(i) = Dmin;
19 | V(:, Dmin + 1 : end,i) = 0;
20 | diag_S(Dmin + 1 : end) = 0;
21 | S(:,:,i) = diag_S;
22 | end
23 | end
24 |
25 |
26 |
27 |
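
A standalone sketch of EIG on a small batch of random SPD matrices (sizes are illustrative):

    n = 8 ; L = 3 ;
    x = randn(n, n, L, 'single') ;
    for i = 1:L, x(:,:,i) = x(:,:,i) * x(:,:,i)' ; end  % make each slice SPD
    [V, S, D] = EIG(x) ;
    % V(:,:,i): eigenvectors, S(:,1,i): eigenvalues in descending order,
    % D(i): number of eigenvalues above the significance threshold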
--------------------------------------------------------------------------------
/matlab/compatibility/parallel/gather.m:
--------------------------------------------------------------------------------
1 | function x=gather(x)
2 | % GATHER Compatibility stub for the GATHER() function
3 | % GATHER() is a function in the Parallel MATLAB toolbox. MATCONVNET
4 | % can work without it.
5 |
--------------------------------------------------------------------------------
/matlab/compatibility/parallel/labindex.m:
--------------------------------------------------------------------------------
1 | function i = labindex()
2 | i = 1 ;
3 |
--------------------------------------------------------------------------------
/matlab/compatibility/parallel/numlabs.m:
--------------------------------------------------------------------------------
1 | function n = numlabs()
2 | n = 1 ;
3 |
--------------------------------------------------------------------------------
/matlab/functionSignatures.json:
--------------------------------------------------------------------------------
1 | {
2 | "vl_contrib":
3 | {
4 | "inputs":
5 | [
6 | {"mutuallyExclusiveGroup":
7 | [
8 | [
9 | {"name":"ItemType", "kind":"required", "type":"choices={'help','list'}"}
10 | ],
11 | [
12 | {"name":"ItemType", "kind":"required", "type":"choices={'install','update','setup','unload','compile','test', 'path'}"},
13 | {"name":"desctype", "kind":"optional", "type":[["char", "choices=vl_contrib('list')"]]}
14 | ]
15 | ]
16 | }
17 | ]
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/matlab/simplenn/vl_simplenn_move.m:
--------------------------------------------------------------------------------
1 | function net = vl_simplenn_move(net, destination)
2 | %VL_SIMPLENN_MOVE Move a SimpleNN network between CPU and GPU.
3 | % NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network to the
4 | % current GPU device. NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the
5 | % network to the CPU.
6 | %
7 | % See also: VL_SIMPLENN().
8 |
9 | % Copyright (C) 2014-15 Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | switch destination
16 | case 'gpu', moveop = @(x) gpuArray(x) ;
17 | case 'cpu', moveop = @(x) gather(x) ;
18 | otherwise, error('Unknown destination ''%s''.', destination) ;
19 | end
20 | for l=1:numel(net.layers)
21 | switch net.layers{l}.type
22 | case {'conv', 'convt', 'bnorm'}
23 | for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'}
24 | f = char(f) ;
25 | if isfield(net.layers{l}, f)
26 | net.layers{l}.(f) = moveop(net.layers{l}.(f)) ;
27 | end
28 | end
29 | for f = {'weights', 'momentum'}
30 | f = char(f) ;
31 | if isfield(net.layers{l}, f)
32 | for j=1:numel(net.layers{l}.(f))
33 | net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ;
34 | end
35 | end
36 | end
37 | otherwise
38 | % nothing to do ?
39 | end
40 | end
41 |
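
Typical use when moving a training run on and off the GPU (net is a SimpleNN structure):

    net = vl_simplenn_move(net, 'gpu') ;   % weights become gpuArray
    % ... train ...
    net = vl_simplenn_move(net, 'cpu') ;   % gather back before saving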
--------------------------------------------------------------------------------
/matlab/simplenn/vl_simplenn_start_parserv.m:
--------------------------------------------------------------------------------
1 | function vl_simplenn_start_parserv(net, ps)
2 | %VL_SIMPLENN_START_PARSERV Setup a parameter server for this network
3 | % VL_SIMPLENN_START_PARSERV(NET, PS) registers the network
4 | % parameter derivatives with the specified ParameterServer instance
5 | % PS and then starts the server.
6 |
7 | for i = 1:numel(net.layers)
8 | for j = 1:numel(net.layers{i}.weights)
9 | value = net.layers{i}.weights{j} ;
10 | name = sprintf('l%d_%d',i,j) ;
11 | if strcmp(class(value),'gpuArray')
12 | deviceType = 'gpu' ;
13 | dataType = classUnderlying(value) ;
14 | else
15 | deviceType = 'cpu' ;
16 | dataType = class(value) ;
17 | end
18 | ps.register(...
19 | name, ...
20 | size(value), ...
21 | dataType, ...
22 | deviceType) ;
23 | end
24 | end
25 | ps.start() ;
26 |
--------------------------------------------------------------------------------
/matlab/src/bits/config/mex_CUDA_glnxa64.xml:
--------------------------------------------------------------------------------
[mex/nvcc build configuration; the XML markup was stripped during extraction and is not recoverable]
--------------------------------------------------------------------------------
/matlab/src/bits/data.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "data.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/datamex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "datamex.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/datamex.hpp:
--------------------------------------------------------------------------------
1 | // @file datamex.hpp
2 | // @brief Basic data structures (MEX support)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__datamex__
14 | #define __vl__datamex__
15 |
16 | #include "mex.h"
17 |
18 | #if ENABLE_GPU
19 | #include "gpu/mxGPUArray.h"
20 | #endif
21 |
22 | #include "data.hpp"
23 |
24 | namespace vl {
25 |
26 | class MexTensor ;
27 |
28 | class MexContext : public Context
29 | {
30 | public:
31 | MexContext() ;
32 | ~MexContext() ;
33 |
34 | protected:
35 | #if ENABLE_GPU
36 | vl::ErrorCode initGpu() ;
37 | vl::ErrorCode validateGpu() ;
38 | mxArray * canary ; // if it breathes, the GPU state is valid
39 | bool gpuIsInitialized ;
40 | #endif
41 |
42 | friend class MexTensor ;
43 | } ;
44 |
45 | class MexTensor : public Tensor
46 | {
47 | public:
48 | MexTensor(MexContext & context) ;
49 | vl::ErrorCode init(mxArray const * array) ;
50 | vl::ErrorCode init(DeviceType deviceType, DataType dataType, TensorShape const & shape) ;
51 | vl::ErrorCode initWithZeros(DeviceType deviceType, DataType dataType, TensorShape const & shape) ;
52 | vl::ErrorCode initWithValue(DeviceType deviceType, DataType dataType, TensorShape const & shape, double value) ;
53 |
54 | void makePersistent() ;
55 | mxArray * relinquish() ;
56 | void clear() ;
57 | ~MexTensor() ;
58 |
59 | size_t getMemorySize() const ;
60 |
61 | protected:
62 | MexContext & context ;
63 | mxArray const * array ;
64 | #ifdef ENABLE_GPU
65 | mxGPUArray const * gpuArray ;
66 | #endif
67 | bool isArrayOwner ;
68 |
69 | private: // prevention
70 | MexTensor(MexTensor const &) ;
71 | MexTensor & operator= (MexTensor & tensor) ;
72 | vl::ErrorCode initHelper(DeviceType deviceType, DataType dataType, TensorShape const & shape, bool fillWithZeros = false) ;
73 | } ;
74 |
75 | void print(char const * str, MexTensor const & tensor) ;
76 |
77 | void mexThrowError(Context const& context, vl::ErrorCode error) ;
78 | }
79 |
80 |
81 | #endif /* defined(__vl__datamex__) */
82 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/bilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file bilinearsampler.hpp
2 | // @brief Bilinear sampler implementation
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_BILINEARSAMPLER_H
15 | #define VL_BILINEARSAMPLER_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | // defines the dispatcher for CUDA kernels:
21 | namespace vl { namespace impl {
22 |
23 |   template<vl::DeviceType dev, typename type>
24 | struct bilinearsampler {
25 |
26 | static vl::ErrorCode
27 | forward(Context& context,
28 | type* output,
29 | type const* data,
30 | type const* grid,
31 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
32 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
33 |
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | type* derData,
38 | type* derGrid,
39 | type const* data,
40 | type const* grid,
41 | type const* derOutput,
42 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
43 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
44 | } ;
45 |
46 | } }
47 |
48 | #endif /* defined(VL_BILINEARSAMPLER_H) */
49 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/bnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file bnorm.hpp
2 | // @brief Batch Normalization block implementation
3 | // @author Sebastien Ehrhardt
4 |
5 | /*
6 | Copyright (C) 2015-16 Sebastien Ehrhardt.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm__
14 | #define __vl__bnorm__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DeviceType dev, typename type>
22 | struct bnorm
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | type* output,
27 |             type* moments, // can be null and it will be allocated internally
28 | type const* data,
29 | type const* multipliers,
30 | type const* biases,
31 | size_t height, size_t width, size_t depth, size_t size,
32 | type epsilon) ;
33 |
34 | static vl::ErrorCode
35 | forward_given_moments(Context& context,
36 | type* output,
37 | type const* moments,
38 | type const* data,
39 | type const* multipliers,
40 | type const* biases,
41 | size_t height, size_t width, size_t depth, size_t size) ;
42 |
43 | static vl::ErrorCode
44 | backward(Context& context,
45 | type* derData,
46 | type* derMultipliers,
47 | type* derBiases,
48 |              type* moments, // can be null and it will be allocated internally
49 | type const* data,
50 | type const* multipliers,
51 | type const* biases,
52 | type const* derOutput,
53 | size_t height, size_t width, size_t depth, size_t size,
54 | type epsilon) ;
55 |
56 | static vl::ErrorCode
57 | backward_given_moments(Context& context,
58 | type* derData,
59 | type* derMultipliers,
60 | type* derBiases,
61 | type const* moments,
62 | type const* data,
63 | type const* multipliers,
64 | type const* biases,
65 | type const* derOutput,
66 | size_t height, size_t width, size_t depth, size_t size,
67 | type epsilon) ;
68 | } ;
69 |
70 | } }
71 | #endif /* __vl__bnorm__ */
72 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/compat.h:
--------------------------------------------------------------------------------
1 | #ifndef COMPAT_H
2 | #define COMPAT_H
3 |
4 | #ifdef _MSC_VER
5 | #define snprintf _snprintf
6 | #define vsnprintf _vsnprintf
7 | #define __func__ __FUNCTION__
8 | #undef max
9 | #undef min
10 |
11 | #ifdef _WIN64
12 | typedef signed __int64 ssize_t;
13 | #else
14 | typedef signed int ssize_t;
15 | #endif // _WIN64
16 |
17 | #if _MSC_VER < 1700
18 | #define false 0
19 | #define true 1
20 | #elif _MSC_VER > 1700
21 | #include <stdbool.h>
22 | #endif // _MSC_VER < 1700
23 |
24 | #if _MSC_VER < 1800
25 | // Add some missing functions from C99
26 | #define isnan(x) _isnan(x)
27 | #define isinf(x) (!_finite(x))
28 | #define round(x) (x >= 0.0 ? (double)(int)(x + 0.5) : (double)(int)(x - 0.5))
29 | #define roundf(x) (x >= 0.0f ? (float)(int)(x + 0.5f) : (float)(int)(x - 0.5f))
30 | #endif
31 |
32 | #endif // _MSC_VER
33 |
34 |
35 | #endif // COMPAT_H
36 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/copy.hpp:
--------------------------------------------------------------------------------
1 | // @file copy.hpp
2 | // @brief Copy and other data operations
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__copy__
14 | #define __vl__copy__
15 |
16 | #include "../data.hpp"
17 |
18 | namespace vl { namespace impl {
19 |
20 |   template<vl::DeviceType dev, typename type>
21 | struct operations
22 | {
23 | static vl::ErrorCode copy(type * dst, type const * src, size_t numElements, double mult = 1.0) ;
24 | static vl::ErrorCode fill(type * dst, size_t numElements, type value) ;
25 | } ;
26 | } }
27 |
28 | #endif /* defined(__vl__copy__) */
29 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/copy_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file copy_cpu.cpp
2 | // @brief Copy and other data operations (CPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | namespace vl { namespace impl {
17 |
18 |   template<typename type>
19 |   struct operations<vl::VLDT_CPU, type>
20 | {
21 | static vl::ErrorCode
22 | copy(type * dst,
23 | type const * src,
24 | size_t numElements,
25 | double mult)
26 | {
27 | if (mult == 1.0) {
28 | memcpy(dst, src, numElements * sizeof(type)) ;
29 | } else {
30 | auto end = src + numElements ;
31 | while (src != end) {
32 | *dst++ = mult * (*src++) ;
33 | }
34 | }
35 | return VLE_Success ;
36 | }
37 |
38 | static vl::ErrorCode
39 | fill(type * dst,
40 | size_t numElements,
41 | type value)
42 | {
43 | for (size_t k = 0 ; k < numElements ; ++k) {
44 | dst[k] = value ;
45 | }
46 | return VLE_Success ;
47 | }
48 | } ;
49 |
50 | } }
51 |
52 | template struct vl::impl::operations<vl::VLDT_CPU, float> ;
53 |
54 | #ifdef ENABLE_DOUBLE
55 | template struct vl::impl::operations<vl::VLDT_CPU, double> ;
56 | #endif
57 |
58 |
59 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/copy_gpu.cu:
--------------------------------------------------------------------------------
1 | // @file copy_gpu.cu
2 | // @brief Copy and other data operations (GPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include "../datacu.hpp"
15 | #include <string.h>
16 |
17 | template<typename type> __global__ void
18 | fill_kernel (type * data, type value, size_t size)
19 | {
20 | int index = threadIdx.x + blockIdx.x * blockDim.x ;
21 | if (index < size) data[index] = value ;
22 | }
23 |
24 | template<typename type> __global__ void
25 | copy_kernel (type *dst, type const *src, size_t size, type mult)
26 | {
27 | int index = threadIdx.x + blockIdx.x * blockDim.x ;
28 | if (index < size) dst[index] = mult * src[index] ;
29 | }
30 |
31 | namespace vl { namespace impl {
32 |
33 |   template<typename type>
34 |   struct operations<vl::VLDT_GPU, type>
35 | {
36 | static vl::ErrorCode
37 | copy(type * dst,
38 | type const * src,
39 | size_t numElements,
40 | double mult)
41 | {
42 | if (mult == 1.0) {
43 | cudaMemcpy(dst, src, numElements * sizeof(type), cudaMemcpyDeviceToDevice) ;
44 | } else {
45 |         copy_kernel<type>
46 |         <<<divideAndRoundUp(numElements, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
47 | (dst, src, numElements, mult) ;
48 | cudaError_t error = cudaGetLastError() ;
49 | if (error != cudaSuccess) {
50 | return VLE_Cuda ;
51 | }
52 | }
53 | return VLE_Success ;
54 | }
55 |
56 | static vl::ErrorCode
57 | fill(type * dst,
58 | size_t numElements,
59 | type value)
60 | {
61 |       fill_kernel<type>
62 |       <<<divideAndRoundUp(numElements, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
63 | (dst, numElements, value) ;
64 |
65 | cudaError_t error = cudaGetLastError() ;
66 | if (error != cudaSuccess) {
67 | return VLE_Cuda ;
68 | }
69 | return VLE_Success ;
70 | }
71 | } ;
72 |
73 | } }
74 |
75 | template struct vl::impl::operations<vl::VLDT_GPU, float> ;
76 |
77 | #ifdef ENABLE_DOUBLE
78 | template struct vl::impl::operations<vl::VLDT_GPU, double> ;
79 | #endif
80 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/im2row.hpp:
--------------------------------------------------------------------------------
1 | // @file im2row.hpp
2 | // @brief Stack image patches as matrix rows
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__im2row__
14 | #define __vl__im2row__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DeviceType dev, typename type>
22 | struct im2row {
23 |
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | type* stacked,
27 | type const* data,
28 | size_t height, size_t width, size_t depth,
29 | size_t windowHeight, size_t windowWidth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
32 |             int dilateY, int dilateX) ;
33 |
34 | static vl::ErrorCode
35 | backward(vl::Context& context,
36 | type* data,
37 | type const* stacked,
38 | size_t height, size_t width, size_t depth,
39 | size_t windowHeight, size_t windowWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight,
42 | int dilateY, int dilateX) ;
43 | } ;
44 |
45 | } }
46 |
47 | #endif /* defined(__vl__im2row__) */
48 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnbias_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias_cudnn.hpp
2 | // @brief Bias block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias_cudnn__
14 | #define __vl__nnbias_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | // todo: data type should be handled internally?
22 |
23 |   template<vl::DataType dataType>
24 | struct nnbias_cudnn
25 | {
26 | static vl::ErrorCode
27 | forward(vl::Context& context,
28 | vl::Tensor output, double outputMult,
29 | vl::Tensor data, double dataMult,
30 | vl::Tensor biases, double biasesMult) ;
31 |
32 | static vl::ErrorCode
33 | backward(vl::Context& context,
34 | vl::Tensor derData, double derDataMult,
35 | vl::Tensor derBiases, double derBiasesMult,
36 | vl::Tensor derOutput, double derOutputMult) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbias_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnbilinearsampler_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler_cudnn.hpp
2 | // @brief BilinearSampler CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bilinearsampler_cudnn__
14 | #define __vl__bilinearsampler_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DataType dataType>
22 | struct nnbilinearsampler_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output,
27 | Tensor data,
28 | Tensor grid) ;
29 |
30 | static vl::ErrorCode
31 | backward(Context& context,
32 | Tensor derData,
33 | Tensor derGrid,
34 | Tensor data,
35 | Tensor grid,
36 | Tensor derOutput) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbilinearsampler_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnbnorm_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbnorm_cudnn.hpp
2 | // @brief bnorm CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm_cudnn__
14 | #define __vl__bnorm_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DataType dataType>
22 | struct nnbnorm_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(vl::Context& context,
26 | vl::Tensor output,
27 | vl::Tensor moments,
28 | vl::Tensor data,
29 | vl::Tensor multipliers,
30 | vl::Tensor biases,
31 | double epsilon) ;
32 |
33 | static vl::ErrorCode
34 | forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments,
37 | vl::Tensor data,
38 | vl::Tensor multipliers,
39 | vl::Tensor biases) ;
40 |
41 | static vl::ErrorCode
42 | backward(Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derMultipliers,
45 | vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor multipliers,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | double epsilon) ;
52 |
53 | static vl::ErrorCode
54 | backward_given_moments(Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derMultipliers,
57 | vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor multipliers,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | double epsilon) ;
64 | } ;
65 |
66 | } }
67 |
68 | #endif /* defined(__vl__nnbnorm_cudnn__) */
69 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnconv_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DataType dataType>
22 | struct nnconv_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output, double outputMult,
27 | Tensor data, double dataMult,
28 | Tensor filters,
29 | Tensor biases,
30 | int strideX, int strideY,
31 | int padLeft, int padRight,
32 | int padTop, int padBottom,
33 | int dilateX, int dilateY) ;
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | Tensor derData,
38 | Tensor derFilters,
39 | Tensor derBiases,
40 | Tensor data,
41 | Tensor filters,
42 | Tensor derOutput,
43 | int strideX, int strideY,
44 | int padLeft, int padRight,
45 | int padTop, int padBottom,
46 | int dilateX, int dilateY) ;
47 | } ;
48 |
49 | } }
50 | #endif /* defined(__vl__nnconv_cudnn__) */
51 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nncov_pool_blas.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_pool_blas.hpp
2 | // @brief MPN-COV
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 | #ifndef __vl__nncov__pool__blas__
13 | #define __vl__nncov__pool__blas__
14 |
15 | #include "../data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl { namespace impl {
19 |   template<vl::DeviceType dev, typename type>
20 | struct cov_pool
21 | {
22 | static vl::ErrorCode
23 | forward(Context& context,
24 | type* output,
25 | type const* data,
26 | size_t height, size_t width, size_t depth, size_t num);
27 | static vl::ErrorCode
28 | backward(Context& context,
29 | type* derData,
30 | type const* data,
31 | type const* derOutput,
32 | size_t height, size_t width, size_t depth, size_t num);
33 | };
34 | } }
35 |
36 |
37 | #endif /* __vl__nncov__pool__blas__ */
38 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nncov_sqrtm_blas.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_sqrtm_blas.hpp
2 | // @brief MPN-COV
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 | #ifndef __vl__nncov__sqrtm__blas__
13 | #define __vl__nncov__sqrtm__blas__
14 |
15 | #include "../data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl { namespace impl {
19 |   template<vl::DeviceType dev, typename type>
20 | struct cov_sqrtm
21 | {
22 | static vl::ErrorCode
23 | forward(Context& context,
24 | type* output,
25 | type const* data,
26 | type* aux_Y,
27 | type* aux_Z,
28 | size_t height, size_t width, size_t depth, size_t num,
29 | int coef,
30 | int iterNum);
31 | static vl::ErrorCode
32 | backward(Context& context,
33 | type* derData,
34 | type const* data,
35 | type const* derOutput,
36 | type const* aux_Y,
37 | type const* aux_Z,
38 | size_t height, size_t width, size_t depth, size_t num,
39 | int coef,
40 | int iterNum);
41 | };
42 | } }
43 |
44 |
45 | #endif /* __vl__nncov__sqrtm__blas__ */
46 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nncov_traceNorm_blas.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_traceNorm_blas.hpp
2 | // @brief MPN-COV
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 | #ifndef __vl__nncov__traceNorm__blas__
13 | #define __vl__nncov__traceNorm__blas__
14 |
15 | #include "../data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl { namespace impl {
19 |   template<vl::DeviceType dev, typename type>
20 | struct cov_traceNorm
21 | {
22 | static vl::ErrorCode
23 | forward(Context& context,
24 | type* output,
25 | type const* data,
26 | type* aux_T,
27 | size_t height, size_t width, size_t depth, size_t num);
28 | static vl::ErrorCode
29 | backward(Context& context,
30 | type* derData,
31 | type const* data,
32 | type const* derOutput,
33 | type const* derOutput_aux,
34 | type const* aux_T,
35 | size_t height, size_t width, size_t depth, size_t num);
36 | static vl::ErrorCode
37 | forward_aux(Context& context,
38 | type* output,
39 | type const* data,
40 | type* aux_T,
41 | size_t height, size_t width, size_t depth, size_t num);
42 | static vl::ErrorCode
43 | backward_aux(Context& context,
44 | type* derData,
45 | type* derData_aux,
46 | type const* data,
47 | type const* derOutput,
48 | type const* aux_T,
49 | size_t height, size_t width, size_t depth, size_t num);
50 | };
51 | } }
52 |
53 |
54 | #endif /* __vl__nncov__traceNorm__blas__ */
55 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnimsn_cov_blas.hpp:
--------------------------------------------------------------------------------
1 | // @file nnimsn_cov_blas.hpp
2 | // @brief MPN-COV
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 | #ifndef __vl__nnimsn__cov__blas__
13 | #define __vl__nnimsn__cov__blas__
14 |
15 | #include "../data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl { namespace impl {
19 |   template<vl::DeviceType dev, typename type>
20 | struct imsn_cov
21 | {
22 | static vl::ErrorCode
23 | forward(Context& context,
24 | type* output,
25 | type const* data,
26 | type* aux_Y,
27 | type* aux_Z,
28 | type* aux_T,
29 | size_t height, size_t width, size_t depth, size_t num,
30 | int iterNum);
31 | static vl::ErrorCode
32 | backward(Context& context,
33 | type* derData,
34 | type const* data,
35 | type const* derOutput,
36 | type const* aux_Y,
37 | type const* aux_Z,
38 | type const* aux_T,
39 | size_t height, size_t width, size_t depth, size_t num,
40 | int iterNum);
41 | };
42 | } }
43 |
44 |
45 | #endif /* __vl__nnimsn__cov__blas__ */
46 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnmpn_cov_blas.hpp:
--------------------------------------------------------------------------------
1 | // @file nnmpn_cov_blas.hpp
2 | // @brief MPN-COV
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 | #ifndef __vl__nnmpn__cov__blas__
13 | #define __vl__nnmpn__cov__blas__
14 |
15 | #include "../data.hpp"
16 | #include <cstddef>
17 |
18 | namespace vl { namespace impl {
19 |   template<vl::DeviceType dev, typename type>
20 | struct mpn_cov
21 | {
22 | static vl::ErrorCode
23 | forward(Context& context,
24 | type* output,
25 | type const* data,
26 | type* aux_S,
27 | type* aux_V,
28 | type* aux_D,
29 | size_t height, size_t width, size_t depth, size_t num,
30 | type epsilon,
31 | type alpha);
32 | static vl::ErrorCode
33 | backward(Context& context,
34 | type* derData,
35 | type const* data,
36 | type const* derOutput,
37 | type const* aux_S,
38 | type const* aux_V,
39 | type const* aux_D,
40 | size_t height, size_t width, size_t depth, size_t num,
41 | type epsilon,
42 | type alpha);
43 | };
44 | } }
45 |
46 |
47 | #endif /* __vl__nnmpn__cov__blas__ */
48 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/nnpooling_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling_cudnn.hpp
2 | // @brief Pooling block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling_cudnn__
14 | #define __vl__nnpooling_cudnn__
15 |
16 | #include "../nnpooling.hpp"
17 | #include "../data.hpp"
18 | #include "cudnn.h"
19 |
20 |
21 | namespace vl { namespace impl {
22 |
23 | // todo: data type should be handled internally?
24 |
25 |   template<vl::DataType dataType>
26 | struct nnpooling_cudnn
27 | {
28 | static vl::ErrorCode
29 | forward(Context& context,
30 | Tensor output,
31 | Tensor data,
32 | vl::PoolingMethod method,
33 | int poolHeight, int poolWidth,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 |
38 | static vl::ErrorCode
39 | backward(Context& context,
40 | Tensor derData,
41 | Tensor data,
42 | Tensor output,
43 | Tensor derOutput,
44 | vl::PoolingMethod method,
45 | int poolHeight, int poolWidth,
46 | int strideY, int strideX,
47 | int padTop, int padBottom,
48 | int padLeft, int padRight) ;
49 | };
50 |
51 | } }
52 |
53 | #endif /* defined(__vl__nnpooling_cudnn__) */
54 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/normalize.hpp:
--------------------------------------------------------------------------------
1 | // @file normalize.hpp
2 | // @brief Normalize block implementation
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__normalize__
14 | #define __vl__normalize__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<vl::DeviceType dev, typename type>
22 | struct lrn
23 | {
24 | static vl::ErrorCode
25 | forward(type* output,
26 | type const* data,
27 | size_t height, size_t width, size_t depth, size_t size,
28 |             size_t normDepth,
29 | type kappa, type alpha, type beta) ;
30 |
31 | static vl::ErrorCode
32 | backward(type* derData,
33 | type const* data,
34 | type const* derOutput,
35 | size_t height, size_t width, size_t depth, size_t size,
36 |              size_t normDepth,
37 | type kappa, type alpha, type beta) ;
38 | } ;
39 |
40 | } }
41 |
42 | #endif /* __vl__normalize__ */
43 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/pooling.hpp:
--------------------------------------------------------------------------------
1 | // @file pooling.hpp
2 | // @brief Pooling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_POOLING_H
15 | #define VL_POOLING_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 |   template<vl::DeviceType dev, typename type>
23 | struct pooling_max {
24 | typedef type data_type ;
25 |
26 | static vl::ErrorCode
27 | forward(data_type* output,
28 | data_type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t poolHeight, size_t poolWidth,
31 | size_t strideY, size_t strideX,
32 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
33 |
34 | static vl::ErrorCode
35 | backward(data_type* derData,
36 | data_type const* data,
37 | data_type const* derOutput,
38 | size_t height, size_t width, size_t depth,
39 | size_t poolHeight, size_t poolWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
42 | } ;
43 |
44 |   template<vl::DeviceType dev, typename type>
45 | struct pooling_average {
46 | typedef type data_type ;
47 |
48 | static vl::ErrorCode
49 | forward(data_type* output,
50 | data_type const* data,
51 | size_t height, size_t width, size_t depth,
52 | size_t poolHeight, size_t poolWidth,
53 | size_t strideY, size_t strideX,
54 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
55 |
56 | static vl::ErrorCode
57 | backward(type* derData,
58 | type const* derOutput,
59 | size_t height, size_t width, size_t depth,
60 | size_t poolHeight, size_t poolWidth,
61 | size_t strideY, size_t strideX,
62 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
63 | } ;
64 |
65 | } }
66 |
67 | #endif /* defined(VL_POOLING_H) */
68 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/subsample.hpp:
--------------------------------------------------------------------------------
1 | // @file subsample.hpp
2 | // @brief Subsampling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_NNSUBSAMPLE_H
15 | #define VL_NNSUBSAMPLE_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 |   template<vl::DeviceType dev, typename type>
23 | struct subsample {
24 |
25 | static vl::ErrorCode
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
32 |
33 | static vl::ErrorCode
34 | backward(vl::Context& context,
35 | type* derData,
36 | type const* derOutput,
37 | size_t height, size_t width, size_t depth,
38 | size_t strideY, size_t strideX,
39 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
40 | } ;
41 |
42 | } }
43 |
44 | #endif /* defined(VL_NNSUBSAMPLE_H) */
45 |
--------------------------------------------------------------------------------
/matlab/src/bits/impl/subsample_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file subsample_cpu.cpp
2 | // @brief Subsampling block implementation (CPU)
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #include "subsample.hpp"
15 | #include <cstring>
16 | #include <cstddef>
17 |
18 |
19 | namespace vl { namespace impl {
20 |
21 |   template<typename type>
22 |   struct subsample<vl::VLDT_CPU, type>
23 | {
24 |
25 | static vl::ErrorCode
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
32 | {
33 | int outputWidth = (width + (padLeft + padRight) - 1)/strideX + 1 ;
34 | int outputHeight = (height + (padTop + padBottom) - 1)/strideY + 1 ;
35 | for (int z = 0; z < depth; ++z) {
36 | for (int x = 0; x < outputWidth; ++x) {
37 | for (int y = 0; y < outputHeight; ++y) {
38 | int x1 = x * (signed)strideX - (signed)padLeft ;
39 | int y1 = y * (signed)strideY - (signed)padTop ;
40 | type value = 0 ;
41 | if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) {
42 | value = data[x1 * height + y1] ;
43 | }
44 | output[x * outputHeight + y] = value ;
45 | }
46 | }
47 | data += width*height ;
48 | output += outputWidth*outputHeight ;
49 | }
50 | return VLE_Success ;
51 | }
52 |
53 | static vl::ErrorCode
54 | backward(vl::Context& context,
55 | type* derData,
56 | type const* derOutput,
57 | size_t height, size_t width, size_t depth,
58 | size_t strideY, size_t strideX,
59 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
60 | {
61 | int outputWidth = (width + (padLeft + padRight) - 1)/strideX + 1 ;
62 | int outputHeight = (height + (padTop + padBottom) - 1)/strideY + 1 ;
63 |
64 | memset(derData, 0, sizeof(type) * width * height * depth) ;
65 |
66 | for (int z = 0; z < depth; ++z) {
67 | for (int px = 0; px < outputWidth; ++px) {
68 | for (int py = 0; py < outputHeight; ++py) {
69 | int x1 = px * (int)strideX - (int)padLeft ;
70 | int y1 = py * (int)strideY - (int)padTop ;
71 | if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) {
72 | derData[x1 * height + y1] = derOutput[px * outputHeight + py] ;
73 | }
74 | }
75 | }
76 | derData += width*height ;
77 | derOutput += outputWidth*outputHeight ;
78 | }
79 | return VLE_Success ;
80 | }
81 | } ;
82 |
83 | } }
84 |
85 | // Instantiations
86 | template struct vl::impl::subsample<vl::VLDT_CPU, float> ;
87 |
88 | #ifdef ENABLE_DOUBLE
89 | template struct vl::impl::subsample<vl::VLDT_CPU, double> ;
90 | #endif
91 |
--------------------------------------------------------------------------------
/matlab/src/bits/imread.cpp:
--------------------------------------------------------------------------------
1 | // @file imread.cpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "imread.hpp"
14 | #include <cstring>
15 |
16 | vl::ImageShape::ImageShape()
17 | : height(0), width(0), depth(0)
18 | { }
19 |
20 | vl::ImageShape::ImageShape(size_t height, size_t width, size_t depth)
21 | : height(height), width(width), depth(depth)
22 | { }
23 |
24 | vl::ImageShape::ImageShape(ImageShape const & im)
25 | : height(im.height), width(im.width), depth(im.depth)
26 | { }
27 |
28 | vl::ImageShape & vl::ImageShape::operator =(vl::ImageShape const & im)
29 | {
30 | height = im.height ;
31 | width = im.width ;
32 | depth = im.depth ;
33 | return *this ;
34 | }
35 |
36 | bool vl::ImageShape::operator == (vl::ImageShape const & im)
37 | {
38 | return
39 | (height == im.height) &
40 | (width == im.width) &
41 | (depth == im.depth) ;
42 | }
43 |
44 | size_t vl::ImageShape::getNumElements() const
45 | {
46 | return height*width*depth ;
47 | }
48 |
49 | void vl::ImageShape::clear()
50 | {
51 | height = 0 ;
52 | width = 0 ;
53 | depth = 0 ;
54 | }
55 |
56 | vl::Image::Image()
57 | : shape(), memory(NULL)
58 | { }
59 |
60 | vl::Image::Image(Image const & im)
61 | : shape(im.shape), memory(im.memory)
62 | { }
63 |
64 | vl::Image::Image(vl::ImageShape const & shape, float * memory)
65 | : shape(shape), memory(memory)
66 | { }
67 |
68 | vl::ImageShape const & vl::Image::getShape() const { return shape ; }
69 | float * vl::Image::getMemory() const { return memory ; }
70 |
71 | void vl::Image::clear()
72 | {
73 | shape.clear() ;
74 | memory = 0 ;
75 | }
76 |
--------------------------------------------------------------------------------
/matlab/src/bits/imread.hpp:
--------------------------------------------------------------------------------
1 | // @file imread.hpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__imread__
14 | #define __vl__imread__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | #define VL_IMAGE_ERROR_MSG_MAX_LENGTH 256
21 |
22 | struct ImageShape
23 | {
24 | size_t height ;
25 | size_t width ;
26 | size_t depth ;
27 |
28 | ImageShape() ;
29 | ImageShape(size_t height, size_t width, size_t depth) ;
30 | ImageShape(ImageShape const & im) ;
31 | ImageShape & operator = (ImageShape const & im) ;
32 | bool operator == (ImageShape const & im) ;
33 |
34 | size_t getNumElements() const ;
35 | void clear() ;
36 | } ;
37 |
38 | class Image
39 | {
40 | public:
41 | Image() ;
42 | Image(Image const & im) ;
43 | Image(ImageShape const & shape, float * memory = NULL) ;
44 | ImageShape const & getShape() const ;
45 | float * getMemory() const ;
46 | void clear() ;
47 |
48 | protected:
49 | ImageShape shape ;
50 | float * memory ;
51 | } ;
52 |
53 | class ImageReader
54 | {
55 | public:
56 | ImageReader() ;
57 | ~ImageReader() ;
58 | vl::ErrorCode readShape(ImageShape & image, char const * fileName) ;
59 | vl::ErrorCode readPixels(float * memory, char const * fileName) ;
60 | char const * getLastErrorMessage() const ;
61 |
62 | private:
63 | class Impl ;
64 | Impl * impl ;
65 | } ;
66 | }
67 |
68 | #endif
69 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbias.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbias.cu should be compiled instead"
3 | #endif
4 | #include "nnbias.cu"
5 |
6 | /**
7 | @brief nnbias_forward
8 | @param context context.
9 | @param output output tensor $\by$ [output].
10 | @param outputMult output tensor multiplier $\alpha$.
11 | @param data data tensor $\bx$.
12 | @param dataMult data tensor multiplier $\beta$.
13 | @param biases biases tensor $\bb$.
14 | @param biasesMult biases tensor multiplier $\gamma$.
15 |
16 | The function computes
17 | @f[
18 | y_{ijkd} \leftarrow
19 | \alpha y_{ijkd} +
20 | \beta x_{ijkd} +
21 | \gamma b_k.
22 | @f]
23 |
24 | @a data can be the null tensor, in which case this tensor
25 | is dropped in the summation.
26 | */
27 |
28 | /**
29 | @brief nnbias_backward
30 | @param context context.
31 | @param derData data derivative tensor $d\bx$ [output].
32 | @param derDataMult data derivative tensor multiplier $\eta$.
33 | @param derBiases biases derivative tensor $d\bb$ [output].
34 |  @param derBiasesMult biases derivative tensor multiplier $\tau$.
35 | @param data data tensor $\bx$.
36 | @param dataMult data tensor multiplier $\beta$.
37 | @param biases biases tensor $\bb$.
38 | @param biasesMult biases tensor multiplier $\gamma$.
39 |
40 |  If @a derData is the null tensor, this derivative is not computed and
41 |  @a biases can also be null.
42 |
43 |  If @a derBiases is the null tensor, this derivative is not computed and
44 |  @a data can also be null.
45 | */
46 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbias.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias.hpp
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-17 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias__
14 | #define __vl__nnbias__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl { namespace nn {
19 |
20 | class Bias {
21 | public:
22 | Bias(vl::Context &context) ;
23 |
24 | // output <- outputMult * output + inputMult * input + biasMult * bias
25 | vl::ErrorCode forward(vl::Tensor &output, double outputMult,
26 | vl::Tensor const &input, double inputMult,
27 | vl::Tensor const &bias, double biasMult) ;
28 |
29 | vl::ErrorCode backward(vl::Tensor &derInput, double derInputMult,
30 | vl::Tensor &derBias, double derBiasMult,
31 | double inputMult, double biasMult,
32 | vl::Tensor const &derOutput) ;
33 |
34 | vl::Context& context ;
35 | } ;
36 |
37 | } }
38 |
39 | #endif /* defined(__vl__nnbias__) */
40 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler.hpp
2 | // @brief Bilinear sampler block
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016-17 Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbilinearsampler__
14 | #define __vl__nnbilinearsampler__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace nn {
20 |
21 | class BilinearSampler {
22 | public:
23 | BilinearSampler(Context &context) ;
24 |
25 | vl::ErrorCode forward(vl::Tensor &output,
26 | vl::Tensor const &input,
27 | vl::Tensor const &grid) ;
28 |
29 | vl::ErrorCode backward(vl::Tensor &derInput,
30 | vl::Tensor &derGrid,
31 | vl::Tensor const &input,
32 | vl::Tensor const &grid,
33 | vl::Tensor const &derOutput) ;
34 |
35 | Context &context ;
36 | } ;
37 |
38 | } }
39 |
40 | #endif /* defined(__vl__nnbilinearsampler__) */
41 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnbnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbnorm.hpp
2 | // @brief Batch normalization block
3 | // @author Sebastien Ehrhardt
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2015-16 Sebastien Ehrhardt and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __vl__nnbnorm__
15 | #define __vl__nnbnorm__
16 |
17 | #include "data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace nn {
21 |
22 | class BatchNorm {
23 | public:
24 | BatchNorm(vl::Context &context,
25 | double epsilon) ;
26 |
27 | vl::ErrorCode forward(vl::Tensor &output,
28 | vl::Tensor &moment,
29 | vl::Tensor const &input,
30 | vl::Tensor const &multiplier,
31 | vl::Tensor const &bias) ;
32 |
33 | vl::ErrorCode forwardWithMoment(vl::Tensor &output,
34 | vl::Tensor const &moment,
35 | vl::Tensor const &input,
36 | vl::Tensor const &multiplier,
37 | vl::Tensor const &bias) ;
38 |
39 | vl::ErrorCode backward(vl::Tensor &derInput,
40 | vl::Tensor &derMultiplier,
41 | vl::Tensor &derBias,
42 | vl::Tensor &moment,
43 | vl::Tensor const &input,
44 | vl::Tensor const &multiplier,
45 | vl::Tensor const &bias,
46 | vl::Tensor const &derOutput) ;
47 |
48 | vl::ErrorCode backwardWithMoment(vl::Tensor &derInput,
49 | vl::Tensor &derMultiplier,
50 | vl::Tensor &derBias,
51 | vl::Tensor const &moment,
52 | vl::Tensor const &input,
53 | vl::Tensor const &multiplier,
54 | vl::Tensor const &bias,
55 | vl::Tensor const &derOutput) ;
56 |
57 | vl::Context& context ;
58 | double epsilon ;
59 | } ;
60 |
61 | } }
62 |
63 | #endif /* defined(__vl__nnbnorm__) */
64 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnconv.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnconv.cu should be compiled instead"
3 | #endif
4 | #include "nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnconv.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv.hpp
2 | // @brief Convolution block
3 | // @author Andrea Vedaldi
4 | // @author Max Jaderberg
5 |
6 | /*
7 | Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg
8 | Copyright (C) 2015-17 Andrea Vedaldi.
9 |
10 | All rights reserved.
11 |
12 | This file is part of the VLFeat library and is made available under
13 | the terms of the BSD license (see the COPYING file).
14 | */
15 |
16 | #ifndef __vl__nnconv__
17 | #define __vl__nnconv__
18 |
19 | #include "data.hpp"
20 |
21 | namespace vl { namespace nn {
22 |
23 | class Convolution {
24 | public:
25 | Convolution(Context &context,
26 | int strideY, int strideX,
27 | int padTop, int padBottom,
28 | int padLeft, int padRight,
29 | int dilateY, int dilateX) ;
30 |
31 | vl::ErrorCode forward(vl::Tensor &output, double outputMult,
32 | vl::Tensor const& input, double inputMult,
33 | vl::Tensor const& filter,
34 | vl::Tensor const& bias) ;
35 |
36 | vl::ErrorCode backward(vl::Tensor &derInput,
37 | vl::Tensor &derFilter,
38 | vl::Tensor &derBias,
39 | vl::Tensor const &input,
40 | vl::Tensor const &filter,
41 | vl::Tensor const &derOutput) ;
42 |
43 | Context &context ;
44 | int strideY ;
45 | int strideX ;
46 | int padTop ;
47 | int padBottom ;
48 | int padLeft ;
49 | int padRight ;
50 | int dilateY ;
51 | int dilateX ;
52 | } ;
53 |
54 | class ConvolutionTranspose {
55 | public:
56 | ConvolutionTranspose(Context &context,
57 | int upsampleY, int upsampleX,
58 | int cropTop, int cropBottom,
59 | int cropLeft, int cropRight) ;
60 |
61 | vl::ErrorCode forward(vl::Tensor &output,
62 | vl::Tensor const &input,
63 | vl::Tensor const &filter,
64 | vl::Tensor const &bias) ;
65 |
66 | vl::ErrorCode backward(vl::Tensor &derData,
67 | vl::Tensor &derFilter,
68 | vl::Tensor &derBias,
69 | vl::Tensor const &input,
70 | vl::Tensor const &filter,
71 | vl::Tensor const &derOutput);
72 |
73 | Context &context ;
74 | int upsampleY ;
75 | int upsampleX ;
76 | int cropTop ;
77 | int cropBottom ;
78 | int cropLeft ;
79 | int cropRight ;
80 | } ;
81 |
82 | } }
83 |
84 |
85 | #endif /* defined(__vl__nnconv__) */
86 |
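The stride, pad, and dilate fields fix the output geometry of the block. A hedged MATLAB sketch of the usual output-size computation for dilated convolution (variable names are illustrative, not part of the API):

% Output height: the effective filter height is dilateY*(FH-1)+1.
H = 32 ; FH = 3 ; strideY = 2 ; padTop = 1 ; padBottom = 1 ; dilateY = 2 ;
FHe = dilateY * (FH - 1) + 1 ;                       % effective filter size
Ho = floor((H + padTop + padBottom - FHe) / strideY) + 1   % = 15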
--------------------------------------------------------------------------------
/matlab/src/bits/nnconv_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template<vl::DataType dataType>
22 | struct nnconv_cudnn
23 | {
24 | static vl::ErrorCode
25 | forward(Context& context,
26 | Tensor output, double outputMult,
27 | Tensor data, double dataMult,
28 | Tensor filters,
29 | Tensor biases,
30 | int strideX, int strideY,
31 | int padLeft, int padRight,
32 | int padTop, int padBottom,
33 | int dilateX, int dilateY) ;
34 |
35 | static vl::ErrorCode
36 | backward(Context& context,
37 | Tensor derData,
38 | Tensor derFilters,
39 | Tensor derBiases,
40 | Tensor data,
41 | Tensor filters,
42 | Tensor derOutput,
43 | int strideX, int strideY,
44 | int padLeft, int padRight,
45 | int padTop, int padBottom,
46 | int dilateX, int dilateY) ;
47 | } ;
48 |
49 | } }
50 | #endif /* defined(__vl__nnconv_cudnn__) */
51 |
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_pool.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nncov_pool.cu should be compiled instead"
3 | #endif
4 | #include "nncov_pool.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_pool.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_pool.hpp
2 | // @brief MPN-COV block
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 |
13 | #ifndef __vl__nncov__pool__
14 | #define __vl__nncov__pool__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nncov_pool_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data) ;
24 |
25 | vl::ErrorCode
26 | nncov_pool_backward(vl::Context& context,
27 | vl::Tensor derData,
28 | vl::Tensor data,
29 | vl::Tensor derOutput) ;
30 | }
31 |
32 |
33 | #endif /* defined(__vl__nncov__pool__) */
34 |
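nncov_pool_forward performs second-order (covariance) pooling of the input features. A rough CPU sketch of what that computes for one image, assuming the standard MPN-COV definition (covariance of the H*W feature vectors):

% X is H x W x D; the pooled descriptor is the D x D covariance of
% the M = H*W feature vectors.
H = 8 ; W = 8 ; D = 16 ;
X = randn(H, W, D, 'single') ;
M = H * W ;
V = reshape(X, M, D) ;                    % one feature vector per location
Vc = bsxfun(@minus, V, mean(V, 1)) ;      % center the features
C = (Vc' * Vc) / M ;                      % D x D covariance matrix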
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_sqrtm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnmpn_cov.cu should be compiled instead"
3 | #endif
4 | #include "nncov_sqrtm.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_sqrtm.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_sqrtm.hpp
2 | // @brief MPN-COV block
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 |
13 | #ifndef __vl__nncov__sqrtm__
14 | #define __vl__nncov__sqrtm__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nncov_sqrtm_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor aux_Y,
25 | vl::Tensor aux_Z,
26 | int coef,
27 | int iterNum) ;
28 |
29 | vl::ErrorCode
30 | nncov_sqrtm_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor data,
33 | vl::Tensor derOutput,
34 | vl::Tensor aux_Y,
35 | vl::Tensor aux_Z,
36 | int coef,
37 | int iterNum) ;
38 | }
39 |
40 |
41 | #endif /* defined(__vl__nncov__sqrtm__) */
42 |
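The aux_Y/aux_Z tensors and the iterNum argument suggest the coupled Newton-Schulz iteration used by iSQRT-COV to approximate the matrix square root without an eigendecomposition; a sketch under that assumption:

% Coupled Newton-Schulz iteration for the square root of an SPD
% matrix A, pre-normalized so that the iteration converges.
A = gallery('lehmer', 8) ;                % sample SPD matrix
A = A / trace(A) ;                        % trace pre-normalization
Y = A ; Z = eye(8) ;
for k = 1:5                               % iterNum steps
  T = 0.5 * (3 * eye(8) - Z * Y) ;
  Y = Y * T ;                             % Y -> A^(1/2)
  Z = T * Z ;                             % Z -> A^(-1/2)
end
norm(Y * Y - A, 'fro')                    % shrinks with each step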
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_traceNorm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nncov_traceNorm.cu should be compiled instead"
3 | #endif
4 | #include "nncov_traceNorm.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nncov_traceNorm.hpp:
--------------------------------------------------------------------------------
1 | // @file nncov_traceNorm.hpp
2 | // @brief MPN-COV block
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 |
13 | #ifndef __vl__nncov__traceNorm__
14 | #define __vl__nncov__traceNorm__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nncov_traceNorm_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor aux_T) ;
25 |
26 | vl::ErrorCode
27 | nncov_traceNorm_backward(vl::Context& context,
28 | vl::Tensor derData,
29 | vl::Tensor data,
30 | vl::Tensor derOutput,
31 | vl::Tensor derOutput_aux,
32 | vl::Tensor aux_T) ;
33 | vl::ErrorCode
34 | nncov_traceNorm_aux_forward(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor data,
37 | vl::Tensor aux_T);
38 | vl::ErrorCode
39 | nncov_traceNorm_aux_backward(vl::Context& context,
40 | vl::Tensor derData,
41 | vl::Tensor derData_aux,
42 | vl::Tensor data,
43 | vl::Tensor derOutput,
44 | vl::Tensor aux_T) ;
45 | }
46 |
47 |
48 | #endif /* defined(__vl__nncov__traceNorm__) */
49 |
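Trace normalization itself is a one-line operation; the auxiliary tensor aux_T plausibly stores the trace so the backward pass can account for the scaling. A minimal sketch under that reading:

% Divide a covariance matrix by its trace and keep the trace around.
A = cov(randn(64, 16)) ;                  % a sample 16 x 16 covariance
T = trace(A) ;                            % auxiliary scalar (aux_T)
An = A / T ;                              % now trace(An) == 1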
--------------------------------------------------------------------------------
/matlab/src/bits/nnfullyconnected.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnfullyconnected.cu should be compiled instead"
3 | #endif
4 | #include "nnfullyconnected.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnfullyconnected.hpp:
--------------------------------------------------------------------------------
1 | // @file nnfullyconnected.hpp
2 | // @brief Fully-connected block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-17 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnfullyconnected__
14 | #define __vl__nnfullyconnected__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl { namespace nn {
19 |
20 | class FullyConnected {
21 | public:
22 | FullyConnected(vl::Context &context) ;
23 |
24 | vl::ErrorCode forward(vl::Tensor &output,
25 | vl::Tensor const& input,
26 | vl::Tensor const& filter,
27 | vl::Tensor const& bias) ;
28 |
29 | vl::ErrorCode backward(vl::Tensor &derInput,
30 | vl::Tensor &derFilter,
31 | vl::Tensor &derBias,
32 | vl::Tensor const &input,
33 | vl::Tensor const &filter,
34 | vl::Tensor const &derOutput) ;
35 |
36 | vl::Context& context ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnfullyconnected__) */
42 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnimsn_cov.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnimsn_cov.cu should be compiled instead"
3 | #endif
4 | #include "nnimsn_cov.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnimsn_cov.hpp:
--------------------------------------------------------------------------------
1 | // @file nnimsn_cov.hpp
2 | // @brief MPN-COV block
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 |
13 | #ifndef __vl__nnimsn__cov__
14 | #define __vl__nnimsn__cov__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnimsn_cov_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor aux_Y,
25 | vl::Tensor aux_Z,
26 | vl::Tensor aux_T,
27 | int iterNum) ;
28 |
29 | vl::ErrorCode
30 | nnimsn_cov_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor data,
33 | vl::Tensor derOutput,
34 | vl::Tensor aux_Y,
35 | vl::Tensor aux_Z,
36 | vl::Tensor aux_T,
37 | int iterNum) ;
38 | }
39 |
40 |
41 | #endif /* defined(__vl__nnimsn__cov__) */
42 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnmpn_cov.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnmpn_cov.cu should be compiled instead"
3 | #endif
4 | #include "nnimsn_cov.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnmpn_cov.hpp:
--------------------------------------------------------------------------------
1 | // @file nnmpn_cov.hpp
2 | // @brief MPN-COV block
3 | // @author Jiangtao Xie
4 | // @author Peihua Li
5 |
6 | /*
7 | Copyright (C) 2017 Peihua Li and Jiangtao Xie
8 |
9 | All rights reserved.
10 | */
11 |
12 |
13 | #ifndef __vl__nnmpn__cov__
14 | #define __vl__nnmpn__cov__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::ErrorCode
21 | nnmpn_cov_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor aux_S,
25 | vl::Tensor aux_V,
26 | vl::Tensor aux_D,
27 | double epsilon,
28 | double alpha) ;
29 |
30 | vl::ErrorCode
31 | nnmpn_cov_backward(vl::Context& context,
32 | vl::Tensor derData,
33 | vl::Tensor data,
34 | vl::Tensor derOutput,
35 | vl::Tensor aux_S,
36 | vl::Tensor aux_V,
37 | vl::Tensor aux_D,
38 | double epsilon,
39 | double alpha) ;
40 | }
41 |
42 |
43 | #endif /* defined(__vl__nnmpn__cov__) */
44 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalize.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnnormalize.hpp:
--------------------------------------------------------------------------------
1 | // @file nnnormalize.hpp
2 | // @brief Normalization block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-17 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnnormalize__
14 | #define __vl__nnnormalize__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace nn {
20 |
21 | class LRN {
22 | public:
23 | LRN(vl::Context &context,
24 | int normDepth = 5,
25 | double kappa = 2.0,
26 | double alpha = 1e-3,
27 | double beta = 0.5) ;
28 |
29 | vl::ErrorCode forward(vl::Tensor &output,
30 | vl::Tensor const &data) ;
31 |
32 | vl::ErrorCode backward(vl::Tensor &derData,
33 | vl::Tensor const &data,
34 | vl::Tensor const &derOutput) ;
35 | vl::Context& context ;
36 | double kappa ;
37 | double alpha ;
38 | double beta ;
39 | int normDepth ;
40 | } ;
41 |
42 | } }
43 |
44 | #endif /* defined(__vl__nnnormalize__) */
45 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnnormalizelp.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalizelp.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalizelp.cu"
5 |
6 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnnormalizelp.hpp:
--------------------------------------------------------------------------------
1 | // @file nnnormalizelp.hpp
2 | // @brief Lp normalization block
3 | // @author Sebastien Ehrhardt
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2017 Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __nnnormalizelp__
15 | #define __nnnormalizelp__
16 |
17 | #include "data.hpp"
18 | #include <vector>
19 | #include <cstddef>
20 |
21 | namespace vl { namespace nn {
22 |
23 | class NormalizeLp {
24 | public:
25 | NormalizeLp(vl::Context &context,
26 | std::vector<int> const& selectedDimensions,
27 | double exponent = 2.0,
28 | double epsilon = 1e-3) ;
29 |
30 | vl::TensorShape getNormsShapeForData(vl::Tensor const &data) ;
31 |
32 | vl::ErrorCode forward(vl::Tensor &output,
33 | vl::Tensor &norms,
34 | vl::Tensor const &data) ;
35 |
36 | vl::ErrorCode forwardWithNorms(vl::Tensor &output,
37 | vl::Tensor const &norms,
38 | vl::Tensor const &data) ;
39 |
40 | vl::ErrorCode backward(vl::Tensor &derData,
41 | vl::Tensor &moments,
42 | vl::Tensor const &data,
43 | vl::Tensor const &derOutput) ;
44 |
45 | vl::ErrorCode backwardWithNorms(vl::Tensor &derData,
46 | vl::Tensor const &norms,
47 | vl::Tensor const &data,
48 | vl::Tensor const &derOutput) ;
49 |
50 | vl::Context& context ;
51 | std::vector<int> selectedDimensions ;
52 | double exponent ;
53 | double epsilon ;
54 | } ;
55 |
56 | } }
57 |
58 | #endif /* defined(__nnnormalizelp__) */
59 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnpooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnpooling.cu should be compiled instead"
3 | #endif
4 | #include "nnpooling.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnpooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling.hpp
2 | // @brief Pooling layer.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-17 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling__
14 | #define __vl__nnpooling__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace nn {
20 |
21 | class Pooling {
22 | public:
23 | enum Method { Max, Average } ;
24 |
25 | Pooling(vl::Context &context,
26 | int poolHeight, int poolWidth,
27 | int strideY, int strideX,
28 | int padTop, int padBottom,
29 | int padLeft, int padRight,
30 | Method method) ;
31 |
32 | vl::ErrorCode forward(vl::Tensor &output,
33 | vl::Tensor const &input) ;
34 |
35 | vl::ErrorCode backward(vl::Tensor &derInput,
36 | vl::Tensor const &input,
37 | vl::Tensor const &derOutput) ;
38 |
39 | vl::Context& context ;
40 | int poolHeight ;
41 | int poolWidth ;
42 | int strideY ;
43 | int strideX ;
44 | int padTop ;
45 | int padBottom ;
46 | int padLeft ;
47 | int padRight ;
48 | Method method ;
49 | } ;
50 |
51 | } }
52 |
53 | #endif /* defined(__vl__nnpooling__) */
54 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnroipooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnroipooling.cu should be compiled instead"
3 | #endif
4 | #include "nnroipooling.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnroipooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnroipooling.hpp
2 | // @brief ROI pooling block
3 | // @author Hakan Bilen
4 | // @author Abishek Dutta
5 | // @author Andrea Vedaldi
6 |
7 | /*
8 | Copyright (C) 2016-17 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi.
9 | All rights reserved.
10 |
11 | This file is part of the VLFeat library and is made available under
12 | the terms of the BSD license (see the COPYING file).
13 | */
14 |
15 | #ifndef __vl__nnroipooling__
16 | #define __vl__nnroipooling__
17 |
18 | #include "data.hpp"
19 | #include <array>
20 |
21 | namespace vl { namespace nn {
22 |
23 | class ROIPooling {
24 | public:
25 | enum Method { Max, Average } ;
26 |
27 | ROIPooling(vl::Context &context,
28 | std::array<int,2> subdivisions,
29 | std::array<double,6> transform,
30 | Method method) ;
31 |
32 | vl::ErrorCode forward(vl::Tensor &output,
33 | vl::Tensor const &input,
34 | vl::Tensor const &rois) ;
35 |
36 | vl::ErrorCode backward(vl::Tensor &derInput,
37 | vl::Tensor const &input,
38 | vl::Tensor const &rois,
39 | vl::Tensor const &derOutput) ;
40 |
41 | vl::Context& context ;
42 | std::array<int,2> subdivisions ;
43 | std::array<double,6> transform ;
44 | Method method ;
45 | } ;
46 |
47 | } }
48 |
49 | #endif /* defined(__vl__nnroipooling__) */
50 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnsubsample.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnsubsample.cu"
5 |
6 |
--------------------------------------------------------------------------------
/matlab/src/bits/nnsubsample.hpp:
--------------------------------------------------------------------------------
1 | // @file nnsubsample.hpp
2 | // @brief Subsampling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-17 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnsubsample__
14 | #define __vl__nnsubsample__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl { namespace nn {
19 |
20 | class Subsample {
21 | public:
22 | Subsample(vl::Context &context,
23 | int strideY, int strideX,
24 | int padTop, int padBottom,
25 | int padLeft, int padRight) ;
26 |
27 | vl::ErrorCode forwardWithBias(vl::Tensor &output,
28 | vl::Tensor const &input,
29 | vl::Tensor const &biases) ;
30 |
31 | vl::ErrorCode backwardWithBias(vl::Tensor &derInput,
32 | vl::Tensor &derBiases,
33 | vl::Tensor const &derOutput) ;
34 |
35 | vl::Context& context ;
36 | int strideY ;
37 | int strideX ;
38 | int padTop ;
39 | int padBottom ;
40 | int padLeft ;
41 | int padRight ;
42 | } ;
43 |
44 | } }
45 |
46 | #endif /* defined(__vl__nnsubsample__) */
47 |
--------------------------------------------------------------------------------
/matlab/src/config/mex_CUDA_glnxa64.xml:
--------------------------------------------------------------------------------
(mex CUDA compiler configuration; the XML markup was stripped during extraction and is not recoverable)
--------------------------------------------------------------------------------
/matlab/src/vl_cudatool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_cudatool.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_imreadjpeg.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_imreadjpeg_old.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnconv.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnconvt.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconvt.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nncov_pool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nncov_pool.cu"
--------------------------------------------------------------------------------
/matlab/src/vl_nncov_sqrtm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nncov_sqrtm.cu"
--------------------------------------------------------------------------------
/matlab/src/vl_nncov_traceNorm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nncov_traceNorm.cu"
--------------------------------------------------------------------------------
/matlab/src/vl_nnimsn_cov.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnimsn_cov.cu"
--------------------------------------------------------------------------------
/matlab/src/vl_nnmpn_cov.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnmpn_cov.cu"
--------------------------------------------------------------------------------
/matlab/src/vl_nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnnormalizelp.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalizelp.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnpool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnpool.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_nnroipool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnroipool.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_taccummex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_taccummex.cu"
5 |
--------------------------------------------------------------------------------
/matlab/src/vl_tmove.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_tmove.cu"
5 |
--------------------------------------------------------------------------------
/matlab/vl_nnbilinearsampler.m:
--------------------------------------------------------------------------------
1 | %VL_NNBILINEARSAMPLER CNN spatial bilinear resampling
2 | % Y = VL_NNBILINEARSAMPLER(X,GRID) resamples image X at the spatial
3 | % locations specified by GRID using bilinear interpolation.
4 | %
5 | % X is an array of dimension H x W x C x N, where (H,W) are the
6 | % height and width of the image, C is the number of feature
7 | % channels, and N is the number of images in the batch.
8 | %
9 | % GRID is an array of dimension 2 x Ho x Wo x No, where (Ho,Wo) are
10 | % the height and width of the output image and No the number of
11 | % output images in the output batch Y. The output array Y has
12 | % dimensions Ho x Wo x C x No. The same resampling grid is used for
13 | % all input feature channels, but each output image in the batch Y
14 | % uses its own grid.
15 | %
16 | % For output image n, GRID(1,:,:,n) specifies the vertical location
17 | % v of a sample in the input image X and GRID(2,:,:,n) the
18 | % horizontal location u. The convention follows standard
19 | % implementations of this operator in the literature. Namely:
20 | %
21 | % 1. The grid coordinates are normalized in the range [-1,1]. This
22 | % means that (-1,-1) is the center of the upper-left pixel in the
23 | % input image and (+1,+1) the center of the bottom-right pixel.
24 | %
25 | % 2. The V,U coordinate planes are stacked in the first dimension of
26 | % GRID instead of in the third, as it would be more natural in
27 | % MatConvNet (as these could be interpreted as 'channels' in
28 | % GRID).
29 | %
30 | % Further, No can be a multiple of N; in this case, it is assumed
31 | % that there are No/N transforms per input image, hence, the
32 | % transforms [1 ... No/N] are applied to the first image, [No/N+1
33 | % ... 2*No/N] are applied to the second image, etc.
34 | %
35 | % [DX, DGRID] = VL_NNBILINEARSAMPLER(X, GRID, DY) computes the
36 | % derivatives of the block projected onto DY. DX, DGRID, DY have the
37 | % same dimensions as X, GRID and Y, respectively.
38 | %
39 | % ## CUDNN SUPPORT
40 | %
41 | % If compiled in, the function will use cuDNN's
42 | % implementation. Note, cuDNN v5 or higher is required.
43 | % You can use the 'NoCudnn' option to disable
44 | % cuDNN or 'CuDNN' to activate it back again (the
45 | % choice sticks until MATLAB purges the MEX files for any reason).
46 |
47 | % Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
48 | % All rights reserved.
49 | %
50 | % This file is part of the VLFeat library and is made available under
51 | % the terms of the BSD license (see the COPYING file).
52 | vl_nnnotfound(mfilename);
53 |
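Following the two conventions above, an identity grid (one that should reproduce the input up to interpolation) can be built like this; a minimal sketch:

% Identity sampling grid for one H x W output image: coordinates are
% normalized to [-1,1], with the vertical (v) plane stored first.
H = 16 ; W = 16 ;
[u, v] = meshgrid(linspace(-1, 1, W), linspace(-1, 1, H)) ;
grid = zeros(2, H, W, 1, 'single') ;
grid(1,:,:,1) = v ;                       % vertical location
grid(2,:,:,1) = u ;                       % horizontal location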
--------------------------------------------------------------------------------
/matlab/vl_nnbnorm.m:
--------------------------------------------------------------------------------
1 | %VL_NNBNORM CNN batch normalisation.
2 | % Y = VL_NNBNORM(X,G,B) applies batch normalization to the input
3 | % X. Batch normalization is defined as:
4 | %
5 | % Y(i,j,k,t) = G(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + B(k)
6 | %
7 | % where:
8 | %
9 | % mu(k) = mean_ijt X(i,j,k,t),
10 | % sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2,
11 | % sigma(k) = sqrt(sigma2(k) + EPSILON)
12 | %
13 | % are respectively the per-channel mean, variance, and standard
14 | % deviation of each feature channel in the data X. The parameters
15 | % G(k) and B(k) are multiplicative and additive constants used to
16 | % scale each data channel.
17 | %
18 | % Means and variances are accumulated across all the data items
19 | % (images) stored in the 4D tensor X (from which the name batch
20 | % normalization is derived). The constant EPSILON is used to
21 | % regularize the computation of sigma(k) and to avoid division by
22 | % zero.
23 | %
24 | % [DZDX,DZDG,DZDB] = VL_NNBNORM(X,G,B,DZDY) computes the derivatives
25 | % of the block projected onto DZDY. DZDX, DZDG, DZDB and DZDY have
26 | % the same dimensions as X, G, B, and Y respectively.
27 | %
28 | % Optionally, [Y,MOMENTS] = VL_NNBNORM(...) and
29 | % [DZDX,DZDG,DZDB,MOMENTS] = VL_NNBNORM(...,DZDY) return the values
30 | % of the vectors mu and sigma in the formulas above. Here, MOMENTS
31 | % is a DEPTH x 2 array [MU, SIGMA].
32 | %
33 | % VL_NNBNORM(..., 'Option', value) takes the following options:
34 | %
35 | % `Epsilon`:: 1e-4
36 | % Specifies the constant EPSILON in the formulas above.
37 | %
38 | % `Moments`:: unspecified
39 | % Specifies an array MOMENTS with the values of mu and sigma to
40 | % use instead of computing them according to the equations
41 | % above. This is useful to disable batch normalization during
42 | % testing.
43 | %
44 | % `CuDNN`:: specified
45 | % If specified, turns on CuDNN. CuDNN is on by default. This
46 | % option can be useful to undo the effect of a previous
47 | % `NoCuDNN` option in the argument list.
48 | %
49 | % `NoCuDNN`:: not specified
50 | % If specified, turns off CuDNN.
51 | %
52 | % See also: VL_NNNORMALIZE().
53 |
54 | % Copyright (C) 2015 Sébastien Ehrhardt, Karel Lenc and Andrea Vedaldi.
55 | % All rights reserved.
56 | %
57 | % This file is part of the VLFeat library and is made available under
58 | % the terms of the BSD license (see the COPYING file).
59 | vl_nnnotfound(mfilename);
60 |
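The formula above can be evaluated directly in MATLAB, which is a useful reference when checking the compiled implementation; a minimal CPU sketch (illustrative names only):

% Per-channel batch normalization of a 4D array, following the
% definitions of mu(k), sigma(k), G(k), B(k) above.
x = randn(8, 8, 4, 10, 'single') ;
g = ones(1, 1, 4, 'single') ; b = zeros(1, 1, 4, 'single') ;
epsilon = 1e-4 ;
mu = mean(mean(mean(x, 1), 2), 4) ;                        % 1 x 1 x 4
sigma2 = mean(mean(mean(bsxfun(@minus, x, mu).^2, 1), 2), 4) ;
sigma = sqrt(sigma2 + epsilon) ;
xhat = bsxfun(@rdivide, bsxfun(@minus, x, mu), sigma) ;
y = bsxfun(@plus, bsxfun(@times, g, xhat), b) ;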
--------------------------------------------------------------------------------
/matlab/vl_nnconcat.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin)
2 | %VL_NNCONCAT CNN concatenate multiple inputs.
3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell
4 | % array INPUTS along dimension DIM generating an output Y.
5 | %
6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives
7 | % of the block projected onto DZDY. DZDINPUTS has one element for
8 | % each element of INPUTS, each of which is an array that has the same
9 | % dimensions of the corresponding array in INPUTS.
10 |
11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
12 | % All rights reserved.
13 | %
14 | % This file is part of the VLFeat library and is made available under
15 | % the terms of the BSD license (see the COPYING file).
16 |
17 | opts.inputSizes = [] ;
18 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
19 |
20 | if nargin < 2, dim = 3; end;
21 | if nargin < 3, dzdy = []; end;
22 |
23 | if isempty(dzdy)
24 | y = cat(dim, inputs{:});
25 | else
26 | if isempty(opts.inputSizes)
27 | opts.inputSizes = cellfun(@(inp) [size(inp,1),size(inp,2),size(inp,3),size(inp,4)], inputs, 'UniformOutput', false) ;
28 | end
29 | start = 1 ;
30 | y = cell(1, numel(opts.inputSizes)) ;
31 | s.type = '()' ;
32 | s.subs = {':', ':', ':', ':'} ;
33 | for i = 1:numel(opts.inputSizes)
34 | stop = start + opts.inputSizes{i}(dim) ;
35 | s.subs{dim} = start:stop-1 ;
36 | y{i} = subsref(dzdy,s) ;
37 | start = stop ;
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/matlab/vl_nncrop.m:
--------------------------------------------------------------------------------
1 | function y = vl_nncrop(x, crop, dzdy, inputSize)
2 | %VL_NNCROP CNN crop.
3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the
4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT].
5 | %
6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the
7 | % function projected on the output derivative DZDY. DZDX has the same
8 | % dimension as X and DZDY the same dimension as Y.
9 | %
10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to
11 | % the previous call in which X is omitted and its size is passed as
12 | % INPUTSIZE.
13 |
14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi.
15 | % All rights reserved.
16 | %
17 | % This file is part of the VLFeat library and is made available under
18 | % the terms of the BSD license (see the COPYING file).
19 |
20 | if nargin < 4
21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ;
22 | else
23 | sz = inputSize ;
24 | end
25 |
26 | sv = 1 + crop(1) : sz(1) - crop(2) ;
27 | su = 1 + crop(3) : sz(2) - crop(4) ;
28 |
29 | if nargin <= 2 || isempty(dzdy)
30 | y = x(sv, su, :, :) ;
31 | else
32 | if isa(dzdy, 'gpuArray')
33 | y = gpuArray.zeros(sz, classUnderlying(dzdy)) ;
34 | else
35 | y = zeros(sz, class(dzdy)) ;
36 | end
37 | y(sv, su, :, :) = dzdy ;
38 | end
39 |
--------------------------------------------------------------------------------
/matlab/vl_nndropout.m:
--------------------------------------------------------------------------------
1 | function [y,mask] = vl_nndropout(x,varargin)
2 | %VL_NNDROPOUT CNN dropout.
3 | % [Y,MASK] = VL_NNDROPOUT(X) applies dropout to the data X. MASK
4 | % is the randomly sampled dropout mask. Both Y and MASK have the
5 | % same size as X.
6 | %
7 | % VL_NNDROPOUT(X, 'rate', R) sets the dropout rate to R. Rate is defined
8 | % as the probability that a variable will be zeroed (i.e. it is one
9 | % minus the expected value of MASK).
10 | %
11 | % [DZDX] = VL_NNDROPOUT(X, DZDY, 'mask', MASK) computes the
12 | % derivatives of the blocks projected onto DZDY. Note that MASK must
13 | % be specified in order to compute the derivative consistently with
14 | % the MASK randomly sampled in the forward pass. DZDX and DZDY have
15 | % the same dimensions as X and Y respectively.
16 | %
17 | % Note that in the original paper on dropout, at test time the
18 | % network weights for the dropout layers are scaled down to
19 | % compensate for having all the neurons active. In this
20 | % implementation the dropout function itself already does this
21 | % compensation during training. So at test time no alterations are
22 | % required.
23 |
24 | % Copyright (C) 2014-16 Andrea Vedaldi, Karel Lenc.
25 | % All rights reserved.
26 | %
27 | % This file is part of the VLFeat library and is made available under
28 | % the terms of the BSD license (see the COPYING file).
29 |
30 | opts.rate = 0.5 ;
31 | opts.mask = [] ;
32 |
33 | backMode = numel(varargin) > 0 && ~ischar(varargin{1}) ;
34 | if backMode
35 | dzdy = varargin{1} ;
36 | opts = vl_argparse(opts, varargin(2:end)) ;
37 | else
38 | opts = vl_argparse(opts, varargin) ;
39 | end
40 |
41 | % determine mask
42 | scale = 1 / (1 - opts.rate) ;
43 | if isa(x, 'gpuArray')
44 | dataType = classUnderlying(x) ;
45 | else
46 | dataType = class(x) ;
47 | end
48 | switch dataType
49 | case 'single'
50 | scale = single(scale) ;
51 | case 'double'
52 | scale = double(scale) ;
53 | end
54 |
55 | if backMode && isempty(opts.mask)
56 | warning('vl_nndropout: when using in backward mode, the mask should be specified') ;
57 | end
58 | if isempty(opts.mask)
59 | % product determines data type
60 | if isa(x,'gpuArray')
61 | opts.mask = scale * (gpuArray.rand(size(x), 'single') >= opts.rate) ;
62 | else
63 | opts.mask = scale * (rand(size(x), 'single') >= opts.rate) ;
64 | end
65 | end
66 |
67 | % Apply dropout mask. Note that mask is either `single` or `double`
68 | % and a CPU or GPU array like the input argument `x`.
69 | if ~backMode
70 | y = opts.mask .* x ;
71 | else
72 | y = opts.mask .* dzdy ;
73 | end
74 | mask = opts.mask ;
75 |
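Because the mask is scaled by 1/(1-rate) during training, the expected activation is preserved, which is why no test-time rescaling is needed. A quick numerical check using the function above:

% E[mask .* x] = x: surviving activations are scaled up to
% compensate for the dropped ones.
x = ones(1, 1e6, 'single') ;
y = vl_nndropout(x, 'rate', 0.5) ;
mean(y(:))                                % close to 1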
--------------------------------------------------------------------------------
/matlab/vl_nnnoffset.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnoffset(x, param, dzdy)
2 | %VL_NNNOFFSET CNN norm-dependent offset.
3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the
4 | % weighted norm of the feature channels:
5 | %
6 | % Y(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2)
7 | %
8 | % where
9 | %
10 | % L(i,j) = sum_k X(i,j,k)^2
11 | %
12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the
13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
14 | % as X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | L = sum(x.^2,3) ;
23 | L = max(L, 1e-8) ;
24 |
25 | if nargin <= 2
26 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ;
27 | else
28 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ;
29 | end
30 |
--------------------------------------------------------------------------------
/matlab/vl_nnnormalize.m:
--------------------------------------------------------------------------------
1 | %VL_NNNORMALIZE CNN Local Response Normalization (LRN)
2 | % Y = VL_NNNORMALIZE(X, PARAM) computes the so-called Local Response
3 | % Normalization (LRN) operator. This operator performs a
4 | % channel-wise sliding window normalization of each column of the
5 | % input array X. The normalized output is given by:
6 | %
7 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA
8 | %
9 | % where the normalization factor is given by
10 | %
11 | % L(i,j,k) = KAPPA + ALPHA * sum_{q in Q(k)} X(i,j,q)^2,
12 | %
13 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The
14 | % window Q(k) is defined as:
15 | %
16 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))].
17 | %
18 | % where D is the number of feature channels in X. Note in particular
19 | % that, by setting N >= 2D, the function can be used to normalize
20 | % all the channels as a single group (useful to achieve L2
21 | % normalization).
22 | %
23 | % DZDX = VL_NNNORMALIZE(X, PARAM, DZDY) computes the derivative of
24 | % the block projected onto DZDY. DZDX and DZDY have the same
25 | % dimensions as X and Y respectively.
26 | %
27 | % **Remark:** Some CNN libraries (e.g. Caffe) use a slightly
28 | % different convention for the parameters of the LRN. Caffe in
29 | % particular uses the convention:
30 | %
31 | % PARAM_CAFFE = [N KAPPA N*ALPHA BETA]
32 | %
33 | % i.e. the ALPHA parameter is multiplied by N.
34 |
35 | % Copyright (C) 2014 Andrea Vedaldi.
36 | % All rights reserved.
37 | %
38 | % This file is part of the VLFeat library and is made available under
39 | % the terms of the BSD license (see the COPYING file).
40 | vl_nnnotfound(mfilename);
41 |
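The window definition is easiest to see at a single spatial location; a direct evaluation of the formulas above (a sketch, not the compiled code path):

% LRN at one location: PARAM = [N KAPPA ALPHA BETA].
x = randn(1, 1, 8, 'single') ;
N = 5 ; kappa = 2 ; alpha = 1e-4 ; beta = 0.75 ;
D = size(x, 3) ; y = zeros(size(x), 'single') ;
for k = 1:D
  Q = max(1, k - floor((N-1)/2)) : min(D, k + ceil((N-1)/2)) ;
  L = kappa + alpha * sum(x(1,1,Q).^2) ;
  y(1,1,k) = x(1,1,k) / L^beta ;
end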
--------------------------------------------------------------------------------
/matlab/vl_nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnormalizelp(x,dzdy,varargin)
2 | %VL_NNNORMALIZELP CNN Lp normalization
3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each column of
4 | % features in the array X, along its third dimension:
5 | %
6 | % Y(i,j,k) = X(i,j,k) / (sum_q X(i,j,q)^p + epsilon)^(1/p)
7 | %
8 | % [Y,N] = VL_NNNORMALIZELP(X) returns the array N containing the
9 | % computed norms.
10 | %
11 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the
12 | % function with respect to X projected onto DZDY.
13 | %
14 | % VL_NNNORMALIZELP(___, 'opt', value, ...) takes the following options:
15 | %
16 | % `exponent`:: 2
17 | % The exponent of the Lp norm. Warning: currently only even
18 | % exponents are supported.
19 | %
20 | % `p`:: same as exponent
21 | %
22 | % `epsilon`:: 0.01
23 | % The constant added to the sum of p-powers before taking the
24 | % 1/p power (see the formula above).
25 | %
26 | % `dimensions`:: [3]
27 | % The list of dimensions along which to operate. By default,
28 | % normalization is along the third dimension, usually
29 | % corresponding to feature channels.
30 | %
31 | % `spatial`:: `false`
32 | % If `true`, sum along the two spatial dimensions instead of
33 | % along the feature channels. This is the same as setting
34 | % `dimensions` to [1,2].
35 | %
36 | % See also: VL_NNNORMALIZE().
37 |
38 | opts.epsilon = 1e-2 ;
39 | opts.p = 2 ;
40 | opts.spatial = false ;
41 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
42 |
43 | if ~opts.spatial
44 | massp = sum(x.^opts.p,3) + opts.epsilon ;
45 | else
46 | massp = sum(sum(x.^opts.p,1),2) + opts.epsilon ;
47 | end
48 | mass = massp.^(1/opts.p) ;
49 | y = bsxfun(@rdivide, x, mass) ;
50 |
51 | if nargin < 2 || isempty(dzdy)
52 | return ;
53 | else
54 | dzdy = bsxfun(@rdivide, dzdy, mass) ;
55 | if ~opts.spatial
56 | tmp = sum(dzdy .* x, 3) ;
57 | else
58 | tmp = sum(sum(dzdy .* x, 1),2);
59 | end
60 | y = dzdy - bsxfun(@times, tmp, bsxfun(@rdivide, x.^(opts.p-1), massp)) ;
61 | end
62 |
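With the default exponent p = 2 and a small epsilon, each feature vector comes out with (approximately) unit L2 norm along the third dimension; a quick check of the forward pass above:

x = randn(4, 4, 8, 'single') ;
y = vl_nnnormalizelp(x, [], 'epsilon', 1e-8) ;
sqrt(sum(y.^2, 3))                        % entries close to 1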
--------------------------------------------------------------------------------
/matlab/vl_nnnotfound.m:
--------------------------------------------------------------------------------
1 | function vl_nnnotfound(fname)
2 | %VL_NNNOTFOUND Prints a help error message to set up MatConvNet
3 | % Warn users about common pitfalls in setting up MatConvNet.
4 |
5 | % Copyright (C) 2017 Karel Lenc.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | paths = strsplit(path, pathsep) ;
12 | mexpath = fullfile(fileparts(mfilename('fullpath')), 'mex') ;
13 |
14 | if ~exist(mexpath, 'dir') || ~exist(fullfile(mexpath, [fname, '.', mexext]), 'file')
15 | error('MatConvNet not compiled or the compilation failed. Please run `vl_compilenn`.');
16 | end
17 |
18 | if ~ismember(mexpath, paths)
19 | error('MatConvNet not set up. Please run \n\t`run %s; rehash;`.', ...
20 | fullfile(vl_rootnn(), 'matlab', 'vl_setupnn.m')) ;
21 | end
22 |
23 | if strcmp(pwd, fullfile(vl_rootnn, 'matlab'))
24 | error(['MatConvNet cannot be run in MatConvNet''s MATLAB path %s.\n', ...
25 | 'Please change path and call `rehash`.'], vl_rootnn()) ;
26 | end
27 |
--------------------------------------------------------------------------------
/matlab/vl_nnpool.m:
--------------------------------------------------------------------------------
1 | %VL_NNPOOL CNN pooling.
2 | % Y = VL_NNPOOL(X, POOL) applies the pooling operator to all
3 | % channels of the data X using a square filter of size POOL. X is a
4 | % SINGLE array of dimension H x W x D x N where (H,W) are the
5 | % height and width of the map stack, D is the image depth (number
6 | % of feature channels) and N the number of images in the stack.
7 | %
8 | % Y = VL_NNPOOL(X, [POOLY, POOLX]) uses a rectangular filter of
9 | % height POOLY and width POOLX.
10 | %
11 | % DZDX = VL_NNPOOL(X, POOL, DZDY) computes the derivatives of the
12 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
13 | % as X and Y respectively.
14 | %
15 | % VL_NNPOOL(..., 'option', value, ...) takes the following options:
16 | %
17 | % `Stride`:: 1
18 | % The output stride (downsampling factor). It can be either a
19 | % scalar for isotropic downsampling or a vector [STRIDEY
20 | % STRIDEX].
21 | %
22 | % `Pad`:: 0
23 | % The amount of input padding. Input images are padded with zeros
24 | % by this number of pixels on all sides before the convolution is
25 | % computed. It can also be a vector [TOP BOTTOM LEFT RIGHT] to
26 | % specify a different amount of padding in each direction. The
27 | % size of the pooling filter has to exceed the padding.
28 | %
29 | % `Method`:: 'max'
30 | % Specify method of pooling. It can be either 'max' (retain max value
31 | % over the pooling region per channel) or 'avg' (compute the average
32 | % value over the pooling region per channel).
33 | %
34 | % The pooling window must not be larger than the padded image, i.e.
35 | %
36 | % 1 <= POOLY <= HEIGHT + (PADTOP + PADBOTTOM),
37 | % 1 <= POOLX <= WIDTH + (PADLEFT + PADRIGHT).
38 | %
39 | % The output Y is a SINGLE array of dimension YH x YW x K x N of N
40 | % images with K channels, where:
41 | %
42 | % YH = floor((H + (PADTOP+PADBOTTOM) - POOLY)/STRIDEY) + 1,
43 | % YW = floor((W + (PADLEFT+PADRIGHT) - POOLX)/STRIDEX) + 1.
44 | %
45 | % The derivative DZDY has the same dimension of the output Y and
46 | % the derivative DZDX has the same dimension as the input X.
47 | %
48 | % ## CUDNN SUPPORT
49 | %
50 | % If compiled in, the function will use cuDNN convolution routines
51 | % (with the exception of asymmetric left-right or top-bottom
52 | % padding and average pooling that triggers a bug in cuDNN). You
53 | % can use the 'NoCuDNN' option to disable cuDNN or 'cuDNN' to
54 | % activate it back again (the choice sticks until MATLAB purges the
55 | % MEX files for any reason).
56 |
57 | % Copyright (C) 2014 Andrea Vedaldi, Karel Lenc, and Max Jaderberg.
58 | % Copyright (C) 2015 Andrea Vedaldi and Karel Lenc.
59 | % All rights reserved.
60 | %
61 | % This file is part of the VLFeat library and is made available under
62 | % the terms of the BSD license (see the COPYING file).
63 | vl_nnnotfound(mfilename);
64 |
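The YH/YW formulas above determine the output size; for example:

% Output size of 3x3 pooling with stride 2 and asymmetric padding.
H = 28 ; W = 28 ; POOLY = 3 ; POOLX = 3 ;
STRIDEY = 2 ; STRIDEX = 2 ; PAD = [0 1 0 1] ;   % [TOP BOTTOM LEFT RIGHT]
YH = floor((H + PAD(1) + PAD(2) - POOLY) / STRIDEY) + 1    % = 14
YW = floor((W + PAD(3) + PAD(4) - POOLX) / STRIDEX) + 1    % = 14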
--------------------------------------------------------------------------------
/matlab/vl_nnrelu.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnrelu(x,varargin)
2 | %VL_NNRELU CNN rectified linear unit.
3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data
4 | % X. X can have arbitrary size.
5 | %
6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block
7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
8 | % X and Y respectively.
9 | %
10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options:
11 | %
12 | % `Leak`:: 0
13 | % Set the leak factor, a non-negative number. Y is equal to X if
14 | % X is not smaller than zero; otherwise, Y is equal to X
15 | % multiplied by the leak factor. By default, the leak factor is
16 | % zero; for values greater than zero one obtains the leaky ReLU
17 | % unit.
18 | %
19 | % ADVANCED USAGE
20 | %
21 | % As a further optimization, in the backward computation it is
22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then
23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY).
24 | % This is useful because it means that the buffer X does not need to
25 | % be remembered in the backward pass.
26 |
27 | % Copyright (C) 2014-15 Andrea Vedaldi.
28 | % All rights reserved.
29 | %
30 | % This file is part of the VLFeat library and is made available under
31 | % the terms of the BSD license (see the COPYING file).
32 |
33 | if ~isempty(varargin) && ~ischar(varargin{1}) % passed in dzdy
34 | dzdy = varargin{1} ;
35 | varargin(1) = [] ;
36 | else
37 | dzdy = [] ;
38 | end
39 |
40 | opts.leak = 0 ;
41 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
42 |
43 | if opts.leak == 0
44 | if nargin <= 1 || isempty(dzdy)
45 | y = max(x, 0) ;
46 | else
47 | y = dzdy .* (x > 0) ;
48 | end
49 | else
50 | if nargin <= 1 || isempty(dzdy)
51 | y = x .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
52 | else
53 | y = dzdy .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
54 | end
55 | end
56 |
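The advanced usage above holds because max(X,0) preserves the sign pattern that the backward pass needs (in the non-leaky case); a quick check:

x = randn(5, 5, 'single') ; dzdy = randn(5, 5, 'single') ;
y = vl_nnrelu(x) ;
isequal(vl_nnrelu(x, dzdy), vl_nnrelu(y, dzdy))            % true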
--------------------------------------------------------------------------------
/matlab/vl_nnroipool.m:
--------------------------------------------------------------------------------
1 | % VL_NNROIPOOL CNN region of interest pooling.
2 | % Y = VL_NNROIPOOL(X, ROIS) pools each feature channel in X in
3 | % the specified regions of interest ROIS. ROIS is a 5 x K array
4 | % containing K regions. Each region has five coordinates `[t, u0,
5 | % v0, u1, v1]` where `u0`, `v0` is the upper-left corner of a ROI,
6 | % `u1`, `v1` is the bottom-right corner, and `t` is the index of the
7 | % image that contains the region. Spatial coordinates start at (1,1),
8 | % with `u` indexing the horizontal axis and `v` the vertical one.
9 | % The image indices range from 1 to the number of images stored
10 | % in the tensor X.
11 | %
12 | % If X has C feature channels, then the output Y is a 1 x 1 x C x K
13 | % array, with one image instance per region. Arguments can be SINGLE
14 | % or DOUBLE and CPU or GPU arrays; however, they must all be of the
15 | % same type (unless empty).
16 | %
17 | % DZDX = VL_NNROIPOOL(X, ROIS, DZDY) computes the derivative of
18 | % the layer projected on DZDY with respect to X.
19 | %
20 | % VL_NNROIPOOL(___, 'opt', value, ...) accepts the following
21 | % options:
22 | %
23 | % `Method`:: `'max'`
24 | % Choose between `'max'` and `'avg'` (average) pooling.
25 | %
26 | % `Subdivisions`:: `[1 1]`
27 | % Specifies the number [SH,SW] of vertical and horizontal tiles of
28 | % a region. This makes the output a SH x SW x C x K array.
29 | %
30 | % `Transform`:: `1`
31 | % Specifies a spatial transformation to apply to region vertices before
32 | % they are applied to the input tensor. If T is a scalar, then
33 | % the transformation is a scaling centered at the origin:
34 | %
35 | % u' = T (u - 1) + 1,
36 | % v' = T (v - 1) + 1.
37 | %
38 | % If T is a 2D vector, then different scaling factors for the
39 | % `u` and `v` can be specified. Finally, if T is a 2 x 3 matrix, then:
40 | %
41 | % u' = T(1,1) u + T(1,2) v + T(1,3),
42 | % v' = T(2,1) u + T(2,2) v + T(2,3).
43 | %
44 | % Note that only the upper-left and bottom-right corners of each
45 | % rectangular region are transformed. Thus this is mostly useful
46 | % for axis-aligned transformations; the generality of the expression
47 | %    does, however, allow swapping `u` and `v`, which may be needed
48 | %    to match different conventions for the box coordinates.
49 | %
50 | % See also: VL_NNPOOL().
51 |
52 | % Copyright (C) 2016-17 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi.
53 | % All rights reserved.
54 | %
55 | % This file is part of the VLFeat library and is made available under
56 | % the terms of the BSD license (see the COPYING file).
57 | vl_nnnotfound(mfilename);
58 |
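A hedged usage sketch of the ROI format documented above (requires the
compiled vl_nnroipool MEX; the sizes are illustrative):

    x = randn(15,14,3,2,'single') ;          % H x W x C x N input
    rois = single([1 ; 1 ; 1 ; 14 ; 15]) ;   % [t ; u0 ; v0 ; u1 ; v1]: one region in image 1
    y = vl_nnroipool(x, rois, 'method', 'max', 'subdivisions', [2 2]) ;
    size(y)                                  % 2 x 2 x 3 x 1, i.e. SH x SW x C x K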
--------------------------------------------------------------------------------
/matlab/vl_nnsigmoid.m:
--------------------------------------------------------------------------------
1 | function out = vl_nnsigmoid(x,dzdy)
2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit.
3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can
4 | % have an arbitrary size. The sigmoid is defined as follows:
5 | %
6 | % SIGMOID(X) = 1 / (1 + EXP(-X)).
7 | %
8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the
9 | % block projected onto DZDY. DZDX and DZDY have the same
10 | % dimensions as X and Y respectively.
11 |
12 | % Copyright (C) 2015 Karel Lenc.
13 | % All rights reserved.
14 | %
15 | % This file is part of the VLFeat library and is made available under
16 | % the terms of the BSD license (see the COPYING file).
17 |
18 | y = 1 ./ (1 + exp(-x));
19 |
20 | if nargin <= 1 || isempty(dzdy)
21 | out = y ;
22 | else
23 | out = dzdy .* (y .* (1 - y)) ;
24 | end
25 |
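The closed-form backward pass above can be checked directly; a minimal
sketch, assuming MatConvNet is on the path:

    x = randn(5,5,'single') ;
    y = vl_nnsigmoid(x) ;
    dzdy = ones(size(y), 'single') ;
    dzdx = vl_nnsigmoid(x, dzdy) ;
    % sigma'(x) = sigma(x) .* (1 - sigma(x))
    assert(isequal(dzdx, y .* (1 - y))) ;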
--------------------------------------------------------------------------------
/matlab/vl_nnsoftmax.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmax(X,dzdY)
2 | %VL_NNSOFTMAX CNN softmax.
3 | %   Y = VL_NNSOFTMAX(X) applies the softmax operator to the data X. X
4 | % has dimension H x W x D x N, packing N arrays of W x H
5 | % D-dimensional vectors.
6 | %
7 | % D can be thought of as the number of possible classes and the
8 | % function computes the softmax along the D dimension. Often W=H=1,
9 | % but this is not a requirement, as the operator is applied
10 | % convolutionally at all spatial locations.
11 | %
12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block
13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
14 | % X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ;
23 | L = sum(E,3) ;
24 | Y = bsxfun(@rdivide, E, L) ;
25 |
26 | if nargin <= 1, return ; end
27 |
28 | % backward
29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
30 |
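Since the softmax is computed along the third dimension, the outputs sum to
one over D at every spatial location; a quick sketch:

    x = randn(2,2,10,3,'single') ;
    y = vl_nnsoftmax(x) ;
    e = abs(sum(y,3) - 1) ;
    max(e(:))                    % ~0 up to rounding error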
--------------------------------------------------------------------------------
/matlab/vl_nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnsoftmaxloss(x,c,dzdy)
2 | %VL_NNSOFTMAXLOSS CNN combined softmax and logistic loss.
3 | % **Deprecated: use `vl_nnloss` instead**
4 | %
5 | %   Y = VL_NNSOFTMAXLOSS(X, C) applies the softmax operator followed by
6 | %   the logistic loss to the data X. X has dimension H x W x D x N,
7 | % packing N arrays of W x H D-dimensional vectors.
8 | %
9 | % C contains the class labels, which should be integers in the range
10 | %   1 to D. C can be an array with either N elements or with
11 | %   dimensions H x W x 1 x N. In the first case, a given class label is
12 | % applied at all spatial locations; in the second case, different
13 | % class labels can be specified for different locations.
14 | %
15 | % DZDX = VL_NNSOFTMAXLOSS(X, C, DZDY) computes the derivative of the
16 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
17 | % as X and Y respectively.
18 |
19 | % Copyright (C) 2014-15 Andrea Vedaldi.
20 | % All rights reserved.
21 | %
22 | % This file is part of the VLFeat library and is made available under
23 | % the terms of the BSD license (see the COPYING file).
24 |
25 | % work around a bug in MATLAB, where the native cast() becomes
26 | % progressively slower
27 | if isa(x, 'gpuArray')
28 | switch classUnderlying(x) ;
29 | case 'single', cast = @(z) single(z) ;
30 | case 'double', cast = @(z) double(z) ;
31 | end
32 | else
33 | switch class(x)
34 | case 'single', cast = @(z) single(z) ;
35 | case 'double', cast = @(z) double(z) ;
36 | end
37 | end
38 |
39 | %X = X + 1e-6 ;
40 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ;
41 |
42 | if numel(c) == sz(4)
43 | % one label per image
44 | c = reshape(c, [1 1 1 sz(4)]) ;
45 | end
46 | if size(c,1) == 1 & size(c,2) == 1
47 | c = repmat(c, [sz(1) sz(2)]) ;
48 | end
49 |
50 | % one label per spatial location
51 | sz_ = [size(c,1) size(c,2) size(c,3) size(c,4)] ;
52 | assert(isequal(sz_, [sz(1) sz(2) sz_(3) sz(4)])) ;
53 | assert(sz_(3)==1 | sz_(3)==2) ;
54 |
55 | % class c = 0 skips a spatial location
56 | mass = cast(c(:,:,1,:) > 0) ;
57 | if sz_(3) == 2
58 | % the second channel of c (if present) is used as weights
59 | mass = mass .* c(:,:,2,:) ;
60 | c(:,:,2,:) = [] ;
61 | end
62 |
63 | % convert to indexes
64 | c = c - 1 ;
65 | c_ = 0:numel(c)-1 ;
66 | c_ = 1 + ...
67 | mod(c_, sz(1)*sz(2)) + ...
68 | (sz(1)*sz(2)) * max(c(:), 0)' + ...
69 | (sz(1)*sz(2)*sz(3)) * floor(c_/(sz(1)*sz(2))) ;
70 |
71 | % compute softmaxloss
72 | xmax = max(x,[],3) ;
73 | ex = exp(bsxfun(@minus, x, xmax)) ;
74 |
75 | %n = sz(1)*sz(2) ;
76 | if nargin <= 2
77 | t = xmax + log(sum(ex,3)) - reshape(x(c_), [sz(1:2) 1 sz(4)]) ;
78 | y = sum(sum(sum(mass .* t,1),2),4) ;
79 | else
80 | y = bsxfun(@rdivide, ex, sum(ex,3)) ;
81 | y(c_) = y(c_) - 1;
82 | y = bsxfun(@times, y, bsxfun(@times, mass, dzdy)) ;
83 | end
84 |
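As the deprecation note says, this block should match the composition of
VL_NNSOFTMAX and VL_NNLOSS with the 'log' loss; a minimal sketch mirroring
the bundled nnsoftmaxloss test:

    x = rand(3,4,10,2,'single') + 0.001 ;
    c = reshape([7 2], 1,1,1,[]) ;       % one label per image
    y  = vl_nnsoftmaxloss(x, c) ;
    y_ = vl_nnloss(vl_nnsoftmax(x), c, [], 'loss', 'log') ;
    % y and y_ agree up to rounding error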
--------------------------------------------------------------------------------
/matlab/vl_nnspnorm.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnspnorm(x, param, dzdy)
2 | %VL_NNSPNORM CNN spatial normalization.
3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of
4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and
5 | % PW define the size of the spatial neighbourhood used for
6 | %   normalization.
7 | %
8 | %   For each feature channel, the function computes the average of the
9 | %   squares of X inside each rectangle, N2(i,j). It then divides each element
10 | % of X as follows:
11 | %
12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA.
13 | %
14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the
15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
16 | % as X and Y respectively.
17 |
18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | pad = floor((param(1:2)-1)/2) ;
25 | pad = [pad ; param(1:2)-1-pad] ;
26 |
27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ;
28 | f = 1 + param(3) * n2 ;
29 |
30 | if nargin <= 2 || isempty(dzdy)
31 | y = f.^(-param(4)) .* x ;
32 | else
33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ;
34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ;
35 | end
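With a 1 x 1 neighbourhood the pooling is the identity on X.^2, so the
formula above can be checked elementwise (requires the compiled vl_nnpool
MEX):

    x = randn(8,8,2,'single') ;
    param = [1 1 0.1 0.75] ;                      % [PH PW ALPHA BETA]
    y = vl_nnspnorm(x, param) ;
    y_ = x ./ (1 + param(3) * x.^2).^param(4) ;   % agrees with y up to rounding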
--------------------------------------------------------------------------------
/matlab/vl_rootnn.m:
--------------------------------------------------------------------------------
1 | function root = vl_rootnn()
2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox.
3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox.
4 |
5 | % Copyright (C) 2014 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = fileparts(fileparts(mfilename('fullpath'))) ;
12 |
--------------------------------------------------------------------------------
/matlab/vl_setupnn.m:
--------------------------------------------------------------------------------
1 | function vl_setupnn()
2 | %VL_SETUPNN Setup the MatConvNet toolbox.
3 | %   VL_SETUPNN() adds the MatConvNet toolbox to the MATLAB path.
4 |
5 | % Copyright (C) 2014-15 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = vl_rootnn() ;
12 | addpath(fullfile(root, 'matlab')) ;
13 | addpath(fullfile(root, 'matlab', 'mex')) ;
14 | addpath(fullfile(root, 'matlab', 'simplenn')) ;
15 | addpath(fullfile(root, 'matlab', 'xtest')) ;
16 | addpath(fullfile(root, 'examples')) ;
17 |
18 | if ~exist('gather')
19 | warning('The MATLAB Parallel Toolbox does not seem to be installed. Activating compatibility functions.') ;
20 | addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ;
21 | end
22 |
23 | if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0
24 | warning('MatConvNet is not compiled. Consider running `vl_compilenn`.');
25 | end
26 |
--------------------------------------------------------------------------------
/matlab/vl_taccum.m:
--------------------------------------------------------------------------------
1 | function a = vl_taccum(alpha, a, beta, b)
2 | %VL_TACCUM Compute A = alpha A + beta B
3 | %   A = VL_TACCUM(ALPHA, A, BETA, B) efficiently computes A = alpha A
4 | %   + beta B. For GPU arrays, it performs the computation in place, by
5 | %   modifying A without creating an additional copy.
6 |
7 | % Copyright (C) 2016 Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | if isscalar(a) || isscalar(b)
14 | a = alpha * a + beta * b ;
15 | return ;
16 | elseif isa(a, 'gpuArray')
17 | vl_taccummex(alpha, a, beta, b, 'inplace') ;
18 | else
19 | a = vl_taccummex(alpha, a, beta, b) ;
20 | end
21 |
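A usage sketch: the scalar path below is pure MATLAB, while array inputs go
through the compiled vl_taccummex MEX (in place when A is a gpuArray):

    a = vl_taccum(0.9, 2, 0.1, 10) ;     % scalar path: 0.9*2 + 0.1*10 = 2.8
    A = single(rand(3)) ; B = single(rand(3)) ;
    A = vl_taccum(0.9, A, 0.1, B) ;      % CPU arrays: result is returned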
--------------------------------------------------------------------------------
/matlab/vl_tshow.m:
--------------------------------------------------------------------------------
1 | function vl_tshow(T, varargin)
2 | %VL_TSHOW Visualize a 4D tensor.
3 | % VL_TSHOW(T) shows the 4D tensor T in the current figure.
4 | %
5 | % The tensor is shown as a montage of 2D slices (e.g. filters), with the
6 | % 3rd dimension stacked along the rows and the 4th dimension along the
7 | % columns.
8 | %
9 | % VL_TSHOW(T, 'option', value, ...) accepts the following options:
10 | %
11 | % `labels`:: true
12 | % If true, labels the x/y axis of the montage.
13 | %
14 | % Any additional options are passed to IMAGESC (e.g. to set the parent
15 | % axes, or other properties).
16 |
17 | % Copyright (C) 2017 Joao F. Henriques.
18 | % All rights reserved.
19 | %
20 | % This file is part of the VLFeat library and is made available under
21 | % the terms of the BSD license (see the COPYING file).
22 |
23 | opts.labels = true ;
24 | [opts, varargin] = vl_argparse(opts, varargin, 'nonrecursive') ;
25 |
26 | assert((isnumeric(T) || islogical(T)) && ndims(T) <= 4, ...
27 | 'T must be a 4D numeric or logical tensor.') ;
28 |
29 | % Stack input channels along rows (merge 1st dim. with 3rd), and output
30 | % channels along columns (merge 2nd dim. with 4th), to form a 2D image
31 | sz = size(T) ;
32 | sz(end+1:4) = 1 ;
33 | T = reshape(permute(T, [1 3 2 4]), sz(1) * sz(3), sz(2) * sz(4)) ;
34 |
35 | % Display it
36 | h = imagesc(T, varargin{:}) ;
37 |
38 | ax = get(h, 'Parent') ;
39 | axis(ax, 'image') ;
40 |
41 | % Display grid between filters
42 | set(ax, 'XGrid', 'on', 'YGrid', 'on', 'GridAlpha', 1, ...
43 | 'TickLength', [0 0], 'XTickLabel', {}, 'YTickLabel', {}, ...
44 | 'YTick', sz(1) + 0.5 : sz(1) : sz(1) * sz(3) - 0.5, ...
45 | 'XTick', sz(2) + 0.5 : sz(2) : sz(2) * sz(4) - 0.5) ;
46 |
47 | if opts.labels
48 | xlabel(sprintf('Output channels (%i)', sz(4)), 'Parent', ax) ;
49 | ylabel(sprintf('Input channels (%i)', sz(3)), 'Parent', ax) ;
50 | end
51 |
52 |
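A usage sketch visualizing a hypothetical bank of filters, per the options
above:

    T = randn(5,5,3,16,'single') ;       % e.g. 16 RGB 5x5 filters
    figure ; vl_tshow(T) ;               % 3rd dim along rows, 4th along columns
    % additional options are forwarded to IMAGESC, e.g.:
    % vl_tshow(T, 'labels', false) ;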
--------------------------------------------------------------------------------
/matlab/xtest/cmyk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZilinGao/GM-SOP/5922792067a4d6e5ca057b5429a221f2ea101a19/matlab/xtest/cmyk.jpg
--------------------------------------------------------------------------------
/matlab/xtest/suite/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < nntest
2 | properties
3 | x
4 | a
5 | b
6 | end
7 |
8 | properties (TestParameter)
9 | dim = {1 2 3 4}
10 | end
11 |
12 | methods (TestClassSetup)
13 | function data(test,device)
14 | test.x = test.randn(15,14,3,2) ;
15 | test.a = test.randn(15,14,3,2) ;
16 | test.b = test.randn(15,14,3,2) ;
17 | end
18 | end
19 |
20 | methods (Test)
21 | function data_and_parameters(test, dim)
22 | x = test.x ;
23 | a = test.a ;
24 | b = test.b ;
25 |
26 | a = sum(a, dim) ;
27 | b = sum(b, dim) ;
28 |
29 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
30 |
31 | output = scale.forward({x}, {a,b}) ;
32 | dzdy = test.randn(size(output{1})) ;
33 | [derInputs, derParams] = scale.backward({x}, {a,b}, {dzdy}) ;
34 |
35 | pick = @(x) x{1} ;
36 | dzdx = derInputs{1} ;
37 | dzda = derParams{1} ;
38 | dzdb = derParams{2} ;
39 |
40 | test.der(@(x) pick(scale.forward({x},{a,b})), x, dzdy, dzdx, 1e-2 * test.range) ;
41 | test.der(@(a) pick(scale.forward({x},{a,b})), a, dzdy, dzda, 1e-2 * test.range) ;
42 | test.der(@(b) pick(scale.forward({x},{a,b})), b, dzdy, dzdb, 1e-2 * test.range) ;
43 | end
44 |
45 | function data_only(test, dim)
46 | x = test.x ;
47 | a = test.a ;
48 | b = test.b ;
49 |
50 | a = sum(a, dim) ;
51 | b = sum(b, dim) ;
52 |
53 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
54 |
55 | output = scale.forward({x,a,b}, {}) ;
56 | dzdy = test.randn(size(output{1})) ;
57 | [derInputs, derParams] = scale.backward({x,a,b}, {}, {dzdy}) ;
58 |
59 | pick = @(x) x{1} ;
60 | dzdx = derInputs{1} ;
61 | dzda = derInputs{2} ;
62 | dzdb = derInputs{3} ;
63 |
64 | test.der(@(x) pick(scale.forward({x,a,b},{})), x, dzdy, dzdx, 1e-2 * test.range) ;
65 | test.der(@(a) pick(scale.forward({x,a,b},{})), a, dzdy, dzda, 1e-2 * test.range) ;
66 | test.der(@(b) pick(scale.forward({x,a,b},{})), b, dzdy, dzdb, 1e-2 * test.range) ;
67 | end
68 | end
69 | end
70 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnbnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnbnorm < nntest
2 | properties (TestParameter)
3 | rows = {2 8 13}
4 | cols = {2 8 17}
5 | numDims = {1 3 4}
6 | batchSize = {2 7}
7 | end
8 | methods (Test)
9 | function regression(test, rows,cols, numDims, batchSize)
10 | r = rows ;
11 | c = cols ;
12 | nd = numDims ;
13 | bs = batchSize ;
14 | x = test.randn(r, c, nd, bs) ;
15 | g = test.randn(nd, 1) / test.range ;
16 | b = test.randn(nd, 1) / test.range ;
17 | epsilon = 0.001 ;
18 |
19 | [y,m] = vl_nnbnorm(x,g,b,'epsilon',epsilon) ;
20 | n = numel(x) / size(x,3) ;
21 | mu = sum(sum(sum(x,1),2),4) / n ;
22 | sigma2 = sum(sum(sum(bsxfun(@minus,x,mu).^2,1),2),4) / n + epsilon ;
23 | sigma = sqrt(sigma2) ;
24 | m_ = [mu(:),sigma(:)] ;
25 | test.eq(m,m_) ;
26 |
27 | g = reshape(g,1,1,[]) ;
28 | b = reshape(b,1,1,[]) ;
29 | y_ = bsxfun(@plus,b,bsxfun(@times,g,bsxfun(@rdivide,bsxfun(@minus,x,mu),sigma))) ;
30 | test.eq(y,y_) ;
31 | end
32 |
33 | function basic(test, rows, cols, numDims, batchSize)
34 | r = rows ;
35 | c = cols ;
36 | nd = numDims ;
37 | bs = batchSize ;
38 | x = test.randn(r, c, nd, bs) ;
39 | %g = test.randn(1, 1, nd, 1) ;
40 | %b = test.randn(1, 1, nd, 1) ;
41 | g = test.randn(nd, 1) / test.range ;
42 | b = test.randn(nd, 1) / test.range ;
43 |
44 | y = vl_nnbnorm(x,g,b) ;
45 | dzdy = test.randn(size(y)) ;
46 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ;
47 |
48 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-3) ;
49 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, 1e-2) ;
50 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, 1e-3) ;
51 | end
52 |
53 | function givenMoments(test, rows, cols, numDims, batchSize)
54 | r = rows ;
55 | c = cols ;
56 | nd = numDims ;
57 | bs = batchSize ;
58 | x = test.randn(r, c, nd, bs) ;
59 | %g = test.randn(1, 1, nd, 1) ;
60 | %b = test.randn(1, 1, nd, 1) ;
61 | g = test.randn(nd, 1) / test.range ;
62 | b = test.randn(nd, 1) / test.range ;
63 |
64 | [y,m] = vl_nnbnorm(x,g,b) ;
65 | [y_,m_] = vl_nnbnorm(x,g,b,'moments',m) ;
66 |
67 | test.eq(y,y_) ;
68 | test.eq(m,m_) ;
69 |
70 | dzdy = test.randn(size(y)) ;
71 | [dzdx,dzdg,dzdb,m__] = vl_nnbnorm(x,g,b,dzdy) ;
72 | [dzdx_,dzdg_,dzdb_,m___] = vl_nnbnorm(x,g,b,dzdy,'moments',m) ;
73 |
74 | test.eq(m,m__)
75 | test.eq(m,m___)
76 | test.eq(dzdx,dzdx_) ;
77 | test.eq(dzdg,dzdg_) ;
78 | test.eq(dzdb,dzdb_) ;
79 | end
80 |
81 | end
82 | end
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnconcat.m:
--------------------------------------------------------------------------------
1 | classdef nnconcat < nntest
2 | methods (Test)
3 | function basic(test)
4 | pick = @(i,x) x{i} ;
5 | sz = [4,5,10,3] ;
6 | for dim = 1:3
7 | sz1 = sz ; sz1(dim) = 3 ;
8 | sz2 = sz ; sz2(dim) = 7 ;
9 | sz3 = sz ; sz3(dim) = 2 ;
10 | x1 = test.randn(sz1) ;
11 | x2 = test.randn(sz2) ;
12 | x3 = test.randn(sz3) ;
13 |
14 | y = vl_nnconcat({x1, x2, x3}, dim) ;
15 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
16 | dzdy = test.randn(size(y)) ;
17 | dzdx = vl_nnconcat({x1, x2, x3} ,dim, dzdy) ;
18 |
19 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
20 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
21 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
22 | end
23 | end
24 |
25 | function by_size(test)
26 | pick = @(i,x) x{i} ;
27 | sz = [4,5,10,3] ;
28 | for dim = 1:3
29 | sz1 = sz ; sz1(dim) = 3 ;
30 | sz2 = sz ; sz2(dim) = 7 ;
31 | sz3 = sz ; sz3(dim) = 2 ;
32 | x1 = test.randn(sz1) ;
33 | x2 = test.randn(sz2) ;
34 | x3 = test.randn(sz3) ;
35 |
36 | y = vl_nnconcat({x1, x2, x3}, dim) ;
37 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
38 | dzdy = test.randn(size(y)) ;
39 | dzdx = vl_nnconcat({}, dim, dzdy, 'inputSizes', {sz1, sz2, sz3}) ;
40 |
41 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
42 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
43 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nndropout.m:
--------------------------------------------------------------------------------
1 | classdef nndropout < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(4,5,10,3) ;
5 | [y,mask] = vl_nndropout(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ;
8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ;
9 | end
10 | end
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnloss.m:
--------------------------------------------------------------------------------
1 | classdef nnloss < nntest
2 | properties (TestParameter)
3 | loss = {...
4 | 'classerror', 'log', 'softmaxlog', 'mhinge', 'mshinge', ...
5 | 'binaryerror', 'binarylog', 'logistic', 'hinge'}
6 | weighed = {false, true}
7 | end
8 |
9 | properties
10 | x
11 | end
12 |
13 | methods
14 | function [x,c,dzdy,instanceWeights] = getx(test,loss)
15 | numClasses = 3 ;
16 | numAttributes = 5 ;
17 | numImages = 3 ;
18 | w = 5 ;
19 | h = 4 ;
20 | switch loss
21 | case {'log', 'softmaxlog', 'mhinge', 'mshinge', 'classerror'}
22 | % multiclass
23 | instanceWeights = test.rand(h,w) / test.range / (h*w) ;
24 | c = randi(numClasses, h,w,1,numImages) ;
25 | c = test.toDevice(c) ;
26 | otherwise
27 | % binary
28 | instanceWeights = test.rand(h,w, numAttributes) / test.range / (h*w*numAttributes) ;
29 | c = sign(test.randn(h,w,numAttributes, numImages)) ;
30 | end
31 | c = test.toDataType(c) ;
32 | switch loss
33 | case {'log'}
34 | x = test.rand(h,w, numClasses, numImages) / test.range * .60 + .20 ;
35 | x = bsxfun(@rdivide, x, sum(x,3)) ;
36 | case {'binarylog'}
37 | x = test.rand(h,w, numAttributes, numImages) / test.range * .60 + .20 ;
38 | case {'softmaxlog'}
39 | x = test.randn(h,w, numClasses, numImages) / test.range ;
40 | case {'mhinge', 'mshinge', 'classerror'}
41 | x = test.randn(h,w, numClasses, numImages) / test.range ;
42 | case {'hinge', 'logistic', 'binaryerror'}
43 | x = test.randn(h,w, numAttributes, numImages) / test.range ;
44 | end
45 | dzdy = test.randn(1,1) / test.range ;
46 | end
47 | end
48 |
49 | methods (Test)
50 | function nullcategories(test, loss, weighed)
51 | [x,c,dzdy,instanceWeights] = test.getx(loss) ;
52 | % make a number of categories null
53 | c = c .* (test.randn(size(c)) > 0) ;
54 | opts = {'loss',loss} ;
55 | if weighed, opts = {opts{:}, 'instanceWeights', instanceWeights} ; end
56 | y = vl_nnloss(x,c,[],opts{:}) ;
57 | dzdx = vl_nnloss(x,c,dzdy,opts{:}) ;
58 | test.der(@(x) vl_nnloss(x,c,[],opts{:}), x, dzdy, dzdx, 0.001, -5e-1) ;
59 | end
60 |
61 | function convolutional(test, loss, weighed)
62 | [x,c,dzdy,instanceWeights] = test.getx(loss) ;
63 | opts = {'loss',loss} ;
64 | if weighed, opts = {opts{:}, 'instanceWeights', instanceWeights} ; end
65 | y = vl_nnloss(x,c,[],opts{:}) ;
66 | dzdx = vl_nnloss(x,c,dzdy,opts{:}) ;
67 | test.der(@(x) vl_nnloss(x,c,[],opts{:}), x, dzdy, dzdx, 0.001, -5e-1) ;
68 | end
69 |
70 | function instanceWeights(test, loss)
71 | % Make sure that the instance weights are being applied
72 | [x,c,dzdy,instanceWeights] = test.getx(loss) ;
73 | % make a number of categories null
74 | c = c .* (test.randn(size(c)) > 0) ;
75 | instanceWeights(:) = 0;
76 | opts = {'loss', loss, 'instanceWeights', instanceWeights} ;
77 | y = gather(vl_nnloss(x,c,[],opts{:})) ;
78 | test.verifyEqual(y, cast(0, 'like', y), 'AbsTol', 1e-3) ;
79 | end
80 | end
81 | end
82 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnmnist.m:
--------------------------------------------------------------------------------
1 | classdef nnmnist < nntest
2 | properties (TestParameter)
3 | networkType = {'simplenn', 'dagnn'}
4 | end
5 |
6 | methods (TestClassSetup)
7 | function init(test)
8 | addpath(fullfile(vl_rootnn, 'examples', 'mnist'));
9 | end
10 | end
11 |
12 | methods (Test)
13 | function valErrorRate(test, networkType)
14 | clear mex ; % will reset GPU, remove MCN to avoid crashing
15 | % MATLAB on exit (BLAS issues?)
16 | if strcmp(test.currentDataType, 'double'), return ; end
17 | rng(0); % fix random seed, for reproducible tests
18 | switch test.currentDevice
19 | case 'cpu'
20 | gpus = [];
21 | case 'gpu'
22 | gpus = 1;
23 | end
24 | trainOpts = struct('numEpochs', 1, 'continue', false, 'gpus', gpus, ...
25 | 'plotStatistics', false);
26 | if strcmp(networkType, 'simplenn')
27 | trainOpts.errorLabels = {'error', 'top5err'} ;
28 | end
29 | [~, info] = cnn_mnist('train', trainOpts, 'networkType', networkType);
30 | test.verifyLessThan(info.train.error, 0.08);
31 | test.verifyLessThan(info.val.error, 0.025);
32 | end
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnnormalize.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalize < nntest
2 | properties (TestParameter)
3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17}
4 | sgroup = {2 3 4 5 6 7}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, group)
9 | param = [group, .1, .5, .75] ;
10 | x = test.randn(3,2,10,4) ;
11 | y = vl_nnnormalize(x,param) ;
12 | dzdy = test.rand(size(y))-0.5 ;
13 | dzdx = vl_nnnormalize(x,param,dzdy) ;
14 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ;
15 | end
16 |
17 | function compare_to_naive(test, sgroup)
18 | param = [sgroup, .1, .5, .75] ;
19 | x = test.randn(3,2,10,4) ;
20 | y = vl_nnnormalize(gather(x),param) ;
21 | y_ = test.zeros(size(y)) ;
22 | x_ = gather(x) ;
23 | for i=1:size(x,1)
24 | for j=1:size(x,2)
25 | for n=1:size(x,4)
26 | t = test.zeros(1,1,size(x,3),1) ;
27 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ...
28 | ones(param(1),1), 'same')).^(-param(4)) ;
29 | y_(i,j,:,n) = x_(i,j,:,n) .* t ;
30 | end
31 | end
32 | end
33 | test.eq(y,y_) ;
34 | end
35 |
36 | function l2(test)
37 | x = test.randn(1,1,10,1) ;
38 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ;
39 | test.eq(sum(y(:).^2), test.toDataType(1), 1e-2) ;
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalizelp < nntest
2 | properties (TestParameter)
3 | h = {1 2 3 4}
4 | w = {1 2 3 4}
5 | d = {2 3 4}
6 | p = {2 4}
7 | end
8 |
9 | methods (Test)
10 | function basicl2(test, h,w,d)
11 | x = test.randn(h,w,d,3) ;
12 | y = vl_nnnormalizelp(x) ;
13 | dzdy = test.rand(size(y))-0.5 ;
14 | dzdx = vl_nnnormalizelp(x,dzdy) ;
15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, 1e-4, 0.3) ;
16 | end
17 |
18 | function lp(test, p)
19 | x = test.randn(2,3,5,3) / test.range ;
20 | y = vl_nnnormalizelp(x,'p', p) ;
21 | dzdy = test.rand(size(y))-0.5 ;
22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ;
23 | test.der(@(x) vl_nnnormalizelp(x,'p',p), x, dzdy, dzdx, 1e-4, 0.3) ;
24 | end
25 |
26 | function dimensions(test, p)
27 | x = test.randn(2,3,5,3) / test.range ;
28 | dimsr = {[1],[2],[3],[4], ...
29 | [1 2], [1 3], [1 4], [2 3], [2 4], [3 4], ...
30 | [1 2 3], [2 3 4], ...
31 | [1 2 3 4]} ;
32 | for dims = dimsr
33 | opts = {'p',2,'dimensions',dims{1},'epsilon',1e-4} ;
34 | y = vl_nnnormalizelp(x,opts{:}) ;
35 | dzdy = test.rand(size(y))-0.5 ;
36 | dzdx = vl_nnnormalizelp(x,dzdy,opts{:}) ;
37 | test.der(@(x) vl_nnnormalizelp(x,opts{:}), x, dzdy, dzdx, .5e-4, 1.0) ;
38 | end
39 | end
40 | end
41 | end
42 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnoffset.m:
--------------------------------------------------------------------------------
1 | classdef nnoffset < nntest
2 | methods (Test)
3 | function basic(test)
4 | param = [.34, .5] ;
5 | x = test.randn(4,5,10,3) ;
6 | y = vl_nnnoffset(x,param) ;
7 | dzdy = test.randn(size(y)) ;
8 | dzdx = vl_nnnoffset(x,param,dzdy) ;
9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ;
10 | end
11 | end
12 | end
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnpdist.m:
--------------------------------------------------------------------------------
1 | classdef nnpdist < nntest
2 | properties (TestParameter)
3 | oneToOne = {false, true}
4 | noRoot = {false, true}
5 | p = {.5 1 2 3}
6 | aggregate = {false, true}
7 | end
8 | methods (Test)
9 | function basic(test,oneToOne, noRoot, p, aggregate)
10 | if aggregate
11 | % make it smaller to avoid numerical derivative issues with
12 | % float
13 | h = 2 ;
14 | w = 2 ;
15 | else
16 | h = 13 ;
17 | w = 17 ;
18 | end
19 | d = 4 ;
20 | n = 5 ;
21 | x = test.randn(h,w,d,n) ;
22 | if oneToOne
23 | x0 = test.randn(h,w,d,n) ;
24 | else
25 | x0 = test.randn(1,1,d,n) ;
26 | end
27 | opts = {'noRoot', noRoot, 'aggregate', aggregate} ;
28 |
29 | y = vl_nnpdist(x, x0, p, opts{:}) ;
30 |
31 | % make sure they are not too close in any dimension as this may be a
32 | %      problem for the finite difference derivatives as one could
33 | % approach 0 which is not differentiable for some p-norms
34 |
35 | s = abs(bsxfun(@minus, x, x0)) < test.range*1e-1 ;
36 | x(s) = x(s) + 5*test.range ;
37 |
38 | dzdy = test.rand(size(y)) ;
39 | [dzdx, dzdx0] = vl_nnpdist(x,x0,p,dzdy,opts{:}) ;
40 | test.der(@(x) vl_nnpdist(x,x0,p,opts{:}), x, dzdy, dzdx, test.range * 1e-3) ;
41 | if oneToOne
42 | % Pdist does not implement backprop of the bsxfun
43 | test.der(@(x0) vl_nnpdist(x,x0,p,opts{:}), x0, dzdy, dzdx0, test.range * 1e-3) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnrelu.m:
--------------------------------------------------------------------------------
1 | classdef nnrelu < nntest
2 | properties
3 | x
4 | end
5 |
6 | methods (TestClassSetup)
7 | function data(test,device)
8 | %       make sure that all elements in x are different. In this way,
9 | % we can compute numerical derivatives reliably by adding a delta < .5.
10 | x = test.randn(15,14,3,2) ;
11 | x(:) = randperm(numel(x))' ;
12 | % avoid non-diff value for test
13 | x(x==0)=1 ;
14 | test.x = x ;
15 | test.range = 10 ;
16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
17 | end
18 | end
19 |
20 | methods (Test)
21 | function basic(test)
22 | x = test.x ;
23 | y = vl_nnrelu(x) ;
24 | dzdy = test.randn(size(y)) ;
25 | dzdx = vl_nnrelu(x,dzdy) ;
26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ;
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnroipool.m:
--------------------------------------------------------------------------------
1 | classdef nnroipool < nntest
2 | properties
3 | x
4 | end
5 |
6 | properties (TestParameter)
7 | method = {'avg', 'max'}
8 | subdivisions = {[1 1], [2 1], [1 2], [3 7], [16 16]}
9 | end
10 |
11 | methods (TestClassSetup)
12 | function data(test,device)
13 | %       make sure that all elements in x are different. In this way,
14 | % we can compute numerical derivatives reliably by adding a delta < .5.
15 | x = test.randn(15,14,3,2) ;
16 | x(:) = randperm(numel(x))' ;
17 | test.x = x ;
18 | test.range = 10 ;
19 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
20 | end
21 | end
22 |
23 | methods (Test)
24 | function basic(test,method,subdivisions)
25 | R = [1 1 1 2 2 2 1 1 ;
26 | 0 1 2 0 1 2 1 1 ;
27 | 0 4 3 0 1 2 1 1 ;
28 | 15 5 6 15 4 2 9 0 ;
29 | 14 7 9 14 4 8 1 0] ;
30 | R = test.toDevice(test.toDataType(R)) ;
31 | x = test.x ;
32 | args = {'method', method, 'subdivisions', subdivisions} ;
33 | y = vl_nnroipool(x,R,args{:}) ;
34 | dzdy = test.randn(size(y)) ;
35 | dzdx = vl_nnroipool(x,R,dzdy,args{:}) ;
36 | test.der(@(x) vl_nnroipool(x,R,args{:}), ...
37 | x, dzdy, dzdx, test.range * 1e-2) ;
38 | end
39 |
40 | function identity(test,method)
41 | x = test.toDevice(test.toDataType((2:10)'*(1:10))) ;
42 | R = test.toDevice(test.toDataType([1, 1, 1, 9, 10])) ;
43 | T = [0 1 0 ; 1 0 0] ;
44 | opts = {'method', method, ...
45 | 'subdivisions', [9,10], ...
46 | 'transform', T} ;
47 | y = vl_nnroipool(x,R,opts{:}) ;
48 | test.eq(x,y) ;
49 | end
50 | end
51 | end
52 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnsigmoid.m:
--------------------------------------------------------------------------------
1 | classdef nnsigmoid < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(5,5,1,1)/test.range ;
5 | y = vl_nnsigmoid(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nnsigmoid(x,dzdy) ;
8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ;
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnsoftmax.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmax < nntest
2 | properties (TestParameter)
3 | h = {1 2 3}
4 | w = {1 2}
5 | end
6 | methods (Test)
7 | function basic(test,h,w)
8 | d = 10 ;
9 | n = 3 ;
10 | x = test.randn(h,w,d,n)/test.range ;
11 | y = vl_nnsoftmax(x) ;
12 | dzdy = test.randn(size(y)) ;
13 | dzdx = vl_nnsoftmax(x, dzdy) ;
14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ;
15 | end
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmaxloss < nntest
2 | properties (TestParameter)
3 | weighed = {false true}
4 | multilab = {false true}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, multilab, weighed)
9 | C = 10 ;
10 | n = 3 ;
11 | if multilab
12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ;
13 | else
14 | c = reshape([7 2 1],1,1,1,[]) ;
15 | end
16 | if weighed
17 | c = cat(3, c, test.rand(size(c))) ;
18 | end
19 |
20 | % compare direct and indirect composition; this cannot
21 | % take large test.ranges
22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative
23 | y = vl_nnsoftmaxloss(x,c) ;
24 | if size(c,3) == 1
25 | opts = {'loss','log'} ;
26 | else
27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ;
28 | end
29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ;
30 | dzdy = test.randn(size(y)) ;
31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ;
33 | test.eq(y,y_) ;
34 | test.eq(dzdx,dzdx_) ;
35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, dzdy, dzdx, 0.001, -5e1) ;
36 |
37 | % now larger input range
38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative
39 | y = vl_nnsoftmaxloss(x,c) ;
40 | dzdy = test.randn(size(y)) ;
41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ...
43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ;
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnsolvers.m:
--------------------------------------------------------------------------------
1 | classdef nnsolvers < nntest
2 | properties (TestParameter)
3 | networkType = {'simplenn', 'dagnn'}
4 | solver = {[], @solver.adagrad, @solver.adadelta, @solver.rmsprop, @solver.adam}
5 | end
6 | properties
7 | imdb
8 | init_w
9 | init_b
10 | end
11 |
12 | methods (TestClassSetup)
13 | function data(test, dataType)
14 | % synthetic data, 2 classes of gaussian samples with different means
15 | rng(0) ;
16 | sz = [15, 10, 5] ; % input size
17 | x1 = 2 * randn([sz, 100], dataType) ; % place mean at the origin
18 | x2 = bsxfun(@plus, 2 * randn(sz, dataType), 2 * randn([sz, 100], dataType)) ; % place mean randomly
19 |
20 | test.imdb.x = cat(4, x1, x2) ;
21 | test.imdb.y = [ones(100, 1, dataType); 2 * ones(100, 1, dataType)] ;
22 |
23 | test.init_w = 1e-3 * randn([sz, 2], dataType) ; % initial parameters
24 | test.init_b = zeros([2, 1], dataType) ;
25 | end
26 | end
27 |
28 | methods (Test)
29 | function basic(test, networkType, solver)
30 | clear mex ; % will reset GPU, remove MCN to avoid crashing
31 | % MATLAB on exit (BLAS issues?)
32 |
33 | if strcmp(networkType, 'simplenn') && strcmp(test.currentDataType, 'double')
34 | return % simplenn does not work well with doubles
35 | end
36 |
37 | % a simple logistic regression network
38 | net.layers = {struct('type','conv', 'weights',{{test.init_w, test.init_b}}), ...
39 | struct('type','softmaxloss')} ;
40 |
41 | switch test.currentDevice
42 | case 'cpu', gpus = [];
43 | case 'gpu', gpus = 1;
44 | end
45 |
46 | switch networkType
47 | case 'simplenn',
48 | trainfn = @cnn_train ;
49 | getBatch = @(imdb, batch) deal(imdb.x(:,:,:,batch), imdb.y(batch)) ;
50 |
51 | case 'dagnn',
52 | trainfn = @cnn_train_dag ;
53 |
54 | if isempty(gpus)
55 | getBatch = @(imdb, batch) ...
56 | {'input',imdb.x(:,:,:,batch), 'label',imdb.y(batch)} ;
57 | else
58 | getBatch = @(imdb, batch) ...
59 | {'input',gpuArray(imdb.x(:,:,:,batch)), 'label',imdb.y(batch)} ;
60 | end
61 |
62 | net = dagnn.DagNN.fromSimpleNN(net, 'canonicalNames', true) ;
63 | net.addLayer('top1err', dagnn.Loss('loss', 'classerror'), ...
64 | {'prediction','label'}, 'top1err') ;
65 | end
66 |
67 | % train 1 epoch with small batches and check convergence
68 | [~, info] = trainfn(net, test.imdb, getBatch, ...
69 | 'train', 1:numel(test.imdb.y), 'val', 1, ...
70 | 'solver', solver, 'batchSize', 10, 'numEpochs',1, ...
71 | 'continue', false, 'gpus', gpus, 'plotStatistics', false) ;
72 |
73 | test.verifyLessThan(info.train.top1err, 0.35);
74 | test.verifyLessThan(info.train.objective, 0.5);
75 | end
76 | end
77 | end
78 |
--------------------------------------------------------------------------------
/matlab/xtest/suite/nnspnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnspnorm < nntest
2 | methods (Test)
3 | function basic(test)
4 | h = 13 ;
5 | w = 17 ;
6 | d = 4 ;
7 | n = 5 ;
8 | param = [3, 3, 0.1, 0.75] ;
9 | x = test.randn(h,w,d,n) ;
10 | y = vl_nnspnorm(x, param) ;
11 | dzdy = test.rand(h, w, d, n) ;
12 | dzdx = vl_nnspnorm(x, param, dzdy) ;
13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_bench_bnorm.m:
--------------------------------------------------------------------------------
1 | function vl_bench_bnorm(gpu)
2 | if nargin < 1
3 | gpu = false ;
4 | end
5 |
6 | T = 100 ;
7 | x = randn(64,64,32,32,'single') ;
8 | g = randn(32,1,'single') ;
9 | b = randn(32,1,'single') ;
10 |
11 | if gpu
12 | x = gpuArray(x) ;
13 | g = gpuArray(g) ;
14 | b = gpuArray(b) ;
15 | end
16 |
17 | tic
18 | for t=1:T
19 | y = vl_nnbnorm(x,g,b) ;
20 | end
21 | if gpu, wait(gpuDevice) ; end
22 | fprintf('new: %f\n',toc);
23 |
24 | tic
25 | for t=1:T
26 | y_ = vl_nnbnorm_old(x,g,b) ;
27 | end
28 | if gpu, wait(gpuDevice) ; end
29 | fprintf('old: %f\n',toc);
30 |
31 | dzdy = randn(size(y),'single') ;
32 | if gpu
33 | dzdy = gpuArray(dzdy) ;
34 | end
35 |
36 | tic
37 | for t=1:T
38 | [a,b,c] = vl_nnbnorm(x,g,b,dzdy) ;
39 | end
40 | if gpu, wait(gpuDevice) ; end
41 | fprintf('new deriv: %f\n',toc);
42 |
43 | tic
44 | for t=1:T
45 | [a_,b_,c_] = vl_nnbnorm_old(x,g,b,dzdy) ;
46 | end
47 | if gpu, wait(gpuDevice) ; end
48 | fprintf('old deriv: %f\n',toc);
49 |
50 | vl_testsim(y,y_);
51 | vl_testsim(a,a_);
52 | vl_testsim(b,b_);
53 | vl_testsim(c,c_);
54 | end
55 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_bench_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | % VL_BENCH_IMREADJPEG Evaluates the speed of vl_imreadjpeg
2 |
3 | numThreads = 4 ;
4 | base = 'data/bench-imreadjpeg' ;
5 |
6 | files = {} ;
7 | files = dir(fullfile(base,'*.jpg')) ;
8 | files = fullfile(base, {files.name}) ;
9 | if numel(files) > 256, files = files(1:256) ; end
10 |
11 | for preallocate = [true, false]
12 | opts={'verbose','verbose', 'preallocate', preallocate} ;
13 | for t=1:4
14 | % simple read
15 | fprintf('direct read single thread\n') ;
16 | clear ims ;
17 | tic ;
18 | ims = vl_imreadjpeg(files, 'numThreads', 1, opts{:}) ;
19 | directSingle(t) = toc ;
20 | fprintf(' done\n') ;
21 | pause(1) ;
22 |
23 | % simple read
24 | fprintf('direct read multi thread\n') ;
25 | clear ims ;
26 | tic ;
27 | ims = vl_imreadjpeg(files, 'numThreads', numThreads, opts{:}) ;
28 | direct(t) = toc ;
29 | fprintf(' done\n') ;
30 | pause(1) ;
31 |
32 | % threaded read
33 | fprintf('issue prefetch\n') ;
34 | tic ;
35 | vl_imreadjpeg(files, 'prefetch', opts{:}) ;
36 | prefetch(t) = toc ;
37 | fprintf(' done [pause 6]\n') ;
38 | pause(6)
39 |
40 | fprintf('prefetched read\n') ;
41 |     clear ims_ ; % do not account for the time required to delete this
42 | tic ;
43 | ims_ = vl_imreadjpeg(files, opts{:}) ;
44 | indirect(t) = toc ;
45 | pause(1) ;
46 | end
47 |
48 | n = numel(ims) ;
49 |   fprintf('** test results preallocate %d\n', preallocate) ;
50 |   fprintf('\tsingle thread: %.1f pm %.1f\n', mean(n./directSingle), std(n./directSingle)) ;
51 | fprintf('\t%d threads: %.1f pm %.1f\n', numThreads, mean(n./direct), std(n./direct)) ;
52 | fprintf('\tissue prefetch: %.1f pm %.1f\n', mean(n./prefetch), std(n./prefetch)) ;
53 | fprintf('\tretrieve prefetched: %.1f pm %.1f\n', mean(n./indirect), std(n./indirect)) ;
54 | fprintf('\n\n') ;
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_nnnormalizelp_old.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnormalizelp(x,dzdy,varargin)
2 | %VL_NNNORMALIZELP CNN Lp normalization
3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial
4 | % location in the array X:
5 | %
6 | %      Y(i,j,k) = X(i,j,k) / (sum_q X(i,j,q).^p + epsilon)^(1/p)
7 | %
8 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the
9 | % function with respect to X projected onto DZDY.
10 | %
11 | %   VL_NNNORMALIZELP(___, 'opt', val, ...) takes the following options:
12 | %
13 | % `p`:: 2
14 | % The exponent of the Lp norm. Warning: currently only even
15 | % exponents are supported.
16 | %
17 | % `epsilon`:: 0.01
18 | %     The constant added to the sum of p-th powers before taking the
19 | %     1/p power (see the formula above).
20 | %
21 | % `spatial`:: `false`
22 | % If `true`, sum along the two spatial dimensions instead of
23 | % along the feature channels.
24 | %
25 | % See also: VL_NNNORMALIZE().
26 |
27 | opts.epsilon = 1e-2 ;
28 | opts.p = 2 ;
29 | opts.spatial = false ;
30 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
31 |
32 | if ~opts.spatial
33 | massp = sum(x.^opts.p,3) + opts.epsilon ;
34 | else
35 | massp = sum(sum(x.^opts.p,1),2) + opts.epsilon ;
36 | end
37 | mass = massp.^(1/opts.p) ;
38 | y = bsxfun(@rdivide, x, mass) ;
39 |
40 | if nargin < 2 || isempty(dzdy)
41 | return ;
42 | else
43 | dzdy = bsxfun(@rdivide, dzdy, mass) ;
44 | if ~opts.spatial
45 | tmp = sum(dzdy .* x, 3) ;
46 | else
47 | tmp = sum(sum(dzdy .* x, 1),2);
48 | end
49 | y = dzdy - bsxfun(@times, tmp, bsxfun(@rdivide, x.^(opts.p-1), massp)) ;
50 | end
51 |
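For the default p = 2, the output has approximately unit L2 norm along the
feature channels whenever ||X|| dominates epsilon; a quick sketch:

    x = randn(1,1,10,'single') ;
    y = vl_nnnormalizelp(x) ;            % p = 2, epsilon = 0.01 by default
    sum(y(:).^2)                         % close to 1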
--------------------------------------------------------------------------------
/matlab/xtest/vl_test_bnorm.m:
--------------------------------------------------------------------------------
1 | %%
2 | % Test script to compare the GPU/CPU implementation of vl_nnbnorm
3 | % against the old implementation (vl_nnbnorm_old)
4 | %%
5 |
6 | gpu = false;
7 | gpu = true ;
8 |
9 | T = 1 ;
10 | x = randn(64,64,32,32,'single') ;
11 | g = randn(32,1,'single') ;
12 | b = randn(32,1,'single') ;
13 |
14 | if gpu
15 | x = gpuArray(x) ;
16 | g = gpuArray(g) ;
17 | b = gpuArray(b) ;
18 | end
19 |
20 | a=vl_nnbnorm(x,g,b);
21 | a_=vl_nnbnorm_old(x,g,b);
22 |
23 | vl_testsim(a,a_)
24 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_test_economic_relu.m:
--------------------------------------------------------------------------------
1 | % VL_TEST_ECONOMIC_RELU
2 | function vl_test_economic_relu()
3 |
4 | x = randn(11,12,8,'single');
5 | w = randn(5,6,8,9,'single');
6 | b = randn(1,9,'single') ;
7 |
8 | net.layers{1} = struct('type', 'conv', ...
9 | 'filters', w, ...
10 | 'biases', b, ...
11 | 'stride', 1, ...
12 | 'pad', 0);
13 | net.layers{2} = struct('type', 'relu') ;
14 |
15 | res = vl_simplenn(net, x) ;
16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ;
17 | clear res ;
18 |
19 | res_ = vl_simplenn(net, x, dzdy) ;
20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ;
21 |
22 | a=whos('res_') ;
23 | b=whos('res__') ;
24 | assert(a.bytes > b.bytes) ;
25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ;
26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ;
27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ;
28 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_test_gpureset.m:
--------------------------------------------------------------------------------
1 | for explictMexReset = [false]
2 |
3 | % reset the same GPU device
4 | for t = 1:6
5 | if explictMexReset, clear mex ; end
6 | if mod(t-1,2) == 0
7 | disp('vl_test_gpureset: resetting GPU') ;
8 | gpuDevice(1) ;
9 | else
10 | disp('vl_test_gpureset: not resetting GPU') ;
11 | end
12 | if t > 1, disp(a) ; end
13 | a = gpuArray(single(ones(10))) ;
14 | b = gpuArray(single(ones(5))) ;
15 | c = vl_nnconv(a,b,[],'nocudnn') ;
16 | end
17 |
18 | % resetting GPU arguments to a MEX file should fail properly
19 | a = gpuArray(single(ones(10))) ;
20 | b = gpuArray(single(ones(5))) ;
21 | c = vl_nnconv(a,b,[],'nocudnn') ;
22 |
23 | gpuDevice(1) ;
24 | disp(a) ;
25 | try
26 | c = vl_nnconv(a,b,[],'nocudnn') ;
27 | catch e
28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ;
29 | end
30 |
31 | % switch GPU devices
32 | if gpuDeviceCount > 1
33 |   disp('vl_test_gpureset: test switching GPU device') ;
34 | for t = 1:gpuDeviceCount
35 | if explictMexReset, clear mex ; end
36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ;
37 | gpuDevice(t) ;
38 | a = gpuArray(single(ones(10))) ;
39 | b = gpuArray(single(ones(5))) ;
40 | c = vl_nnconv(a,b,[],'nocudnn') ;
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_test_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | function vl_test_imreadjpeg
2 | % VL_TEST_IMREADJPEG
3 |
4 | % Test basic file reading capability
5 | for t=1:6
6 | files{t} = which(sprintf('office_%d.jpg', t)) ;
7 | end
8 | ims = vl_imreadjpeg(files) ;
9 |
10 | % Test reading a CMYK image
11 | ims_cmyk = vl_imreadjpeg({which('cmyk.jpg')}) ;
12 |
13 | ims = vl_imreadjpeg(files) ;
14 | assert(all(~cellfun(@isempty, ims)), 'Image files not loaded.');
15 |
16 | % Test inserting a non-image file
17 | files_ = files ;
18 | files_{3} = [mfilename('fullpath') '.m'];
19 | ims_ = vl_imreadjpeg(files_) ;
20 | for t=setdiff(1:6,3)
21 | assert(isequal(ims{t},ims_{t})) ;
22 | end
23 |
24 | % Test inserting a non-existing file
25 | files__ = files_ ;
26 | files__{4} = 'idontexist.jpg' ;
27 | ims__ = vl_imreadjpeg(files__) ;
28 | for t=setdiff(1:6,[3 4])
29 | assert(isequal(ims{t},ims__{t})) ;
30 | end
31 |
32 | for n = 1:4
33 | % Test prefetching
34 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ;
35 | ims___ = vl_imreadjpeg(files) ;
36 | assert(isequal(ims,ims___)) ;
37 |
38 | % Hardening: test prefetching, clearing mex, fetching
39 | vl_imreadjpeg(files,'prefetch') ;
40 | clear mex ;
41 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ;
42 | assert(isequal(ims,ims___)) ;
43 | end
44 |
45 | ims = vl_imreadjpeg(files) ;
46 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_test_print.m:
--------------------------------------------------------------------------------
1 | function vl_test_print(varargin)
2 |
3 | addpath(fullfile(vl_rootnn(), 'examples', 'mnist'));
4 |
5 | net = cnn_mnist_init('networkType', 'dagnn');
6 | net.print(varargin{:});
7 |
8 | end
9 |
10 |
--------------------------------------------------------------------------------
/matlab/xtest/vl_testnn.m:
--------------------------------------------------------------------------------
1 | function vl_testnn(varargin)
2 | %VL_TESTNN Run MatConvNet test suite
3 | % VL_TESTNN('option', value, ...) takes the following options:
4 | % `cpu`:: true
5 | % Run the CPU tests.
6 | %
7 | % `gpu`:: false
8 | % Run the GPU tests.
9 | %
10 | % `single`:: true
11 | % Perform tests in single precision.
12 | %
13 | % `double`:: false
14 | % Perform tests in double precision.
15 | %
16 | % `command`:: `'nn'`
17 | %      Run only tests whose name starts with the specified substring.
18 | %      E.g. `vl_testnn('command', 'nnloss')` would run only the nnloss tests.
19 | %
20 | % `break`:: false
21 | % Stop tests in case of error.
22 | %
23 | % `tapFile`:: ''
24 | %      Output the test results to a file. If the specified file already
25 | %      exists, it is overwritten.
26 | %
27 | % `suiteDir`:: ''
28 | % Specifies the directory where the test suite files are located. If
29 | %      left empty, the default suite is used (located in
30 | %      matlab/xtest/suite).
31 | %
32 | % This function uses the Matlab unit testing framework which was
33 | % introduced in Matlab R2013a (v8.1).
34 |
35 | % Copyright (C) 2015-16 Andrea Vedaldi, Karel Lenc.
36 | % All rights reserved.
37 | %
38 | % This file is part of the VLFeat library and is made available under
39 | % the terms of the BSD license (see the COPYING file).
40 |
41 | opts.cpu = true ;
42 | opts.gpu = false ;
43 | opts.single = true ;
44 | opts.double = false ;
45 | opts.command = 'nn' ;
46 | opts.break = false ;
47 | opts.tapFile = '';
48 | opts.suiteDir = '' ;
49 | opts = vl_argparse(opts, varargin) ;
50 |
51 | import matlab.unittest.constraints.* ;
52 | import matlab.unittest.selectors.* ;
53 | import matlab.unittest.plugins.TAPPlugin;
54 | import matlab.unittest.plugins.ToFile;
55 | addpath(fullfile(vl_rootnn, 'examples'));
56 |
57 | % Choose which tests to run
58 | sel = HasName(StartsWithSubstring(opts.command)) ;
59 | if ~opts.gpu
60 | sel = sel & ~HasName(ContainsSubstring('device=gpu')) ;
61 | end
62 | if ~opts.cpu
63 | sel = sel & ~HasName(ContainsSubstring('device=cpu')) ;
64 | end
65 | if ~opts.double
66 | sel = sel & ~HasName(ContainsSubstring('dataType=double')) ;
67 | end
68 | if ~opts.single
69 | sel = sel & ~HasName(ContainsSubstring('dataType=single')) ;
70 | end
71 |
72 | root = fileparts(mfilename('fullpath')) ;
73 | if isempty(opts.suiteDir)
74 | opts.suiteDir = fullfile(root, 'suite') ;
75 | else % any external subclasses of nntest will need it to be on the path
76 | addpath(fullfile(root, 'suite')) ;
77 | end
78 |
79 | % Run tests
80 | suite = matlab.unittest.TestSuite.fromFolder(opts.suiteDir, sel) ;
81 | runner = matlab.unittest.TestRunner.withTextOutput('Verbosity',3);
82 | if opts.break
83 | runner.addPlugin(matlab.unittest.plugins.StopOnFailuresPlugin) ;
84 | end
85 | if ~isempty(opts.tapFile)
86 | if exist(opts.tapFile, 'file')
87 | delete(opts.tapFile);
88 | end
89 | runner.addPlugin(TAPPlugin.producingOriginalFormat(ToFile(opts.tapFile)));
90 | end
91 | result = runner.run(suite);
92 | display(result)
93 |
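A typical invocation per the options above, e.g. running only the ReLU tests
on the CPU in single precision:

    vl_testnn('command', 'nnrelu', 'cpu', true, 'gpu', false) ;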
--------------------------------------------------------------------------------