├── matconvnet-1.0-beta20
│ ├── matlab
│ │ ├── 1
│ │ ├── compatibility
│ │ │ └── parallel
│ │ │ │ ├── labindex.m
│ │ │ │ ├── numlabs.m
│ │ │ │ └── gather.m
│ │ ├── mex
│ │ │ ├── vl_nnbnorm.mexa64
│ │ │ ├── vl_nnconv.mexa64
│ │ │ ├── vl_nnconvt.mexa64
│ │ │ ├── vl_nnpool.mexa64
│ │ │ ├── vl_imreadjpeg.mexa64
│ │ │ ├── vl_nnnormalize.mexa64
│ │ │ └── vl_nnbilinearsampler.mexa64
│ │ ├── src
│ │ │ ├── bits
│ │ │ │ ├── data.cpp
│ │ │ │ ├── nnbnorm.cpp
│ │ │ │ ├── nnconv.cpp
│ │ │ │ ├── datamex.cpp
│ │ │ │ ├── nnpooling.cpp
│ │ │ │ ├── nnnormalize.cpp
│ │ │ │ ├── nnbilinearsampler.cpp
│ │ │ │ ├── nnsubsample.cpp
│ │ │ │ ├── nnfullyconnected.cpp
│ │ │ │ ├── impl
│ │ │ │ │ ├── copy.hpp
│ │ │ │ │ ├── nnbilinearsampler_cudnn.hpp
│ │ │ │ │ ├── normalize.hpp
│ │ │ │ │ ├── nnbias_cudnn.hpp
│ │ │ │ │ ├── copy_cpu.cpp
│ │ │ │ │ ├── subsample.hpp
│ │ │ │ │ ├── im2row.hpp
│ │ │ │ │ ├── nnconv_cudnn.hpp
│ │ │ │ │ ├── bilinearsampler.hpp
│ │ │ │ │ ├── nnpooling_cudnn.hpp
│ │ │ │ │ ├── copy_gpu.cu
│ │ │ │ │ ├── pooling.hpp
│ │ │ │ │ ├── bnorm.hpp
│ │ │ │ │ └── subsample_cpu.cpp
│ │ │ │ ├── nnbias.hpp
│ │ │ │ ├── nnnormalize.hpp
│ │ │ │ ├── nnbilinearsampler.hpp
│ │ │ │ ├── nnfullyconnected.hpp
│ │ │ │ ├── nnsubsample.hpp
│ │ │ │ ├── nnpooling.hpp
│ │ │ │ ├── nnbias.cpp
│ │ │ │ ├── imread.hpp
│ │ │ │ ├── imread.cpp
│ │ │ │ ├── datamex.hpp
│ │ │ │ ├── nnconv.hpp
│ │ │ │ └── nnbnorm.hpp
│ │ │ ├── vl_nnconv.cpp
│ │ │ ├── vl_nnpool.cpp
│ │ │ ├── vl_nnbnorm.cpp
│ │ │ ├── vl_nnconvt.cpp
│ │ │ ├── vl_imreadjpeg.cpp
│ │ │ ├── vl_nnnormalize.cpp
│ │ │ ├── vl_nnbilinearsampler.cpp
│ │ │ └── config
│ │ │ │ └── mex_CUDA_glnxa64.xml
│ │ ├── xtest
│ │ │ ├── suite
│ │ │ │ ├── nnsigmoid.m
│ │ │ │ ├── nndropout.m
│ │ │ │ ├── nnoffset.m
│ │ │ │ ├── nnsoftmax.m
│ │ │ │ ├── nnspnorm.m
│ │ │ │ ├── nnnormalizelp.m
│ │ │ │ ├── nnrelu.m
│ │ │ │ ├── nnbnorm.m
│ │ │ │ ├── nnmnist.m
│ │ │ │ ├── nnnormalize.m
│ │ │ │ ├── nnpdist.m
│ │ │ │ ├── nnsoftmaxloss.m
│ │ │ │ ├── nnconcat.m
│ │ │ │ └── Scale.m
│ │ │ ├── vl_test_bnorm.m
│ │ │ ├── vl_test_economic_relu.m
│ │ │ ├── vl_test_imreadjpeg.m
│ │ │ ├── vl_bench_bnorm.m
│ │ │ ├── vl_test_gpureset.m
│ │ │ ├── vl_bench_imreadjpeg.m
│ │ │ └── vl_testnn.m
│ │ ├── +dagnn
│ │ │ ├── @DagNN
│ │ │ │ ├── setLayerInputs.m
│ │ │ │ ├── setLayerParams.m
│ │ │ │ ├── setLayerOutputs.m
│ │ │ │ ├── reset.m
│ │ │ │ ├── removeLayer.m
│ │ │ │ ├── initParams.m
│ │ │ │ ├── renameLayer.m
│ │ │ │ ├── move.m
│ │ │ │ ├── addLayer.m
│ │ │ │ ├── renameVar.m
│ │ │ │ ├── getVarSizes.m
│ │ │ │ ├── saveobj.m
│ │ │ │ └── loadobj.m
│ │ │ ├── Sigmoid.m
│ │ │ ├── SoftMax.m
│ │ │ ├── LRN.m
│ │ │ ├── NormOffset.m
│ │ │ ├── SpatialNorm.m
│ │ │ ├── ElementWise.m
│ │ │ ├── BilinearSampler.m
│ │ │ ├── Scale.m
│ │ │ ├── DropOut.m
│ │ │ ├── Pooling.m
│ │ │ ├── Filter.m
│ │ │ ├── Loss.m
│ │ │ ├── Sum.m
│ │ │ ├── Concat.m
│ │ │ ├── BatchNorm.m
│ │ │ ├── Conv.m
│ │ │ ├── Crop.m
│ │ │ ├── ReLU.m
│ │ │ └── ConvTranspose.m
│ │ ├── vl_rootnn.m
│ │ ├── vl_nnsigmoid.m
│ │ ├── vl_nnnoffset.m
│ │ ├── vl_setupnn.m
│ │ ├── vl_nnsoftmax.m
│ │ ├── vl_nnnormalizelp.m
│ │ ├── vl_nncrop.m
│ │ ├── vl_nnspnorm.m
│ │ ├── vl_nnconcat.m
│ │ ├── simplenn
│ │ │ └── vl_simplenn_move.m
│ │ ├── vl_nnnormalize.m
│ │ ├── vl_nnrelu.m
│ │ ├── vl_nnbnorm.m
│ │ ├── vl_nndropout.m
│ │ ├── vl_imreadjpeg.m
│ │ ├── vl_nnsoftmaxloss.m
│ │ ├── vl_nnbilinearsampler.m
│ │ └── vl_nnpool.m
│ ├── PreTrainedNets
│ │ └── 1
│ ├── pax_global_header
│ ├── matconvnet.xcodeproj
│ │ ├── project.xcworkspace
│ │ │ └── contents.xcworkspacedata
│ │ └── xcshareddata
│ │ │ └── xcschemes
│ │ │ │ ├── matconv CPU.xcscheme
│ │ │ │ ├── matconv GPU.xcscheme
│ │ │ │ └── matconv cuDNN.xcscheme
│ ├── README.md
│ ├── COPYING
│ ├── matconvnet.sln
│ └── CONTRIBUTING.md
├── Imgs
│ ├── Results.png
│ └── SynthPlot.png
├── Data
│ ├── Texture1.jpg
│ ├── Texture10.bmp
│ ├── Texture11.png
│ ├── Texture12.png
│ ├── Texture13.png
│ ├── Texture14.png
│ ├── Texture16.tif
│ ├── Texture17.tif
│ ├── Texture18.png
│ ├── Texture18.tif
│ ├── Texture19.png
│ ├── Texture2.png
│ ├── Texture20.tif
│ ├── Texture21.tif
│ ├── Texture22.png
│ ├── Texture24.jpg
│ ├── Texture25.png
│ ├── Texture26.png
│ ├── Texture27.jpg
│ ├── Texture28.jpg
│ ├── Texture29.jpg
│ ├── Texture3.jpg
│ ├── Texture3.png
│ ├── Texture31.jpg
│ ├── Texture32.jpg
│ ├── Texture33.png
│ ├── Texture34.png
│ ├── Texture35.jpg
│ ├── Texture36.png
│ ├── Texture37.png
│ ├── Texture38.png
│ ├── Texture39.png
│ ├── Texture4.png
│ ├── Texture40.png
│ ├── Texture41.png
│ ├── Texture42.png
│ ├── Texture43.png
│ ├── Texture44.png
│ ├── Texture45.png
│ ├── Texture46.png
│ ├── Texture47.png
│ ├── Texture48.png
│ ├── Texture49.png
│ ├── Texture5.png
│ ├── Texture50.png
│ ├── Texture51.png
│ ├── Texture52.png
│ ├── Texture53.png
│ ├── Texture54.png
│ ├── Texture55.png
│ ├── Texture56.png
│ ├── Texture57.png
│ ├── Texture58.png
│ ├── Texture59.png
│ ├── Texture60.png
│ ├── Texture61.png
│ ├── Texture62.bmp
│ ├── Texture8.png
│ ├── Texture113.png
│ ├── Texture115.png
│ ├── Texture140.png
│ ├── Texture142.png
│ └── Output
│ │ ├── Texture13_Result.jpg
│ │ ├── Texture13_Result.mat
│ │ └── Texture13_Result.txt
├── L-BFGS-B-C-master
│ ├── VERSION
│ ├── Matlab
│ │ ├── lbfgsb_wrapper.mexa64
│ │ ├── lbfgsb_wrapper.mexw64
│ │ ├── lbfgsb_wrapper.mexmaci64
│ │ ├── fminunc_wrapper.m
│ │ ├── driver1.m
│ │ └── Makefile
│ ├── INSTALL
│ ├── src
│ │ ├── timer.c
│ │ └── Makefile
│ └── LICENSE
├── DCor_addpaths.m
├── CalcGramMatrix.m
├── ImExpand.m
├── ImNorm.m
├── ImDenorm.m
├── GenNet.m
├── Verify.m
├── DoForwardPass.m
├── lbfgsb_mywrapper.m
├── DCor_compile.m
├── CalcStructureErrorLoss.m
├── GetOptBounds.m
├── DoBackProp.m
├── COPYING
├── GetSRCMats.m
├── WriteResults.m
├── CalcStyleErrorLoss.m
├── CalcACorrTensor.m
├── GetGrad.m
├── CombineGrads.m
├── CalcACorrTensorFull.m
├── CalcACorrErrorLoss.m
├── CalcNetFeatures.m
├── LICENSE
├── Synth.m
├── CalcErrorLoss.m
├── GetSynthParams.m
├── GetLossFromImg.m
└── README.md
/matconvnet-1.0-beta20/matlab/1:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/PreTrainedNets/1:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Imgs/Results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Imgs/Results.png
--------------------------------------------------------------------------------
/Data/Texture1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture1.jpg
--------------------------------------------------------------------------------
/Data/Texture10.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture10.bmp
--------------------------------------------------------------------------------
/Data/Texture11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture11.png
--------------------------------------------------------------------------------
/Data/Texture12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture12.png
--------------------------------------------------------------------------------
/Data/Texture13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture13.png
--------------------------------------------------------------------------------
/Data/Texture14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture14.png
--------------------------------------------------------------------------------
/Data/Texture16.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture16.tif
--------------------------------------------------------------------------------
/Data/Texture17.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture17.tif
--------------------------------------------------------------------------------
/Data/Texture18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture18.png
--------------------------------------------------------------------------------
/Data/Texture18.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture18.tif
--------------------------------------------------------------------------------
/Data/Texture19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture19.png
--------------------------------------------------------------------------------
/Data/Texture2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture2.png
--------------------------------------------------------------------------------
/Data/Texture20.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture20.tif
--------------------------------------------------------------------------------
/Data/Texture21.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture21.tif
--------------------------------------------------------------------------------
/Data/Texture22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture22.png
--------------------------------------------------------------------------------
/Data/Texture24.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture24.jpg
--------------------------------------------------------------------------------
/Data/Texture25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture25.png
--------------------------------------------------------------------------------
/Data/Texture26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture26.png
--------------------------------------------------------------------------------
/Data/Texture27.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture27.jpg
--------------------------------------------------------------------------------
/Data/Texture28.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture28.jpg
--------------------------------------------------------------------------------
/Data/Texture29.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture29.jpg
--------------------------------------------------------------------------------
/Data/Texture3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture3.jpg
--------------------------------------------------------------------------------
/Data/Texture3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture3.png
--------------------------------------------------------------------------------
/Data/Texture31.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture31.jpg
--------------------------------------------------------------------------------
/Data/Texture32.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture32.jpg
--------------------------------------------------------------------------------
/Data/Texture33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture33.png
--------------------------------------------------------------------------------
/Data/Texture34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture34.png
--------------------------------------------------------------------------------
/Data/Texture35.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture35.jpg
--------------------------------------------------------------------------------
/Data/Texture36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture36.png
--------------------------------------------------------------------------------
/Data/Texture37.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture37.png
--------------------------------------------------------------------------------
/Data/Texture38.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture38.png
--------------------------------------------------------------------------------
/Data/Texture39.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture39.png
--------------------------------------------------------------------------------
/Data/Texture4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture4.png
--------------------------------------------------------------------------------
/Data/Texture40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture40.png
--------------------------------------------------------------------------------
/Data/Texture41.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture41.png
--------------------------------------------------------------------------------
/Data/Texture42.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture42.png
--------------------------------------------------------------------------------
/Data/Texture43.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture43.png
--------------------------------------------------------------------------------
/Data/Texture44.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture44.png
--------------------------------------------------------------------------------
/Data/Texture45.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture45.png
--------------------------------------------------------------------------------
/Data/Texture46.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture46.png
--------------------------------------------------------------------------------
/Data/Texture47.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture47.png
--------------------------------------------------------------------------------
/Data/Texture48.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture48.png
--------------------------------------------------------------------------------
/Data/Texture49.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture49.png
--------------------------------------------------------------------------------
/Data/Texture5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture5.png
--------------------------------------------------------------------------------
/Data/Texture50.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture50.png
--------------------------------------------------------------------------------
/Data/Texture51.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture51.png
--------------------------------------------------------------------------------
/Data/Texture52.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture52.png
--------------------------------------------------------------------------------
/Data/Texture53.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture53.png
--------------------------------------------------------------------------------
/Data/Texture54.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture54.png
--------------------------------------------------------------------------------
/Data/Texture55.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture55.png
--------------------------------------------------------------------------------
/Data/Texture56.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture56.png
--------------------------------------------------------------------------------
/Data/Texture57.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture57.png
--------------------------------------------------------------------------------
/Data/Texture58.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture58.png
--------------------------------------------------------------------------------
/Data/Texture59.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture59.png
--------------------------------------------------------------------------------
/Data/Texture60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture60.png
--------------------------------------------------------------------------------
/Data/Texture61.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture61.png
--------------------------------------------------------------------------------
/Data/Texture62.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture62.bmp
--------------------------------------------------------------------------------
/Data/Texture8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture8.png
--------------------------------------------------------------------------------
/Imgs/SynthPlot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Imgs/SynthPlot.png
--------------------------------------------------------------------------------
/Data/Texture113.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture113.png
--------------------------------------------------------------------------------
/Data/Texture115.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture115.png
--------------------------------------------------------------------------------
/Data/Texture140.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture140.png
--------------------------------------------------------------------------------
/Data/Texture142.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Texture142.png
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/VERSION:
--------------------------------------------------------------------------------
1 | v1.0 of the wrapper, using v3.0 of L-BFGS-B
2 | Spring 2015
3 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/pax_global_header:
--------------------------------------------------------------------------------
1 | 52 comment=3a9bf9c1d6fbe071330e02b2fcd1857de2c3c8d6
2 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/compatibility/parallel/labindex.m:
--------------------------------------------------------------------------------
1 | function i = labindex()
2 | i = 1 ;
3 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/compatibility/parallel/numlabs.m:
--------------------------------------------------------------------------------
1 | function n = numlabs()
2 | n = 1 ;
3 |
--------------------------------------------------------------------------------
/Data/Output/Texture13_Result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Output/Texture13_Result.jpg
--------------------------------------------------------------------------------
/Data/Output/Texture13_Result.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/Data/Output/Texture13_Result.mat
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexa64
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexw64
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexmaci64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/L-BFGS-B-C-master/Matlab/lbfgsb_wrapper.mexmaci64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnbnorm.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnbnorm.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnconv.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnconv.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnconvt.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnconvt.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnpool.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnpool.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_imreadjpeg.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_imreadjpeg.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnnormalize.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnnormalize.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/data.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "data.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnconv.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnconv.cu should be compiled instead"
3 | #endif
4 | #include "nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/mex/vl_nnbilinearsampler.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/omrysendik/DCor/HEAD/matconvnet-1.0-beta20/matlab/mex/vl_nnbilinearsampler.mexa64
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnconv.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconv.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnpool.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnpool.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/datamex.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "datamex.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnpooling.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnpooling.cu should be compiled instead"
3 | #endif
4 | #include "nnpooling.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnbnorm.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbnorm.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnconvt.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnconvt.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnnormalize.cu should be compiled instead"
3 | #endif
4 | #include "nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_imreadjpeg.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_imreadjpeg.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnnormalize.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnnormalize.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbnorm.cu should be compiled instead"
3 | #endif
4 | #include "nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnsubsample.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnsubsample.cu should be compiled instead"
3 | #endif
4 | #include "nnsubsample.cu"
5 |
6 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnfullyconnected.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnfullyconnected.cu should be compiled instead"
3 | #endif
4 | #include "nnfullyconnected.cu"
5 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/vl_nnbilinearsampler.cpp:
--------------------------------------------------------------------------------
1 | #if ENABLE_GPU
2 | #error This file should not be compiled with GPU support enabled
3 | #endif
4 | #include "vl_nnbilinearsampler.cu"
5 |
--------------------------------------------------------------------------------
/DCor_addpaths.m:
--------------------------------------------------------------------------------
1 | function DCor_addpaths()
2 | addpath(strrep('.\matconvnet-1.0-beta20\matlab\','\',filesep));
3 | addpath(strrep('.\L-BFGS-B-C-master\Matlab','\',filesep));
4 | vl_setupnn;
5 |
6 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/compatibility/parallel/gather.m:
--------------------------------------------------------------------------------
1 | function x=gather(x)
2 | % GATHER Compatibility stub for the GATHER() function
3 | % GATHER() is a function in the Parallel Computing Toolbox. MATCONVNET
4 | % can work without it.
5 |
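6 | % On a CPU-only setup this stub is an identity: the input X is returned
7 | % unchanged, matching GATHER's behavior for arrays already in host memory.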
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matconvnet.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "self:matconvnet.xcodeproj">
6 |    </FileRef>
7 | </Workspace>
8 |
--------------------------------------------------------------------------------
/CalcGramMatrix.m:
--------------------------------------------------------------------------------
1 | function [gramMat, featVec] = CalcGramMatrix(featMat)
2 | % Compute the Gram matrix of a CNN feature map: the spatial dimensions are
3 | % flattened so that gramMat(i,j) is the inner product of channels i and j.
4 | szFeatMat = size(featMat);
5 | if(length(szFeatMat)==2), szFeatMat(3)=1; end
6 | featVec = reshape(permute(featMat,[3,1,2]),szFeatMat(3),prod(szFeatMat(1:2)));
7 | gramMat = featVec*featVec';
8 | end
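9 | % Usage sketch (sizes are illustrative): feat = randn(7,7,64,'single');
10 | % [G,V] = CalcGramMatrix(feat); % G is 64x64, V is 64x49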
--------------------------------------------------------------------------------
/ImExpand.m:
--------------------------------------------------------------------------------
1 | function [ImOut] = ImExpand(ImIn,params)
2 |
3 | ImOut = cat(2,ImIn,ImIn,ImIn);
4 | ImOut = cat(1,ImOut,ImOut,ImOut);
5 |
6 | Width = params.USFac*size(ImIn,1);
7 | Start = round(size(ImOut,1)/2)-round(Width/2);
8 |
9 | ImOut = ImOut(Start:Start+Width-1,Start:Start+Width-1,:);
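10 |
11 | % The 3x3 tiling above lets the crop wrap around the texture borders; the
12 | % crop keeps a centered square of params.USFac times the input width (a
13 | % square input is assumed), so a hypothetical call such as
14 | % ImExpand(img, struct('USFac',2)) returns an image with twice the side
15 | % length of img.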
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/fminunc_wrapper.m:
--------------------------------------------------------------------------------
1 | function [f,g,h] = fminunc_wrapper(x,F,G,H)
2 | % [f,g,h] = fminunc_wrapper( x, F, G, H )
3 | % for use with Matlab's "fminunc"
4 | f = F(x);
5 | if nargin > 2 && nargout > 1
6 | g = G(x);
7 | end
8 | if nargin > 3 && nargout > 2
9 | h = H(x);
10 | end
11 |
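12 | % A minimal usage sketch (F and G are assumed handles for the objective and
13 | % its gradient):
14 | %   opts = optimset('GradObj','on');
15 | %   x = fminunc( @(x) fminunc_wrapper(x, F, G), x0, opts );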
--------------------------------------------------------------------------------
/ImNorm.m:
--------------------------------------------------------------------------------
1 | function [ImOut] = ImNorm(ImIn,avgRGB)
2 |
3 | szImg = size(ImIn);
4 | szAvg = size(avgRGB);
5 |
6 |
7 | if(isequal(szImg,szAvg))
8 | avgImg = avgRGB;
9 | else
10 | avgRGB = avgRGB(:);
11 | avgImg = repmat(permute(avgRGB,[2 3 1]),[szImg(1:2),1]);
12 | end
13 |
14 | ImOut = single(ImIn);
15 | ImOut = ImOut-avgImg;
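16 |
17 | % Example (hypothetical per-channel mean): ImNorm(img, [123.68;116.78;103.94])
18 | % subtracts the mean from every pixel and returns a single-precision image;
19 | % ImDenorm reverses the operation.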
--------------------------------------------------------------------------------
/ImDenorm.m:
--------------------------------------------------------------------------------
1 | function [ImOut] = ImDenorm(ImIn,avgRGB)
2 |
3 | szImg = size(ImIn);
4 | szAvg = size(avgRGB);
5 |
6 | if(isequal(szImg,szAvg))
7 | avgImg = avgRGB;
8 | else
9 | avgRGB = avgRGB(:);
10 | avgImg = repmat(permute(avgRGB,[2 3 1]),[szImg(1:2),1]);
11 | end
12 |
13 | ImOut = uint8(ImIn+avgImg);
14 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnsigmoid.m:
--------------------------------------------------------------------------------
1 | classdef nnsigmoid < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(5,5,1,1)/test.range ;
5 | y = vl_nnsigmoid(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nnsigmoid(x,dzdy) ;
8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ;
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/GenNet.m:
--------------------------------------------------------------------------------
1 | function [net] = GenNet(LastLayerNum)
2 |
3 | nettemp = load(strrep('.\matconvnet-1.0-beta20\PreTrainedNets\imagenet-vgg-verydeep-19.mat','\',filesep));
4 | for Ind = 1:1:LastLayerNum
5 | net.layers{Ind} = nettemp.layers{Ind};
6 | if(strcmp(net.layers{Ind}.type,'pool'))
7 | net.layers{Ind}.method = 'avg';
8 | end
9 | end
10 | net.meta.normalization = nettemp.meta.normalization;
11 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nndropout.m:
--------------------------------------------------------------------------------
1 | classdef nndropout < nntest
2 | methods (Test)
3 | function basic(test)
4 | x = test.randn(4,5,10,3) ;
5 | [y,mask] = vl_nndropout(x) ;
6 | dzdy = test.randn(size(y)) ;
7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ;
8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ;
9 | end
10 | end
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/setLayerInputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerInputs(obj, layer, inputs)
2 | %SETLAYERINPUTS Set or change the inputs to a layer
3 | % Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for input = inputs
8 | v(end+1) = obj.addVar(char(input)) ;
9 | end
10 | obj.layers(l).inputs = inputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Sigmoid.m:
--------------------------------------------------------------------------------
1 | classdef Sigmoid < dagnn.ElementWise
2 | methods
3 | function outputs = forward(obj, inputs, params)
4 | outputs{1} = vl_nnsigmoid(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnoffset.m:
--------------------------------------------------------------------------------
1 | classdef nnoffset < nntest
2 | methods (Test)
3 | function basic(test)
4 | param = [.34, .5] ;
5 | x = test.randn(4,5,10,3) ;
6 | y = vl_nnnoffset(x,param) ;
7 | dzdy = test.randn(size(y)) ;
8 | dzdx = vl_nnnoffset(x,param,dzdy) ;
9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ;
10 | end
11 | end
12 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/setLayerParams.m:
--------------------------------------------------------------------------------
1 | function v = setLayerParams(obj, layer, params)
2 | %SETLAYERPARAMS Set or change the parameters of a layer
3 | % Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for param = params
8 | v(end+1) = obj.addParam(char(param)) ;
9 | end
10 | obj.layers(l).params = params ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/setLayerOutputs.m:
--------------------------------------------------------------------------------
1 | function v = setLayerOutputs(obj, layer, outputs)
2 | %SETLAYEROUTPUTS Set or change the outputs of a layer
3 | % Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...})
4 |
5 | v = [] ;
6 | l = obj.getLayerIndex(layer) ;
7 | for output = outputs
8 | v(end+1) = obj.addVar(char(output)) ;
9 | end
10 | obj.layers(l).outputs = outputs ;
11 | obj.rebuild() ;
12 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_rootnn.m:
--------------------------------------------------------------------------------
1 | function root = vl_rootnn()
2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox.
3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox.
4 |
5 | % Copyright (C) 2014 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = fileparts(fileparts(mfilename('fullpath'))) ;
12 |
--------------------------------------------------------------------------------
/Verify.m:
--------------------------------------------------------------------------------
1 | function Verify(params)
2 |
3 | assert(mod(params.netImgSz(1),2)==1 && mod(params.netImgSz(2),2)==1,sprintf('The code currently supports only odd-sized images: params.netImgSz=[%d,%d]',params.netImgSz(1),params.netImgSz(2)));
4 | assert(params.USFac==1 || params.DiversityLossWeight==0,sprintf('If the output image size is larger than the input (params.USFac=%f), the Diversity loss shouldn''t be used (params.DiversityLossWeight=%f)',params.USFac,params.DiversityLossWeight));
5 |
6 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/reset.m:
--------------------------------------------------------------------------------
1 | function reset(obj)
2 | %RESET Reset the DagNN
3 | % RESET(obj) resets the DagNN obj. The function clears any intermediate value stored in the DagNN
4 | % object, including parameter gradients. It also calls the reset
5 | % function of every layer.
6 |
7 | [obj.vars.value] = deal([]) ;
8 | [obj.vars.der] = deal([]) ;
9 | [obj.params.der] = deal([]) ;
10 | for l = 1:numel(obj.layers)
11 | obj.layers(l).block.reset() ;
12 | end
13 |
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/driver1.m:
--------------------------------------------------------------------------------
1 | function [f,g] = driver1(x)
2 | % An example function, from "driver1.f" in the Fortran release
3 |
4 | n = length(x);
5 | g = zeros(n,1);
6 |
7 | f = .25*(x(1)-1.0)^2;
8 | for i = 2:n
9 | f = f + ( x(i) - (x(i-1))^2 )^2;
10 | end
11 | f = 4*f;
12 |
13 | t1 = x(2) - (x(1)^2);
14 | g(1) = 2.0*( x(1) - 1.0 ) - 16*x(1)*t1;
15 | for i = 2:(n-1)
16 | t2 = t1;
17 | t1 = x(i+1) - (x(i))^2;
18 | g(i) = 8*t2 - 16*x(i)*t1;
19 | end
20 | g(n) = 8*t1;
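21 |
22 | % A sketch of minimizing this objective with the bundled L-BFGS-B wrapper
23 | % (bounds, problem size, and starting point are illustrative assumptions):
24 | %   n = 25;  opts = struct('x0', 3*ones(n,1));
25 | %   [x, f, info] = lbfgsb( @driver1, -inf(n,1), inf(n,1), opts );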
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_test_bnorm.m:
--------------------------------------------------------------------------------
1 | %%
2 | % Test script comparing vl_nnbnorm's GPU/CPU implementation against the
3 | % older VLFeat-based implementation (vl_nnbnorm_old)
4 | %%
5 |
6 | gpu = false;
7 | gpu = true ; % comment this line out to run the comparison on the CPU
8 |
9 | T = 1 ;
10 | x = randn(64,64,32,32,'single') ;
11 | g = randn(32,1,'single') ;
12 | b = randn(32,1,'single') ;
13 |
14 | if gpu
15 | x = gpuArray(x) ;
16 | g = gpuArray(g) ;
17 | b = gpuArray(b) ;
18 | end
19 |
20 | a=vl_nnbnorm(x,g,b);
21 | a_=vl_nnbnorm_old(x,g,b);
22 |
23 | vl_testsim(a,a_)
24 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnsoftmax.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmax < nntest
2 | properties (TestParameter)
3 | h = {1 2 3}
4 | w = {1 2}
5 | end
6 | methods (Test)
7 | function basic(test,h,w)
8 | d = 10 ;
9 | n = 3 ;
10 | x = test.randn(h,w,d,n)/test.range ;
11 | y = vl_nnsoftmax(x) ;
12 | dzdy = test.randn(size(y)) ;
13 | dzdx = vl_nnsoftmax(x, dzdy) ;
14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ;
15 | end
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/SoftMax.m:
--------------------------------------------------------------------------------
1 | classdef SoftMax < dagnn.ElementWise
2 | methods
3 | function outputs = forward(self, inputs, params)
4 | outputs{1} = vl_nnsoftmax(inputs{1}) ;
5 | end
6 |
7 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ;
9 | derParams = {} ;
10 | end
11 |
12 | function obj = SoftMax(varargin)
13 | obj.load(varargin) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnspnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnspnorm < nntest
2 | methods (Test)
3 | function basic(test)
4 | h = 13 ;
5 | w = 17 ;
6 | d = 4 ;
7 | n = 5 ;
8 | param = [3, 3, 0.1, 0.75] ;
9 | x = test.randn(h,w,d,n) ;
10 | y = vl_nnspnorm(x, param) ;
11 | dzdy = test.rand(h, w, d, n) ;
12 | dzdx = vl_nnspnorm(x, param, dzdy) ;
13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ;
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/DoForwardPass.m:
--------------------------------------------------------------------------------
1 | function [forward_res] = DoForwardPass(net, tgtImg, unitedLayerInds, grads, params)
2 | disp('Forward Pass');
3 |
4 | res = [];
5 | forward_res = vl_simplenn(net,single(tgtImg),[],res,...
6 | 'accumulate', false, ...
7 | 'mode', 'test', ...
8 | 'conserveMemory', 0, ...
9 | 'backPropDepth', 43, ...
10 | 'sync', 0, ...
11 | 'cudnn', 0) ;
12 |
13 | end
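14 |
15 | % Note: unitedLayerInds and grads are accepted for interface consistency but
16 | % are not used here; the pass is a plain vl_simplenn evaluation in 'test'
17 | % mode with conserveMemory disabled so that every layer's activations stay
18 | % available for the backward passes.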
--------------------------------------------------------------------------------
/lbfgsb_mywrapper.m:
--------------------------------------------------------------------------------
1 | function [tgtImg] = lbfgsb_mywrapper(curTgtImg,SrcImg,fun,net,params)
2 |
3 | [l,u] = GetOptBounds(SrcImg);
4 |
5 | params.Decomp.Method = 'Normal';
6 | params.lbfgs_opts.x0 = double(curTgtImg(:));
7 |
8 | clear global;
9 | global SavedDataPrevErrors;
10 | global SavedDataPrevDeltas;
11 | global SavedDataPrevImg;
12 |
13 | [xk, ~, inform] = lbfgsb(fun, l, u, params.lbfgs_opts );
14 |
15 | tgtImg = reshape(xk,size(SrcImg));
16 | tgtImg = uint8(ImDenorm(tgtImg,net.meta.normalization.averageImage));
17 | disp('Done LBFGSB');
18 |
19 | end
--------------------------------------------------------------------------------
/DCor_compile.m:
--------------------------------------------------------------------------------
1 | function DCor_compile()
2 |
3 | fprintf('Compiling L-BFGS-B\n');
4 | cd L-BFGS-B-C-master/Matlab;
5 | compile_mex;
6 | cd ..; cd ..;
7 |
8 | fprintf('Compiling MatConvNet\n');
9 | cd matconvnet-1.0-beta20/matlab;
10 | vl_compilenn;
11 | cd ..; cd ..;
12 |
13 | fprintf('Downloading Pretrained Net (VGG-19). This may take a few minutes...\n');
14 | FolderToDl = [cd,'/matconvnet-1.0-beta20/PreTrainedNets/imagenet-vgg-verydeep-19.mat'];
15 | unix(['wget -O ',FolderToDl,' https://www.dropbox.com/s/ndyqhp4umkpww8t/imagenet-vgg-verydeep-19.mat?dl=0 --no-check-certificate']);
16 |
17 | end
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/INSTALL:
--------------------------------------------------------------------------------
1 | go to src/ and run
2 |
3 | make
4 |
5 | The included Makefile was tested with gcc
6 | under Mac and Linux. If you're not using gcc,
7 | then you probably know more than I do and can
8 | make the appropriate changes.
9 |
10 |
11 | To make the Matlab mex files, either
12 |
13 | 1) in a shell, go to Matlab/ and run
14 |
15 | make
16 |
17 | or,
18 | 2) in Matlab, go to Matlab/ and run
19 |
20 | compile_mex
21 |
22 | which installs the mex files and runs
23 | a simple test.
24 | If you have Windows, this second option
25 | is undoubtedly much easier.
26 |
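27 | For example, option (2) from the MATLAB prompt:
28 |
29 |     cd Matlab
30 |     compile_mex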
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/LRN.m:
--------------------------------------------------------------------------------
1 | classdef LRN < dagnn.ElementWise
2 | properties
3 | param = [5 1 0.0001/5 0.75]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = LRN(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/CalcStructureErrorLoss.m:
--------------------------------------------------------------------------------
1 | function [DiversityErrorLoss, DiversityGrads] = CalcStructureErrorLoss(tgtDiversityFeatures, srcDiversityFeatures, params)
2 |
3 | numFeatLayers = length(params.DiversityMatchLayerInds);
4 | DiversityGrads = cell(numFeatLayers,1);
5 | DiversityErrorLoss = 0;
6 | for k=1:numFeatLayers
7 |
8 | FeatDiff = tgtDiversityFeatures{k}-srcDiversityFeatures{k};
9 |
10 | DiversityDiffWeight = params.DiversityFeatureWeights(k);
11 | % Accumulate the weighted loss over layers rather than overwriting it,
12 | % keeping the loss consistent with the weighted gradients below.
13 | DiversityErrorLoss = DiversityErrorLoss + 0.5*DiversityDiffWeight*sum(FeatDiff(:).^2);
14 |
15 | DiversityGrads{k} = DiversityDiffWeight*FeatDiff;
16 | end
17 |
18 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/NormOffset.m:
--------------------------------------------------------------------------------
1 | classdef NormOffset < dagnn.ElementWise
2 | properties
3 | param = [1 0.5]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = NormOffset(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/SpatialNorm.m:
--------------------------------------------------------------------------------
1 | classdef SpatialNorm < dagnn.ElementWise
2 | properties
3 | param = [2 2 10 2]
4 | end
5 |
6 | methods
7 | function outputs = forward(obj, inputs, params)
8 | outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ;
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ;
13 | derParams = {} ;
14 | end
15 |
16 | function obj = SpatialNorm(varargin)
17 | obj.load(varargin) ;
18 | end
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/ElementWise.m:
--------------------------------------------------------------------------------
1 | classdef ElementWise < dagnn.Layer
2 | %ELEMENTWISE DagNN layers that operate at individual spatial locations
3 | methods
4 | function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes)
5 | outputSizes = inputSizes ;
6 | transforms = {eye(6)} ;
7 | end
8 |
9 | function rfs = getReceptiveFields(obj)
10 | rfs.size = [1 1] ;
11 | rfs.stride = [1 1] ;
12 | rfs.offset = [1 1] ;
13 | end
14 |
15 | function outputSizes = getOutputSizes(obj, inputSizes)
16 | outputSizes = inputSizes ;
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/Data/Output/Texture13_Result.txt:
--------------------------------------------------------------------------------
1 | styleMatchLayerInds_1,styleMatchLayerInds_2,styleMatchLayerInds_3,styleMatchLayerInds_4,styleFeatureWeights_1,styleFeatureWeights_2,styleFeatureWeights_3,styleFeatureWeights_4,ACorrMatchLayerInds,ACorrFeatureWeights,SmoothnessMatchLayerInds,SmoothnessFeatureWeights,AdversaryMatchLayerInds,AdversaryFeatureWeights,SmoothnessSigma,lastLayerNum,unitedLayerInds_1,unitedLayerInds_2,unitedLayerInds_3,unitedLayerInds_4,unitedLayerInds_5,Verbose,NNVerbose,styleLossWeight,ACorrLossWeight,SmoothnessLossWeight,AdversaryLossWeight,lbfgs_opts,OutString,USFac
2 | 5,10,19,28,0.25,0.25,0.25,0.25,10,1,1,1,10,1,0.0001,28,1,5,10,19,28,1,1,0.5,5e-05,-7.5e-06,-1e-06,,./Data/Output/Texture13_Result,1
3 |
--------------------------------------------------------------------------------
/GetOptBounds.m:
--------------------------------------------------------------------------------
1 | function [l,u] = GetOptBounds(SrcImg)
2 |
3 | % maxR = max(max(SrcImg(:,:,1))); minR = min(min(SrcImg(:,:,1)));
4 | % maxG = max(max(SrcImg(:,:,2))); minG = min(min(SrcImg(:,:,2)));
5 | % maxB = max(max(SrcImg(:,:,3))); minB = min(min(SrcImg(:,:,3)));
6 |
7 | maxR = 255; minR = -255;
8 | maxG = 255; minG = -255;
9 | maxB = 255; minB = -255;
10 |
11 | l = cat(3,minR*ones(size(SrcImg,1),size(SrcImg,2)),minG*ones(size(SrcImg,1),size(SrcImg,2)),minB*ones(size(SrcImg,1),size(SrcImg,2)));
12 | u = cat(3,maxR*ones(size(SrcImg,1),size(SrcImg,2)),maxG*ones(size(SrcImg,1),size(SrcImg,2)),maxB*ones(size(SrcImg,1),size(SrcImg,2)));
13 | l = double(l(:));
14 | u = double(u(:));
15 |
16 | end
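17 |
18 | % Example: [l,u] = GetOptBounds(zeros(128,128,3)) yields box constraints of
19 | % [-255,255] for every pixel and channel, vectorized to match the
20 | % optimizer's x(:) layout.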
--------------------------------------------------------------------------------
/DoBackProp.m:
--------------------------------------------------------------------------------
1 | function [ dx ] = DoBackProp( net, grads, tgtImg, params )
2 |
3 | truncatedNet = net;
4 | truncatedNet.layers = net.layers(1:params.lastLayerNum);
5 |
6 | [forwardRes] = DoForwardPass(truncatedNet,tgtImg, params.unitedLayerInds, grads, params);
7 |
8 | fprintf('Backward Pass: ');
9 | dx = 0;
10 | for k=params.lastLayerNum:-1:1
11 | truncatedNet.layers = net.layers(1:k);
12 | auxInd = find(params.unitedLayerInds == k);
13 | if(isempty(auxInd))
14 | continue;
15 | end
16 | curRes = forwardRes(1:k+1);
17 | curRes(k+1).dzdx = grads{auxInd};
18 | fprintf('Layer %d, ',k);
19 | curBackwardRes = DoBackwardPass(truncatedNet, curRes, params);
20 | dx = dx+curBackwardRes(1).dzdx;
21 | end
22 |
23 | fprintf('\n');
24 |
25 |
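26 | % For each layer listed in params.unitedLayerInds (visited from deepest to
27 | % shallowest), the network is truncated at that layer, the matching loss
28 | % gradient is injected as dzdx at its output, and the resulting image-space
29 | % gradients are accumulated into dx.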
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017 Omry Sendik
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms are permitted
5 | provided that the above copyright notice and this paragraph are
6 | duplicated in all such forms and that any documentation,
7 | advertising materials, and other materials related to such
8 | distribution and use acknowledge that the software was developed
9 | by the <organization>. The name of the <organization>
10 | may not be used to endorse or promote products derived
11 | from this software without specific prior written permission.
12 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 | IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/removeLayer.m:
--------------------------------------------------------------------------------
1 | function removeLayer(obj, layerName)
2 | %REMOVELAYER Remove a layer from the network
3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object
4 | % OBJ. NAME can be a string or a cell array of strings.
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | if ischar(layerName), layerName = {layerName}; end;
13 | idxs = obj.getLayerIndex(layerName);
14 | if any(isnan(idxs))
15 | error('Invalid layer name `%s`', ...
16 | strjoin(layerName(isnan(idxs)), ', '));
17 | end
18 | obj.layers(idxs) = [] ;
19 | obj.rebuild() ;
20 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/README.md:
--------------------------------------------------------------------------------
1 | # MatConvNet: CNNs for MATLAB
2 |
3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural
4 | Networks* (CNNs) for computer vision applications. It is simple,
5 | efficient, and can run and learn state-of-the-art CNNs. Several
6 | example CNNs are included to classify and encode images. Please visit
7 | the [homepage](http://www.vlfeat.org/matconvnet) to learn more.
8 |
9 | In case of compilation issues, please first read the
10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and
11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) sections before creating a GitHub
12 | issue. For general questions about network design and training,
13 | please use the
14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet).
15 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/COPYING:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014 The MatConvNet team.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms are permitted
5 | provided that the above copyright notice and this paragraph are
6 | duplicated in all such forms and that any documentation,
7 | advertising materials, and other materials related to such
8 | distribution and use acknowledge that the software was developed
9 | by the . The name of the
10 | may not be used to endorse or promote products derived
11 | from this software without specific prior written permission.
12 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 | IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
--------------------------------------------------------------------------------
/GetSRCMats.m:
--------------------------------------------------------------------------------
1 | function [SrcMats] = GetSRCMats(SrcImg,initNoise,net,params)
2 |
3 | [srcFeatures.styleFeatures, srcFeatures.ACorrFeatures, srcFeatures.DiversityFeatures] = CalcNetFeatures(net, SrcImg, params);
4 | [~, trgFeatures.ACorrFeatures ] = CalcNetFeatures(net, initNoise, params);
5 |
6 | for Ind = 1:length(params.styleMatchLayerInds)
7 | SrcMats.GramMats{Ind} = CalcGramMatrix(srcFeatures.styleFeatures{Ind});
8 | end
9 | for Ind = 1:length(params.ACorrMatchLayerInds)
10 | SrcMats.ACorrMats{Ind} = CalcACorrTensorFull(srcFeatures.ACorrFeatures{Ind},trgFeatures.ACorrFeatures{Ind});
11 | end
12 |
13 | for Ind = 1:length(params.DiversityMatchLayerInds)
14 | SrcMats.DiversityMats{Ind} = srcFeatures.DiversityFeatures{Ind};
15 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnsigmoid.m:
--------------------------------------------------------------------------------
1 | function out = vl_nnsigmoid(x,dzdy)
2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit.
3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can
4 | % have an arbitrary size. The sigmoid is defined as follows:
5 | %
6 | % SIGMOID(X) = 1 / (1 + EXP(-X)).
7 | %
8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the
9 | % block projected onto DZDY. DZDX and DZDY have the same
10 | % dimensions as X and Y respectively.
11 |
12 | % Copyright (C) 2015 Karel Lenc.
13 | % All rights reserved.
14 | %
15 | % This file is part of the VLFeat library and is made available under
16 | % the terms of the BSD license (see the COPYING file).
17 |
18 | y = 1 ./ (1 + exp(-x));
19 |
20 | if nargin <= 1 || isempty(dzdy)
21 | out = y ;
22 | else
23 | out = dzdy .* (y .* (1 - y)) ;
24 | end
25 |
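
A minimal check of the backward branch against a central finite difference (arbitrary values; with dzdy = 1, dzdx is the plain derivative y.*(1-y)):

    x = randn(3,3,'single') ;
    dzdx = vl_nnsigmoid(x, ones(3,3,'single')) ;
    h = 1e-3 ;
    numeric = (vl_nnsigmoid(x+h) - vl_nnsigmoid(x-h)) / (2*h) ;
    assert(max(abs(dzdx(:) - numeric(:))) < 1e-3)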
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/copy.hpp:
--------------------------------------------------------------------------------
1 | // @file copy.hpp
2 | // @brief Copy and other data operations
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__copy__
14 | #define __vl__copy__
15 |
16 | #include "../data.hpp"
17 |
18 | namespace vl { namespace impl {
19 |
20 | template <vl::Device dev, typename type>
21 | struct operations
22 | {
23 | typedef type data_type ;
24 | static vl::Error copy(data_type * dest, data_type const * src, size_t numElements) ;
25 | static vl::Error fill(data_type * dest, size_t numElements, data_type value) ;
26 | } ;
27 | } }
28 |
29 | #endif /* defined(__vl__copy__) */
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalizelp < nntest
2 | properties (TestParameter)
3 | h = {1 2 3 4}
4 | w = {1 2 3 4}
5 | d = {2 3 4}
6 | p = {2 4}
7 | end
8 |
9 | methods (Test)
10 | function basicl2(test, h,w,d)
11 | x = test.randn(h,w,d,3) ;
12 | y = vl_nnnormalizelp(x) ;
13 | dzdy = test.rand(size(y))-0.5 ;
14 | dzdx = vl_nnnormalizelp(x,dzdy) ;
15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, 1e-4, 0.3) ;
16 | end
17 |
18 | function lp(test, p)
19 | x = test.randn(2,3,5,3) / test.range ;
20 | y = vl_nnnormalizelp(x, [], 'p', p) ;
21 | dzdy = test.rand(size(y))-0.5 ;
22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ;
23 | test.der(@(x) vl_nnnormalizelp(x,[],'p',p), x, dzdy, dzdx, 1e-4, 0.3) ;
24 | end
25 |
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/initParams.m:
--------------------------------------------------------------------------------
1 | function initParams(obj)
2 | % INITPARAMS Initialize the parameters of the DagNN
3 | % OBJ.INITPARAMS() uses the INIT() method of each layer to initialize
4 | % the corresponding parameters (usually randomly).
5 |
6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
7 | % All rights reserved.
8 | %
9 | % This file is part of the VLFeat library and is made available under
10 | % the terms of the BSD license (see the COPYING file).
11 |
12 | for l = 1:numel(obj.layers)
13 | p = obj.getParamIndex(obj.layers(l).params) ;
14 | params = obj.layers(l).block.initParams() ;
15 | switch obj.device
16 | case 'cpu'
17 | params = cellfun(@gather, params, 'UniformOutput', false) ;
18 | case 'gpu'
19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ;
20 | end
21 | [obj.params(p).value] = deal(params{:}) ;
22 | end
23 |
--------------------------------------------------------------------------------
/WriteResults.m:
--------------------------------------------------------------------------------
1 | function [] = WriteResults(TgtImg,SrcImg,params)
2 |
3 | Pad = size(TgtImg,1)-size(SrcImg,1);
4 | if(Pad~=0)
5 | SrcImg = cat(1,zeros(floor(Pad/2),size(SrcImg,2),3) ,SrcImg,zeros(Pad-floor(Pad/2),size(SrcImg,2),3) );
6 | SrcImg = cat(2,zeros(size(TgtImg,1) ,floor(Pad/2),3),SrcImg,zeros(size(TgtImg,1) ,Pad-floor(Pad/2),3));
7 | end
8 |
9 | OutImg = cat(2,SrcImg,...
10 | zeros(size(TgtImg,1),20,3),...
11 | TgtImg);
12 |
13 | imwrite(OutImg,[params.OutString,'.jpg']);
14 |
15 | figure(100);
16 | box off;
17 | set(gca,'xcolor',get(gcf,'color'));
18 | set(gca,'xtick',[]);
19 | set(gca,'ycolor',get(gcf,'color'));
20 | set(gca,'ytick',[]);
21 | print(params.OutString, '-depsc');
22 |
23 | save([params.OutString,'.mat'],'TgtImg');
24 |
25 | writetable(struct2table(params),[params.OutString,'.txt']);
26 | fprintf('Wrote output image\n');
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/BilinearSampler.m:
--------------------------------------------------------------------------------
1 | % Wrapper for BilinearSampler block:
2 | % (c) 2016 Ankush Gupta
3 |
4 | classdef BilinearSampler < dagnn.Layer
5 | methods
6 | function outputs = forward(obj, inputs, params)
7 | outputs = vl_nnbilinearsampler(inputs{1}, inputs{2});
8 | outputs = {outputs};
9 | end
10 |
11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs)
12 | [dX,dG] = vl_nnbilinearsampler(inputs{1}, inputs{2}, derOutputs{1});
13 | derInputs = {dX,dG};
14 | derParams = {};
15 | end
16 |
17 | function outputSizes = getOutputSizes(obj, inputSizes)
18 | xSize = inputSizes{1};
19 | gSize = inputSizes{2};
20 | outputSizes = {[gSize(1), gSize(2), xSize(3), xSize(4)]};
21 | end
22 |
23 | function obj = BilinearSampler(varargin)
24 | obj.load(varargin);
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnrelu.m:
--------------------------------------------------------------------------------
1 | classdef nnrelu < nntest
2 | properties
3 | x
4 | end
5 |
6 | methods (TestClassSetup)
7 | function data(test,device)
8 | % make sure that all elements in x are different. in this way,
9 | % we can compute numerical derivatives reliably by adding a delta < .5.
10 | x = test.randn(15,14,3,2) ;
11 | x(:) = randperm(numel(x))' ;
12 | % avoid non-diff value for test
13 | x(x==0)=1 ;
14 | test.x = x ;
15 | test.range = 10 ;
16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end
17 | end
18 | end
19 |
20 | methods (Test)
21 | function basic(test)
22 | x = test.x ;
23 | y = vl_nnrelu(x) ;
24 | dzdy = test.randn(size(y)) ;
25 | dzdx = vl_nnrelu(x,dzdy) ;
26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ;
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/renameLayer.m:
--------------------------------------------------------------------------------
1 | function renameLayer(obj, oldName, newName, varargin)
2 | %RENAMELAYER Rename a layer
3 | % RENAMELAYER(OLDNAME, NEWNAME) changes the name of the layer
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing layer.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the layer to rename
11 | v = obj.getLayerIndex(oldName) ;
12 | if isnan(v)
13 | % There is no such layer, nothing to do
14 | if ~opts.quiet
15 | warning('There is no layer ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing layer
21 | newNameExists = any(strcmp(newName, {obj.layers.name})) ;
22 | if newNameExists
23 | error('There is already a layer ''%s''.', newName) ;
24 | end
25 |
26 | % Replace oldName with newName in all the layers
27 | obj.layers(v).name = newName ;
28 | obj.rebuild() ;
29 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/move.m:
--------------------------------------------------------------------------------
1 | function move(obj, device)
2 | %MOVE Move the DagNN to either CPU or GPU
3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU.
4 | %
5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU.
6 |
7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
8 | % All rights reserved.
9 | %
10 | % This file is part of the VLFeat library and is made available under
11 | % the terms of the BSD license (see the COPYING file).
12 |
13 | obj.reset() ;
14 | obj.device = device ;
15 | switch device
16 | case 'gpu'
17 | for i=1:numel(obj.params)
18 | obj.params(i).value = gpuArray(obj.params(i).value) ;
19 | end
20 | case 'cpu'
21 | for i=1:numel(obj.params)
22 | obj.params(i).value = gather(obj.params(i).value) ;
23 | end
24 | otherwise
25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ;
26 | end
27 | for l = 1:numel(obj.layers)
28 | obj.layers(l).block.move(device) ;
29 | end
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_test_economic_relu.m:
--------------------------------------------------------------------------------
1 | % VL_TEST_ECONOMIC_RELU
2 | function vl_test_economic_relu()
3 |
4 | x = randn(11,12,8,'single');
5 | w = randn(5,6,8,9,'single');
6 | b = randn(1,9,'single') ;
7 |
8 | net.layers{1} = struct('type', 'conv', ...
9 | 'filters', w, ...
10 | 'biases', b, ...
11 | 'stride', 1, ...
12 | 'pad', 0);
13 | net.layers{2} = struct('type', 'relu') ;
14 |
15 | res = vl_simplenn(net, x) ;
16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ;
17 | clear res ;
18 |
19 | res_ = vl_simplenn(net, x, dzdy) ;
20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ;
21 |
22 | a=whos('res_') ;
23 | b=whos('res__') ;
24 | assert(a.bytes > b.bytes) ;
25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ;
26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ;
27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ;
28 |
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/src/timer.c:
--------------------------------------------------------------------------------
1 | #include "lbfgsb.h"
2 | #include <time.h>
3 |
4 | int timer(double *ttime)
5 | {
6 | clock_t temp;
7 |
8 | /* this routine computes cpu time in double precision; it makes use of */
9 | /* the intrinsic f90 cpu_time therefore a conversion type is */
10 | /* needed. */
11 |
12 | /* j.l morales departamento de Matematicas, */
13 | /* instituto Tecnologico Autonomo de Mexico */
14 | /* mexico D.F. */
15 |
16 | /* j.l nocedal department of Electrical Engineering and */
17 | /* computer Science. */
18 | /* northwestern University. Evanston, IL. USA */
19 |
20 | /* january 21, 2011 */
21 |
22 | /* temp = (real) (*ttime); */
23 | /* *ttime = (double) temp; */
24 | temp = clock();
25 | *ttime = ((double) temp)/CLOCKS_PER_SEC;
26 | return 0;
27 | } /* timer */
28 |
29 |
--------------------------------------------------------------------------------
/CalcStyleErrorLoss.m:
--------------------------------------------------------------------------------
1 | function [styleErrorLoss, styleGrads] = CalcStyleErrorLoss(tgtStyleFeatures, srcGramMats, params)
2 |
3 | numFeatLayers = length(params.styleMatchLayerInds);
4 | styleGrads = cell(numFeatLayers,1);
5 | styleErrorLoss = 0;
6 | for k=1:numFeatLayers
7 | szFeatMat = size(tgtStyleFeatures{k});
8 | if(length(szFeatMat)==2), szFeatMat(3)=1; end
9 | [tgtGram,tgtFeatVec] = CalcGramMatrix(tgtStyleFeatures{k});
10 | srcGram = srcGramMats{k};
11 |
12 | M = prod(szFeatMat(1:2));
13 | N = szFeatMat(3);
14 |
15 | srcGram = srcGram/M;
16 | tgtGram = tgtGram/M;
17 |
18 | styleDiff = tgtGram-srcGram;
19 |
20 | styleDiffWeight = params.styleFeatureWeights(k);
21 | styleErrorLoss = styleErrorLoss+(styleDiffWeight/4)*(sum((styleDiff(:)).^2))/N^2;
22 |
23 | aux=(tgtFeatVec'*styleDiff)'/(M*N^2);
24 | styleGrads{k} = styleDiffWeight*reshape(single(aux'),szFeatMat);
25 | end
26 |
27 | end
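
CalcGramMatrix is defined elsewhere in this repository; from its use above it is assumed to return the Gram matrix together with the N-by-HW matrix of unrolled features. A hypothetical stand-in capturing that contract:

    function [G, V] = CalcGramMatrixSketch(feat)   % hypothetical stand-in
    sz = [size(feat,1) size(feat,2) size(feat,3)] ;
    V = reshape(feat, prod(sz(1:2)), sz(3))' ;     % N x HW feature matrix
    G = V * V' ;                                   % N x N Gram matrix
    end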
--------------------------------------------------------------------------------
/CalcACorrTensor.m:
--------------------------------------------------------------------------------
1 | function [ACorrTensor] = CalcACorrTensor(featMat)
2 | NumOfFeatures = size(featMat,3);
3 | SizeOfFeatures = [size(featMat,1) size(featMat,2)];
4 | ACorrTensor = zeros(size(featMat));
5 |
6 | WeightsOutX = [round(size(featMat,1)/2):size(featMat,1) size(featMat,1)-1:-1:round(size(featMat,1)/2)]; % overlap count of xcorr2 at each retained lag, used to normalize the raw correlation sums
7 |
8 | [WeightsOutX,WeightsOutY] = meshgrid(WeightsOutX,WeightsOutX);
9 |
10 | for IndN = 1:NumOfFeatures
11 | DummyACorr = xcorr2(featMat(:,:,IndN));
12 | MidCoord = ceil(size(DummyACorr)./2);
13 | DummyACorr = DummyACorr(MidCoord(1)-floor(SizeOfFeatures(1)/2):MidCoord(1)-floor(SizeOfFeatures(1)/2)+SizeOfFeatures(1)-1,...
14 | MidCoord(2)-floor(SizeOfFeatures(2)/2):MidCoord(2)-floor(SizeOfFeatures(2)/2)+SizeOfFeatures(2)-1);
15 | DummyACorr = DummyACorr./(WeightsOutX.*WeightsOutY);
16 |
17 | ACorrTensor(:,:,IndN) = DummyACorr;
18 | end
19 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnbnorm.m:
--------------------------------------------------------------------------------
1 | classdef nnbnorm < nntest
2 | properties (TestParameter)
3 | rows = {2 8 13}
4 | cols = {2 8 17}
5 | numDims = {1 3 4}
6 | batchSize = {2 7}
7 | end
8 | methods (Test)
9 | function basic(test, rows, cols, numDims, batchSize)
10 | r = rows ;
11 | c = cols ;
12 | nd = numDims ;
13 | bs = batchSize ;
14 | x = test.randn(r, c, nd, bs) ;
15 | % g = test.randn(1, 1, nd, 1) ; % 1x1xD variant, superseded by the vector shape below
16 | % b = test.randn(1, 1, nd, 1) ;
17 | g = test.randn(nd, 1) ;
18 | b = test.randn(nd, 1) ;
19 |
20 | y = vl_nnbnorm(x,g,b) ;
21 | dzdy = test.randn(size(y)) ;
22 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ;
23 |
24 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-2, -1e-3) ;
25 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, test.range * 1e-2, -1e-3) ;
26 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, test.range * 1e-2, -1e-3) ;
27 | end
28 | end
29 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbias.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias.hpp
2 | // @brief Bias block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias__
14 | #define __vl__nnbias__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::Error
21 | nnbias_forward(vl::Context& context,
22 | vl::Tensor output, double outputMult,
23 | vl::Tensor data, double dataMult,
24 | vl::Tensor biases, double biasesMult) ;
25 |
26 | vl::Error
27 | nnbias_backward(vl::Context& context,
28 | vl::Tensor derData, double derDataMult,
29 | vl::Tensor derBiases, double derBiasesMult,
30 | vl::Tensor derOutput, double derOutputMult) ;
31 | }
32 |
33 | #endif /* defined(__vl__nnbias__) */
34 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnnoffset.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnoffset(x, param, dzdy)
2 | %VL_NNNOFFSET CNN norm-dependent offset.
3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the
4 | % weighted norm of the feature channels:
5 | %
6 | % X(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2)
7 | %
8 | % where
9 | %
10 | % L(i,j) = sum_K X(i,j,k)^2
11 | %
12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the
13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
14 | % as X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | L = sum(x.^2,3) ;
23 | L = max(L, 1e-8) ;
24 |
25 | if nargin <= 2
26 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ;
27 | else
28 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ;
29 | end
30 |
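
A one-pixel example of the forward branch (arbitrary values): with three channels [1 2 2], L = 1+4+4 = 9, so PARAM = [0.1 1] subtracts 0.1*9 = 0.9 from every channel:

    x = cat(3, 1, 2, 2) ;            % one spatial location, three channels
    y = vl_nnnoffset(x, [0.1 1]) ;   % offset = 0.1 * 9^1 = 0.9
    assert(max(abs(y(:) - [0.1 ; 1.1 ; 1.1])) < 1e-12)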
--------------------------------------------------------------------------------
/GetGrad.m:
--------------------------------------------------------------------------------
1 | function [Grad] = GetGrad(net,curTgtImg,SrcImg,srcMats,params)
2 | global SavedDataPrevDeltas;
3 | if(params.lbfgs_opts.autoconverge==1 && numel(SavedDataPrevDeltas)>=params.lbfgs_opts.autoconvergelen+1)
4 | if(prod(SavedDataPrevDeltas(end-params.lbfgs_opts.autoconvergelen:end)<=params.lbfgs_opts.autoconvergethresh))
5 | Grad = zeros(size(curTgtImg(:)));
6 | return;
7 | end
8 | end
9 |
10 | curTgtImg = reshape(curTgtImg,[round(params.USFac*size(SrcImg,1)),round(params.USFac*size(SrcImg,2)),size(SrcImg,3)]);
11 | [tgtFeatures.styleFeatures, tgtFeatures.ACorrFeatures, tgtFeatures.DiversityFeatures, tgtFeatures.SmoothnessFeatures] = CalcNetFeatures(net, curTgtImg, params);
12 |
13 | [~,CombinedGrads] = CalcErrorLoss(srcMats,tgtFeatures,params, 0);
14 |
15 | [Grad] = DoBackProp(net, CombinedGrads, curTgtImg, params);
16 | Grad = double(Grad(:));
17 |
18 |
19 | end
20 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_setupnn.m:
--------------------------------------------------------------------------------
1 | function vl_setupnn()
2 | %VL_SETUPNN Setup the MatConvNet toolbox.
3 | % VL_SETUPNN() function adds the MatConvNet toolbox to MATLAB path.
4 |
5 | % Copyright (C) 2014-15 Andrea Vedaldi.
6 | % All rights reserved.
7 | %
8 | % This file is part of the VLFeat library and is made available under
9 | % the terms of the BSD license (see the COPYING file).
10 |
11 | root = vl_rootnn() ;
12 | addpath(fullfile(root, 'matlab')) ;
13 | addpath(fullfile(root, 'matlab', 'mex')) ;
14 | addpath(fullfile(root, 'matlab', 'simplenn')) ;
15 | addpath(fullfile(root, 'matlab', 'xtest')) ;
16 | addpath(fullfile(root, 'examples')) ;
17 |
18 | if ~exist('gather')
19 | warning('The MATLAB Parallel Toolbox does not seem to be installed. Activating compatibility functions.') ;
20 | addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ;
21 | end
22 |
23 | if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0
24 | warning('MatConvNet is not compiled. Consider running `vl_compilenn`.');
25 | end
26 |
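
A typical first-time session from the MATLAB prompt (vl_compilenn is part of the toolbox; the GPU flag is optional and system-dependent):

    run(fullfile('matconvnet-1.0-beta20', 'matlab', 'vl_setupnn.m')) ;
    vl_compilenn ;                        % CPU-only build
    % vl_compilenn('enableGpu', true) ;   % with a CUDA toolkit installed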
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/Matlab/Makefile:
--------------------------------------------------------------------------------
1 | # Feb 2015, Stephen Becker
2 | # Run this, or run the Matlab script
3 | # (on Windows, the Matlab script is probably easier)
4 | # Assumes you have gcc
5 |
6 |
7 | MATLAB = $(shell matlab -e | sed -n 's/MATLAB=//p')
8 | CC = $(MATLAB)/bin/mex
9 | #CC = mex # Can be mistaken for pdftex
10 | FLAGS = -largeArrayDims -lm
11 | FLAGS += -O -g
12 | #FLAGS += "CFLAGS='$$CFLAGS -Wall'"
13 | #-Wall -Wno-uninitialized
14 | # If you want, define or undefine the debug flag
15 | #FLAGS += -DDEBUG
16 | FLAGS += -UDEBUG
17 |
18 | SRC_DIR=../src
19 |
20 | INCLUDES = -I$(SRC_DIR)
21 |
22 | LBFGSB = $(SRC_DIR)/lbfgsb.c $(SRC_DIR)/linesearch.c \
23 | $(SRC_DIR)/subalgorithms.c $(SRC_DIR)/print.c
24 |
25 | LINPACK = $(SRC_DIR)/linpack.c
26 |
27 | BLAS = $(SRC_DIR)/miniCBLAS.c
28 | #CFLAGS += -D_USE_OPTIMIZED_BLAS -lblas
29 | #CFLAGS += -D_USE_OPTIMIZED_BLAS -lmwblas
30 |
31 | TIMER = $(SRC_DIR)/timer.c
32 |
33 | SRC = $(LBFGSB) $(LINPACK) $(BLAS) $(TIMER)
34 |
35 | all: mex
36 |
37 | mex: $(SRC) Makefile lbfgsb_wrapper.c
38 | $(CC) $(FLAGS) $(INCLUDES) lbfgsb_wrapper.c $(SRC)
39 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_test_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | function vl_test_imreadjpeg
2 | % VL_TEST_IMREADJPEG
3 |
4 | % Test basic file reading capability
5 | for t=1:6
6 | files{t} = which(sprintf('office_%d.jpg', t)) ;
7 | end
8 | ims = vl_imreadjpeg(files) ;
9 |
10 | % Test inserting a non-image file
11 | files_ = files ;
12 | files_{3} = [mfilename('fullpath') '.m'];
13 | ims_ = vl_imreadjpeg(files_) ;
14 | for t=setdiff(1:6,3)
15 | assert(isequal(ims{t},ims_{t})) ;
16 | end
17 |
18 | % Test inserting a non-existing file
19 | files__ = files_ ;
20 | files__{4} = 'idontexist.jpg' ;
21 | ims__ = vl_imreadjpeg(files__) ;
22 | for t=setdiff(1:6,[3 4])
23 | assert(isequal(ims{t},ims__{t})) ;
24 | end
25 |
26 | for n = 1:4
27 | % Test prefetching
28 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ;
29 | ims___ = vl_imreadjpeg(files) ;
30 | assert(isequal(ims,ims___)) ;
31 |
32 | % Hardening: test prefetching, clearing mex, fetching
33 | vl_imreadjpeg(files,'prefetch') ;
34 | clear mex ;
35 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ;
36 | assert(isequal(ims,ims___)) ;
37 | end
38 |
--------------------------------------------------------------------------------
/CombineGrads.m:
--------------------------------------------------------------------------------
1 | function [grads] = CombineGrads(styleGrads, ACorrGrads, DiversityGrads, SmoothnessGrads, params)
2 |
3 | grads = cell(length(params.unitedLayerInds),1);
4 | for k=1:length(params.unitedLayerInds)
5 | grads{k}=0;
6 |
7 | auxInd = find(params.unitedLayerInds(k)==params.styleMatchLayerInds,1);
8 | if(~isempty(auxInd))
9 | grads{k} = grads{k}+params.styleLossWeight*styleGrads{auxInd};
10 | end
11 |
12 | auxInd = find(params.unitedLayerInds(k)==params.ACorrMatchLayerInds,1);
13 | if(~isempty(auxInd))
14 | grads{k} = grads{k}+params.ACorrLossWeight*single(ACorrGrads{auxInd});
15 | end
16 |
17 | auxInd = find(params.unitedLayerInds(k)==params.DiversityMatchLayerInds,1);
18 | if(~isempty(auxInd) && params.DiversityLossWeight ~= 0)
19 | grads{k} = grads{k}+params.DiversityLossWeight*single(DiversityGrads{auxInd});
20 | end
21 |
22 | auxInd = find(params.unitedLayerInds(k)==params.SmoothnessMatchLayerInds,1);
23 | if(~isempty(auxInd))
24 | grads{k} = grads{k}+params.SmoothnessLossWeight*single(SmoothnessGrads{auxInd});
25 | end
26 |
27 | end
28 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnsoftmax.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmax(X,dzdY)
2 | %VL_NNSOFTMAX CNN softmax.
3 | % Y = VL_NNSOFTMAX(X) applies the softmax operator to the data X. X
4 | % has dimension H x W x D x N, packing N arrays of W x H
5 | % D-dimensional vectors.
6 | %
7 | % D can be thought of as the number of possible classes and the
8 | % function computes the softmax along the D dimension. Often W=H=1,
9 | % but this is not a requirement, as the operator is applied
10 | % convolutionally at all spatial locations.
11 | %
12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block
13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
14 | % X and Y respectively.
15 |
16 | % Copyright (C) 2014 Andrea Vedaldi.
17 | % All rights reserved.
18 | %
19 | % This file is part of the VLFeat library and is made available under
20 | % the terms of the BSD license (see the COPYING file).
21 |
22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ;
23 | L = sum(E,3) ;
24 | Y = bsxfun(@rdivide, E, L) ;
25 |
26 | if nargin <= 1, return ; end
27 |
28 | % backward
29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ;
30 |
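
Subtracting the per-location maximum before exponentiating leaves the result unchanged (the common factor cancels in the ratio) but prevents overflow for large inputs; a one-location check:

    x = reshape([1 2 3], 1, 1, 3) ;
    y = vl_nnsoftmax(x) ;
    e = exp([1 2 3] - 3) ;                % same shift as the code above
    assert(all(abs(squeeze(y)' - e/sum(e)) < 1e-12))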
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnnormalize.hpp:
--------------------------------------------------------------------------------
1 | // @file nnnormalize.hpp
2 | // @brief Normalization block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnnormalize__
14 | #define __vl__nnnormalize__
15 |
16 | #include "data.hpp"
17 | #include <stdio.h>
18 |
19 | namespace vl {
20 |
21 | vl::Error
22 | nnlrn_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | size_t normDepth,
26 | double kappa, double alpha, double beta) ;
27 |
28 | vl::Error
29 | nnlrn_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor data,
32 | vl::Tensor derOutput,
33 | size_t normDepth,
34 | double kappa, double alpha, double beta) ;
35 | }
36 |
37 | #endif /* defined(__vl__nnnormalize__) */
38 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnmnist.m:
--------------------------------------------------------------------------------
1 | classdef nnmnist < nntest
2 | properties (TestParameter)
3 | networkType = {'dagnn', 'simplenn'}
4 | end
5 |
6 | methods (TestClassSetup)
7 | function init(test)
8 | addpath(fullfile(vl_rootnn, 'examples', 'mnist'));
9 | end
10 | end
11 |
12 | methods (Test)
13 | function valErrorRate(test, networkType)
14 | clear mex ; % will reset GPU, remove MCN to avoid crashing
15 | % MATLAB on exit (BLAS issues?)
16 | if strcmp(test.dataType, 'double'), return ; end
17 | switch test.currentDevice
18 | case 'cpu'
19 | gpus = [];
20 | case 'gpu'
21 | gpus = 1;
22 | end
23 | trainOpts = struct('numEpochs', 1, 'continue', false, 'gpus', gpus, ...
24 | 'plotStatistics', false);
25 | if strcmp(networkType, 'simplenn')
26 | trainOpts.errorLabels = {'error', 'top5err'} ;
27 | end
28 | [~, info] = cnn_mnist('train', trainOpts, 'networkType', networkType);
29 | test.verifyLessThan(info.train.error, 0.08);
30 | test.verifyLessThan(info.val.error, 0.025);
31 | end
32 | end
33 | end
34 |
--------------------------------------------------------------------------------
/CalcACorrTensorFull.m:
--------------------------------------------------------------------------------
1 | function [ACorrTensor] = CalcACorrTensorFull(SrcfeatMat,TrgfeatMat)
2 | NumOfFeatures = size(SrcfeatMat,3);
3 | SizeOfFeatures = [size(SrcfeatMat,1) size(SrcfeatMat,2)];
4 | ACorrTensor = zeros(size(TrgfeatMat,1),size(TrgfeatMat,2),size(TrgfeatMat,3));
5 |
6 | WeightsInX = [1:SizeOfFeatures(1) SizeOfFeatures(1)-1:-1:1];
7 | [WeightsInX,WeightsInY] = meshgrid(WeightsInX,WeightsInX);
8 |
9 | for IndN = 1:NumOfFeatures
10 | DummyACorr = xcorr2(SrcfeatMat(:,:,IndN));
11 | DummyACorr = DummyACorr./(WeightsInX.*WeightsInY);
12 |
13 | MidCoord = ceil(size(DummyACorr)./2);
14 | Enlargement = floor((size(TrgfeatMat,1)-size(SrcfeatMat,1))/2);
15 | DummyACorr = DummyACorr(MidCoord(1)-floor(SizeOfFeatures(1)/2)-Enlargement:MidCoord(1)-floor(SizeOfFeatures(1)/2)+SizeOfFeatures(1)-1+Enlargement,...
16 | MidCoord(2)-floor(SizeOfFeatures(2)/2)-Enlargement:MidCoord(2)-floor(SizeOfFeatures(2)/2)+SizeOfFeatures(2)-1+Enlargement);
17 | assert(size(DummyACorr,1)==size(TrgfeatMat,1));
18 | ACorrTensor(:,:,IndN) = DummyACorr;
19 | end
20 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/nnbilinearsampler_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler_cudnn.hpp
2 | // @brief BilinearSampler CuDNN-based implementation.
3 | // @author Ankush Gupta, Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bilinearsampler_cudnn__
14 | #define __vl__bilinearsampler_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <typename type>
22 | struct nnbilinearsampler_cudnn
23 | {
24 | static vl::Error
25 | forward(Context& context,
26 | Tensor output,
27 | Tensor data,
28 | Tensor grid) ;
29 |
30 | static vl::Error
31 | backward(Context& context,
32 | Tensor derData,
33 | Tensor derGrid,
34 | Tensor data,
35 | Tensor grid,
36 | Tensor derOutput) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__bilinearsampler_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbilinearsampler.hpp
2 | // @brief Bilinear sampler block
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbilinearsampler__
14 | #define __vl__nnbilinearsampler__
15 |
16 | #include "data.hpp"
17 | #include <stdio.h>
18 |
19 | namespace vl {
20 | vl::Error
21 | nnbilinearsampler_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor grid) ;
25 |
26 | vl::Error
27 | nnbilinearsampler_backward(vl::Context& context,
28 | vl::Tensor derData,
29 | vl::Tensor derGrid,
30 | vl::Tensor data,
31 | vl::Tensor grid,
32 | vl::Tensor derOutput) ;
33 | }
34 |
35 | #endif /* defined(__vl__nnbilinearsampler__) */
36 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_bench_bnorm.m:
--------------------------------------------------------------------------------
1 | function vl_bench_bnorm(gpu)
2 | if nargin < 1
3 | gpu = false ;
4 | end
5 |
6 | T = 100 ;
7 | x = randn(64,64,32,32,'single') ;
8 | g = randn(32,1,'single') ;
9 | b = randn(32,1,'single') ;
10 |
11 | if gpu
12 | x = gpuArray(x) ;
13 | g = gpuArray(g) ;
14 | b = gpuArray(b) ;
15 | end
16 |
17 | tic
18 | for t=1:T
19 | y = vl_nnbnorm(x,g,b) ;
20 | end
21 | if gpu, wait(gpuDevice) ; end
22 | fprintf('new: %f\n',toc);
23 |
24 | tic
25 | for t=1:T
26 | y_ = vl_nnbnorm_old(x,g,b) ;
27 | end
28 | if gpu, wait(gpuDevice) ; end
29 | fprintf('old: %f\n',toc);
30 |
31 | dzdy = randn(size(y),'single') ;
32 | if gpu
33 | dzdy = gpuArray(dzdy) ;
34 | end
35 |
36 | tic
37 | for t=1:T
38 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ; % named so the bias b is not clobbered before the old-code loop below
39 | end
40 | if gpu, wait(gpuDevice) ; end
41 | fprintf('new deriv: %f\n',toc);
42 |
43 | tic
44 | for t=1:T
45 | [dzdx_,dzdg_,dzdb_] = vl_nnbnorm_old(x,g,b,dzdy) ;
46 | end
47 | if gpu, wait(gpuDevice) ; end
48 | fprintf('old deriv: %f\n',toc);
49 |
50 | vl_testsim(y,y_);
51 | vl_testsim(dzdx,dzdx_);
52 | vl_testsim(dzdg,dzdg_);
53 | vl_testsim(dzdb,dzdb_);
54 | end
55 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < dagnn.ElementWise
2 | properties
3 | size
4 | hasBias = true
5 | end
6 |
7 | methods
8 |
9 | function outputs = forward(obj, inputs, params)
10 | args = horzcat(inputs, params) ;
11 | outputs{1} = bsxfun(@times, args{1}, args{2}) ;
12 | if obj.hasBias
13 | outputs{1} = bsxfun(@plus, outputs{1}, args{3}) ;
14 | end
15 | end
16 |
17 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
18 | args = horzcat(inputs, params) ;
19 | sz = [size(args{2}) 1 1 1 1] ;
20 | sz = sz(1:4) ;
21 | dargs{1} = bsxfun(@times, derOutputs{1}, args{2}) ;
22 | dargs{2} = derOutputs{1} .* args{1} ;
23 | for k = find(sz == 1)
24 | dargs{2} = sum(dargs{2}, k) ;
25 | end
26 | if obj.hasBias
27 | dargs{3} = derOutputs{1} ;
28 | for k = find(sz == 1)
29 | dargs{3} = sum(dargs{3}, k) ;
30 | end
31 | end
32 | derInputs = dargs(1:numel(inputs)) ;
33 | derParams = dargs(numel(inputs)+(1:numel(params))) ;
34 | end
35 |
36 | function obj = Scale(varargin)
37 | obj.load(varargin) ;
38 | end
39 | end
40 | end
41 |
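
The backward pass must undo the broadcasting of the forward bsxfun: wherever the scale parameter is singleton, its derivative is summed over that dimension. A minimal sketch for a 1x1xC parameter:

    x = randn(4,4,3,'single') ; g = randn(1,1,3,'single') ;
    dzdy = randn(4,4,3,'single') ;        % gradient of y = bsxfun(@times, x, g)
    dzdg = sum(sum(dzdy .* x, 1), 2) ;    % reduce over the broadcast dims 1 and 2
    assert(isequal(size(dzdg), size(g)))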
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/normalize.hpp:
--------------------------------------------------------------------------------
1 | // @file normalize.hpp
2 | // @brief Normalize block implementation
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__normalize__
14 | #define __vl__normalize__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <typename type>
22 | struct lrn
23 | {
24 | static vl::Error
25 | forward(type* output,
26 | type const* data,
27 | size_t height, size_t width, size_t depth, size_t size,
28 | size_t normDepth,
29 | type kappa, type alpha, type beta) ;
30 |
31 | static vl::Error
32 | backward(type* derData,
33 | type const* data,
34 | type const* derOutput,
35 | size_t height, size_t width, size_t depth, size_t size,
36 | size_t normDepth,
37 | type kappa, type alpha, type beta) ;
38 | } ;
39 |
40 | } }
41 |
42 | #endif /* __vl__normalize__ */
43 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/nnbias_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbias_cudnn.hpp
2 | // @brief Bias block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnbias_cudnn__
14 | #define __vl__nnbias_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | // todo: data type should be handled internally?
22 |
23 | template <typename type>
24 | struct nnbias_cudnn
25 | {
26 | static vl::Error
27 | forward(vl::Context& context,
28 | vl::Tensor output, double outputMult,
29 | vl::Tensor data, double dataMult,
30 | vl::Tensor biases, double biasesMult) ;
31 |
32 | static vl::Error
33 | backward(vl::Context& context,
34 | vl::Tensor derData, double derDataMult,
35 | vl::Tensor derBiases, double derBiasesMult,
36 | vl::Tensor derOutput, double derOutputMult) ;
37 | } ;
38 |
39 | } }
40 |
41 | #endif /* defined(__vl__nnbias_cudnn__) */
42 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnfullyconnected.hpp:
--------------------------------------------------------------------------------
1 | // @file nnfullyconnected.hpp
2 | // @brief Fully-connected block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 |
14 | #ifndef __vl__nnfullyconnected__
15 | #define __vl__nnfullyconnected__
16 |
17 | #include "data.hpp"
18 |
19 | namespace vl {
20 |
21 | vl::Error
22 | nnfullyconnected_forward(vl::Context& context,
23 | vl::Tensor output,
24 | vl::Tensor data,
25 | vl::Tensor filters,
26 | vl::Tensor biases) ;
27 |
28 | vl::Error
29 | nnfullyconnected_backward(vl::Context& context,
30 | vl::Tensor derData,
31 | vl::Tensor derFilters,
32 | vl::Tensor derBiases,
33 | vl::Tensor data,
34 | vl::Tensor filters,
35 | vl::Tensor derOutput) ;
36 | }
37 |
38 |
39 | #endif /* defined(__vl__nnfullyconnected__) */
40 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnnormalizelp.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnnormalizelp(x,dzdy,varargin)
2 | %VL_NNNORMALIZELP CNN Lp normalization
3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial
4 | % location in the array X:
5 | %
6 | % Y(i,j,k) = X(i,j,k) / (sum_q X(i,j,q)^p + epsilon)^(1/p)
7 | %
8 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the
9 | % function with respect to X projected onto DZDY.
10 | %
11 | % VL_NNNORMALIZE(___, 'opts', val, ...) takes the following options:
12 | %
13 | % `p`:: 2
14 | % The exponent of the Lp norm. Warning: currently only even
15 | % exponents are supported.
16 | %
17 | % `epsilon`:: 0.01
18 | % The constant added to the sum of p-th powers before taking the
19 | % 1/p power (see the formula above).
20 | %
21 | % See also: VL_NNNORMALIZE().
22 |
23 | opts.epsilon = 1e-2 ;
24 | opts.p = 2 ;
25 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
26 |
27 | massp = (sum(x.^opts.p,3) + opts.epsilon) ;
28 | mass = massp.^(1/opts.p) ;
29 | y = bsxfun(@rdivide, x, mass) ;
30 |
31 | if nargin < 2 || isempty(dzdy)
32 | return ;
33 | else
34 | dzdy = bsxfun(@rdivide, dzdy, mass) ;
35 | y = dzdy - bsxfun(@times, sum(dzdy .* x, 3), bsxfun(@rdivide, x.^(opts.p-1), massp)) ;
36 | end
37 |
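
With the default p = 2 each spatial fibre is divided by the square root of its sum of squares plus epsilon; a two-channel example:

    x = reshape(single([3 4]), 1, 1, 2) ;
    y = vl_nnnormalizelp(x) ;             % divides by sqrt(9 + 16 + 0.01)
    assert(max(abs(squeeze(y) - single([3;4])/sqrt(25.01))) < 1e-6)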
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matconvnet.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 14
4 | VisualStudioVersion = 14.0.24720.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "matconvnet", "matconvnet.vcxproj", "{B0BD9132-1D90-4267-A07A-B44DE497A9C7}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Win32 = Debug|Win32
11 | Debug|x64 = Debug|x64
12 | Release|Win32 = Release|Win32
13 | Release|x64 = Release|x64
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.ActiveCfg = Debug|Win32
17 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.Build.0 = Debug|Win32
18 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|x64.ActiveCfg = Debug|Win32
19 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.ActiveCfg = Release|Win32
20 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.Build.0 = Release|Win32
21 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|x64.ActiveCfg = Release|Win32
22 | EndGlobalSection
23 | GlobalSection(SolutionProperties) = preSolution
24 | HideSolutionNode = FALSE
25 | EndGlobalSection
26 | EndGlobal
27 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnsubsample.hpp:
--------------------------------------------------------------------------------
1 | // @file nnsubsample.hpp
2 | // @brief Subsampling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnsubsample__
14 | #define __vl__nnsubsample__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | vl::Error
21 | nnsubsample_forward(vl::Context& context,
22 | vl::Tensor output,
23 | vl::Tensor data,
24 | vl::Tensor biases,
25 | int strideY, int strideX,
26 | int padTop, int padBottom,
27 | int padLeft, int padRight) ;
28 |
29 | vl::Error
30 | nnsubsample_backward(vl::Context& context,
31 | vl::Tensor derData,
32 | vl::Tensor derBiases,
33 | vl::Tensor derOutput,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 | }
38 |
39 | #endif /* defined(__vl__nnsubsample__) */
40 |
--------------------------------------------------------------------------------
/CalcACorrErrorLoss.m:
--------------------------------------------------------------------------------
1 | function [ACorrErrorLoss, ACorrGrads] = CalcACorrErrorLoss(TargetFeatures, SourceACorrs, params)
2 |
3 | numFeatLayers = length(params.ACorrMatchLayerInds);
4 | ACorrGrads = cell(numFeatLayers,1);
5 | ACorrErrorLoss = 0;
6 | for k=1:numFeatLayers
7 | TargetACorr = CalcACorrTensor(TargetFeatures{k});
8 | SourceACorr = SourceACorrs{k};
9 |
10 | ACorrDiff = TargetACorr-SourceACorr;
11 |
12 | ACorrDiffWeight = params.ACorrFeatureWeights(k);
13 | ACorrErrorLoss = ACorrErrorLoss+ACorrDiffWeight*(sum((ACorrDiff(:)).^2));
14 |
15 | ACorrGrads{k} = ACorrDiffWeight*CalcACorrGrad(TargetFeatures{k},ACorrDiff);
16 | end
17 |
18 | end
19 |
20 | function [ACorrGrad] = CalcACorrGrad(featMat,ACorrDiff)
21 | NumOfFeatures = size(featMat,3);
22 | SizeOfFeatures = [size(featMat,1) size(featMat,2)];
23 | ACorrGrad = zeros(size(featMat));
24 |
25 | WeightsOutX = [round(size(featMat,1)/2):size(featMat,1) size(featMat,1)-1:-1:round(size(featMat,1)/2)];
26 | [WeightsOutX,WeightsOutY] = meshgrid(WeightsOutX,WeightsOutX);
27 |
28 | for IndN = 1:NumOfFeatures
29 | ACorrGrad(:,:,IndN) = 4*conv2(double(flipud(fliplr(ACorrDiff(:,:,IndN))))./(WeightsOutX.*WeightsOutY),double(featMat(:,:,IndN)),'same');
30 | end
31 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/copy_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file copy_cpu.cpp
2 | // @brief Copy and other data operations (CPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include <string.h>
15 |
16 | namespace vl { namespace impl {
17 |
18 | template <typename type>
19 | struct operations<vl::CPU, type>
20 | {
21 | typedef type data_type ;
22 |
23 | static vl::Error
24 | copy(data_type * dest,
25 | data_type const * src,
26 | size_t numElements)
27 | {
28 | memcpy(dest, src, numElements * sizeof(data_type)) ;
29 | return vlSuccess ;
30 | }
31 |
32 | static vl::Error
33 | fill(data_type * dest,
34 | size_t numElements,
35 | data_type value)
36 | {
37 | for (size_t k = 0 ; k < numElements ; ++k) {
38 | dest[k] = value ;
39 | }
40 | return vlSuccess ;
41 | }
42 | } ;
43 |
44 | } }
45 |
46 | template struct vl::impl::operations<vl::CPU, float> ;
47 |
48 | #ifdef ENABLE_DOUBLE
49 | template struct vl::impl::operations<vl::CPU, double> ;
50 | #endif
51 |
52 |
53 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/DropOut.m:
--------------------------------------------------------------------------------
1 | classdef DropOut < dagnn.ElementWise
2 | properties
3 | rate = 0.5
4 | frozen = false
5 | end
6 |
7 | properties (Transient)
8 | mask
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if strcmp(obj.net.mode, 'test')
14 | outputs = inputs ;
15 | return ;
16 | end
17 | if obj.frozen && ~isempty(obj.mask)
18 | outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ;
19 | else
20 | [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ;
21 | end
22 | end
23 |
24 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
25 | if strcmp(obj.net.mode, 'test')
26 | derInputs = derOutputs ;
27 | derParams = {} ;
28 | return ;
29 | end
30 | derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ;
31 | derParams = {} ;
32 | end
33 |
34 | % ---------------------------------------------------------------------
35 | function obj = DropOut(varargin)
36 | obj.load(varargin{:}) ;
37 | end
38 |
39 | function obj = reset(obj)
40 | reset@dagnn.ElementWise(obj) ;
41 | obj.mask = [] ;
42 | obj.frozen = false ;
43 | end
44 | end
45 | end
46 |
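
The frozen branch reuses a stored mask so the dropout pattern stays fixed across calls; the same behaviour can be reproduced with the 'rate'/'mask' interface of vl_nndropout seen above:

    x = randn(4,4,2,'single') ;
    [y1, mask] = vl_nndropout(x, 'rate', 0.5) ;  % sample and return a mask
    y2 = vl_nndropout(x, 'mask', mask) ;         % reuse it: identical output
    assert(isequal(y1, y2))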
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nncrop.m:
--------------------------------------------------------------------------------
1 | function y = vl_nncrop(x, crop, dzdy, inputSize)
2 | %VL_NNCROP CNN crop.
3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the
4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT].
5 | %
6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the
7 | % function projected on the output derivative DZDY. DZDX has the same
8 | % dimension as X and DZDY the same dimension as Y.
9 | %
10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to
11 | % the previous call in which X is omitted and its size is passed as
12 | % INPUTSIZE.
13 |
14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi.
15 | % All rights reserved.
16 | %
17 | % This file is part of the VLFeat library and is made available under
18 | % the terms of the BSD license (see the COPYING file).
19 |
20 | if nargin < 4
21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ;
22 | else
23 | sz = inputSize ;
24 | end
25 |
26 | sv = 1 + crop(1) : sz(1) - crop(2) ;
27 | su = 1 + crop(3) : sz(2) - crop(4) ;
28 |
29 | if nargin <= 2 || isempty(dzdy)
30 | y = x(sv, su, :, :) ;
31 | else
32 | if isa(dzdy, 'gpuArray')
33 | y = gpuArray.zeros(sz, classUnderlying(dzdy)) ;
34 | else
35 | y = zeros(sz, class(dzdy)) ;
36 | end
37 | y(sv, su, :, :) = dzdy ;
38 | end
39 |
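
A shape-level sketch of both branches (CROP = [TOP BOTTOM LEFT RIGHT]):

    x = randn(10, 12, 3, 2, 'single') ;
    y = vl_nncrop(x, [2 1 3 0]) ;             % keeps rows 3:9 and columns 4:12
    assert(isequal(size(y), [7 9 3 2]))
    dzdx = vl_nncrop(x, [2 1 3 0], randn(size(y), 'single')) ;
    assert(isequal(size(dzdx), size(x)))      % dzdy scattered back, zeros elsewhere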
--------------------------------------------------------------------------------
/CalcNetFeatures.m:
--------------------------------------------------------------------------------
1 | function [styleFeatures, ACorrFeatures,DiversityFeatures,SmoothnessFeatures] = CalcNetFeatures(net, img, params)
2 |
3 | truncatedNet = net;
4 | truncatedNet.layers = net.layers(1:params.lastLayerNum);
5 |
6 | dzdy = [];
7 | res = [];
8 |
9 | newRes = vl_simplenn(truncatedNet,single(img),dzdy,res,...
10 | 'accumulate', false, ...
11 | 'mode', 'test', ...
12 | 'conserveMemory', 0, ...
13 | 'backPropDepth', 1, ...
14 | 'sync', 0, ...
15 | 'cudnn', 0) ;
16 | styleFeatures = {};
17 | for k=1:length(params.styleMatchLayerInds)
18 | curLayerInd = params.styleMatchLayerInds(k);
19 | styleFeatures{k} = newRes(curLayerInd+1).x; % res(1) holds the input, so res(k+1) is the output of layer k
20 | end
21 |
22 | ACorrFeatures = {};
23 | for k=1:length(params.ACorrMatchLayerInds)
24 | curLayerInd = params.ACorrMatchLayerInds(k);
25 | ACorrFeatures{k} = newRes(curLayerInd+1).x;
26 | end
27 |
28 | DiversityFeatures = {};
29 | for k=1:length(params.DiversityMatchLayerInds)
30 | curLayerInd = params.DiversityMatchLayerInds(k);
31 | DiversityFeatures{k} = newRes(curLayerInd+1).x;
32 | end
33 |
34 | SmoothnessFeatures = {};
35 | for k=1:length(params.SmoothnessMatchLayerInds)
36 | curLayerInd = params.SmoothnessMatchLayerInds(k);
37 | SmoothnessFeatures{k} = newRes(curLayerInd+1).x;
38 | end
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_test_gpureset.m:
--------------------------------------------------------------------------------
1 | for explicitMexReset = [false]
2 |
3 | % reset the same GPU device
4 | for t = 1:6
5 | if explicitMexReset, clear mex ; end
6 | if mod(t-1,2) == 0
7 | disp('vl_test_gpureset: resetting GPU') ;
8 | gpuDevice(1) ;
9 | else
10 | disp('vl_test_gpureset: not resetting GPU') ;
11 | end
12 | if t > 1, disp(a) ; end
13 | a = gpuArray(single(ones(10))) ;
14 | b = gpuArray(single(ones(5))) ;
15 | c = vl_nnconv(a,b,[],'nocudnn') ;
16 | end
17 |
18 | % after a GPU reset, passing stale gpuArray arguments to a MEX file should fail cleanly
19 | a = gpuArray(single(ones(10))) ;
20 | b = gpuArray(single(ones(5))) ;
21 | c = vl_nnconv(a,b,[],'nocudnn') ;
22 |
23 | gpuDevice(1) ;
24 | disp(a) ;
25 | try
26 | c = vl_nnconv(a,b,[],'nocudnn') ;
27 | catch e
28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ;
29 | end
30 |
31 | % switch GPU devices
32 | if gpuDeviceCount > 1
33 | disp('vl_test_gpureset: test switching GPU device') ;
34 | for t = 1:gpuDeviceCount
35 | if explicitMexReset, clear mex ; end
36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ;
37 | gpuDevice(t) ;
38 | a = gpuArray(single(ones(10))) ;
39 | b = gpuArray(single(ones(5))) ;
40 | c = vl_nnconv(a,b,[],'nocudnn') ;
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/src/Makefile:
--------------------------------------------------------------------------------
1 | # Feb 17 2015, converted fortran to C
2 | # Stephen Becker, stephen.becker@colorado.edu
3 |
4 |
5 | FC = gfortran
6 | CC = gcc
7 | #CC = gcc-4.9
8 |
9 | #FFLAGS = -O -Wall -fbounds-check -g -Wno-uninitialized
10 | #FFLAGS += -fdefault-integer-8
11 | CFLAGS = -O3 -Wall -Wno-uninitialized
12 | #CFLAGS = -g -Wall -lm -DDEBUG -Wuninitialized -Wextra -Wno-unused-parameter
13 | LDFLAGS = -lm
14 |
15 | # my f2c.h is here:
16 | INCLUDES = -I/usr/local/include -I./
17 |
18 | DRIVER1_77 = driver1.c
19 | DRIVER2_77 = driver2.c
20 | DRIVER3_77 = driver3.c
21 |
22 | LBFGSB = lbfgsb.c linesearch.c subalgorithms.c print.c
23 |
24 | LINPACK = linpack.c
25 |
26 | BLAS = miniCBLAS.c
27 | #CFLAGS += -D_USE_OPTIMIZED_BLAS -lblas
28 |
29 | TIMER = timer.c
30 |
31 | SRC = $(LBFGSB) $(LINPACK) $(BLAS) $(TIMER) $(F2CFILES)
32 |
33 | default: all test_1
34 | all : lbfgsb_77_1 lbfgsb_77_2 lbfgsb_77_3
35 |
36 | lbfgsb_77_1 : $(DRIVER1_77) $(SRC)
37 | $(CC) $(CFLAGS) $(DRIVER1_77) $(SRC) $(LDFLAGS) -o x.lbfgsb_77_1
38 |
39 | lbfgsb_77_2 : $(DRIVER2_77) $(LBFGSB) $(LINPACK) $(BLAS) $(TIMER)
40 | $(CC) $(CFLAGS) $(DRIVER2_77) $(SRC) $(LDFLAGS) -o x.lbfgsb_77_2
41 |
42 | lbfgsb_77_3 : $(DRIVER3_77) $(LBFGSB) $(LINPACK) $(BLAS) $(TIMER)
43 | $(CC) $(CFLAGS) $(DRIVER3_77) $(SRC) $(LDFLAGS) -o x.lbfgsb_77_3
44 |
45 | test_1 : x.lbfgsb_77_1
46 | ./x.lbfgsb_77_1
47 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/subsample.hpp:
--------------------------------------------------------------------------------
1 | // @file subsample.hpp
2 | // @brief Subsampling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_NNSUBSAMPLE_H
15 | #define VL_NNSUBSAMPLE_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | namespace vl { namespace impl {
21 |
22 | template <typename type>
23 | struct subsample {
24 |
25 | static vl::Error
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
32 |
33 | static vl::Error
34 | backward(vl::Context& context,
35 | type* derData,
36 | type const* derOutput,
37 | size_t height, size_t width, size_t depth,
38 | size_t strideY, size_t strideX,
39 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
40 | } ;
41 |
42 | } }
43 |
44 | #endif /* defined(VL_NNSUBSAMPLE_H) */
45 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Pooling.m:
--------------------------------------------------------------------------------
1 | classdef Pooling < dagnn.Filter
2 | properties
3 | method = 'max'
4 | poolSize = [1 1]
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(self, inputs, params)
10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ...
11 | 'pad', self.pad, ...
12 | 'stride', self.stride, ...
13 | 'method', self.method, ...
14 | self.opts{:}) ;
15 | end
16 |
17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs)
18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ...
19 | 'pad', self.pad, ...
20 | 'stride', self.stride, ...
21 | 'method', self.method, ...
22 | self.opts{:}) ;
23 | derParams = {} ;
24 | end
25 |
26 | function kernelSize = getKernelSize(obj)
27 | kernelSize = obj.poolSize ;
28 | end
29 |
30 | function outputSizes = getOutputSizes(obj, inputSizes)
31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
32 | outputSizes{1}(3) = inputSizes{1}(3) ;
33 | end
34 |
35 | function obj = Pooling(varargin)
36 | obj.load(varargin) ;
37 | end
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/addLayer.m:
--------------------------------------------------------------------------------
1 | function addLayer(obj, name, block, inputs, outputs, params, varargin)
2 | %ADDLAYER Adds a layer to a DagNN
3 | % ADDLAYER(NAME, LAYER, INPUTS, OUTPUTS, PARAMS) adds the
4 | % specified layer to the network. NAME is a string with the layer
5 | % name, used as a unique identifier. BLOCK is the object
6 | % implementing the layer, which should be a subclass of the
7 | % Layer. INPUTS, OUTPUTS are cell arrays of variable names, and
8 | % PARAMS of parameter names.
9 | %
10 | % See Also REMOVELAYER().
11 | opts.skipRebuild = false;
12 | opts = vl_argparse(opts, varargin);
13 |
14 | index = find(strcmp(name, {obj.layers.name})) ;
15 | if ~isempty(index), error('There is already a layer with name ''%s''.', name), end
16 | index = numel(obj.layers) + 1 ;
17 |
18 | if nargin < 6, params = {} ; end
19 | if ischar(inputs), inputs = {inputs} ; end
20 | if ischar(outputs), outputs = {outputs} ; end
21 | if ischar(params), params = {params} ; end
22 |
23 | obj.layers(index) = struct(...
24 | 'name', {name}, ...
25 | 'inputs', {inputs}, ...
26 | 'outputs', {outputs}, ...
27 | 'params', {params}, ...
28 | 'inputIndexes', {[]}, ...
29 | 'outputIndexes', {[]}, ...
30 | 'paramIndexes', {[]}, ...
31 | 'forwardTime', {[]}, ...
32 | 'backwardTime', {[]}, ...
33 | 'block', {block}) ;
34 | obj.layers(index).block.attach(obj, index) ;
35 | if ~opts.skipRebuild, obj.rebuild() ; end;
36 |
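A minimal call sketch; the layer, variable, and parameter names are illustrative:

    net.addLayer('relu1', dagnn.ReLU(), {'x1'}, {'x2'}, {}) ;
    net.addLayer('conv1', dagnn.Conv('size', [3 3 1 8]), ...
                 {'x0'}, {'x1'}, {'conv1_f', 'conv1_b'}) ;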
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/im2row.hpp:
--------------------------------------------------------------------------------
1 | // @file im2row.hpp
2 | // @brief Stack image patches as matrix rows
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__im2row__
14 | #define __vl__im2row__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <vl::Device dev, typename type>
22 | struct im2row {
23 |
24 | static vl::Error
25 | forward(vl::Context& context,
26 | type* stacked,
27 | type const* data,
28 | size_t height, size_t width, size_t depth,
29 | size_t windowHeight, size_t windowWidth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
32 |
33 | static vl::Error
34 | backward(vl::Context& context,
35 | type* data,
36 | type const* stacked,
37 | size_t height, size_t width, size_t depth,
38 | size_t windowHeight, size_t windowWidth,
39 | size_t strideY, size_t strideX,
40 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
41 | } ;
42 |
43 | } }
44 |
45 | #endif /* defined(__vl__im2row__) */
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnspnorm.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnspnorm(x, param, dzdy)
2 | %VL_NNSPNORM CNN spatial normalization.
3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of
4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and
5 | % PW define the size of the spatial neighbourhood used for
6 | %   normalization.
7 | %
8 | % For each feature channel, the function computes the sum of squares
9 | % of X inside each rectangle, N2(i,j). It then divides each element
10 | % of X as follows:
11 | %
12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA.
13 | %
14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the
15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
16 | % as X and Y respectively.
17 |
18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | pad = floor((param(1:2)-1)/2) ;
25 | pad = [pad ; param(1:2)-1-pad] ;
26 |
27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ;
28 | f = 1 + param(3) * n2 ;
29 |
30 | if nargin <= 2 || isempty(dzdy)
31 | y = f.^(-param(4)) .* x ;
32 | else
33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ;
34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ;
35 | end
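A quick numeric check of the forward computation, assuming a constant input; N2 here is the window statistic as computed by the code above (average pooling of X.^2):

    x = ones(5,5,1,1,'single') ;
    y = vl_nnspnorm(x, [3 3 0.5 0.75]) ;
    % away from the borders N2 equals 1, so there
    % y = (1 + 0.5*1)^(-0.75), approximately 0.7378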
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/nnconv_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv_cudnn.hpp
2 | // @brief Convolution block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnconv_cudnn__
14 | #define __vl__nnconv_cudnn__
15 |
16 | #include "../data.hpp"
17 | #include "cudnn.h"
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <typename type>
22 | struct nnconv_cudnn
23 | {
24 | static vl::Error
25 | forward(Context& context,
26 | Tensor output, double outputMult,
27 | Tensor data, double dataMult,
28 | Tensor filters,
29 | Tensor biases,
30 | int strideX, int strideY,
31 | int padLeft, int padRight,
32 | int padTop, int padBottom) ;
33 |
34 | static vl::Error
35 | backward(Context& context,
36 | Tensor derData,
37 | Tensor derFilters,
38 | Tensor derBiases,
39 | Tensor data,
40 | Tensor filters,
41 | Tensor derOutput,
42 | int strideX, int strideY,
43 | int padLeft, int padRight,
44 | int padTop, int padBottom) ;
45 | } ;
46 |
47 | } }
48 | #endif /* defined(__vl__nnconv_cudnn__) */
49 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnconcat.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin)
2 | %VL_NNCONCAT CNN concatenate multiple inputs.
3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell
4 | % array INPUTS along dimension DIM generating an output Y.
5 | %
6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives
7 | % of the block projected onto DZDY. DZDINPUTS has one element for
8 | % each element of INPUTS, each of which is an array that has the same
9 | % dimensions of the corresponding array in INPUTS.
10 |
11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
12 | % All rights reserved.
13 | %
14 | % This file is part of the VLFeat library and is made available under
15 | % the terms of the BSD license (see the COPYING file).
16 |
17 | opts.inputSizes = [] ;
18 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
19 |
20 | if nargin < 2, dim = 3; end;
21 | if nargin < 3, dzdy = []; end;
22 |
23 | if isempty(dzdy)
24 | y = cat(dim, inputs{:});
25 | else
26 | if isempty(opts.inputSizes)
27 | opts.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
28 | end
29 | start = 1 ;
30 | y = cell(1, numel(opts.inputSizes)) ;
31 | s.type = '()' ;
32 | s.subs = {':', ':', ':', ':'} ;
33 | for i = 1:numel(opts.inputSizes)
34 | stop = start + opts.inputSizes{i}(dim) ;
35 |     s.subs{dim} = start:stop-1 ;
36 | y{i} = subsref(dzdy,s) ;
37 | start = stop ;
38 | end
39 | end
40 |
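A usage sketch of the forward and backward modes, with illustrative sizes:

    x1 = randn(4,4,2,1,'single') ;
    x2 = randn(4,4,3,1,'single') ;
    y  = vl_nnconcat({x1, x2}, 3) ;          % 4x4x5 output
    dzdy  = randn(size(y), 'single') ;
    dzdxs = vl_nnconcat({x1, x2}, 3, dzdy) ; % {4x4x2, 4x4x3} derivatives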
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/simplenn/vl_simplenn_move.m:
--------------------------------------------------------------------------------
1 | function net = vl_simplenn_move(net, destination)
2 | %VL_SIMPLENN_MOVE Move a SimpleNN network between CPU and GPU.
3 | % NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network to the
4 | % current GPU device. NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the
5 | % network to the CPU.
6 | %
7 | % See also: VL_SIMPLENN().
8 |
9 | % Copyright (C) 2014-15 Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | switch destination
16 | case 'gpu', moveop = @(x) gpuArray(x) ;
17 | case 'cpu', moveop = @(x) gather(x) ;
18 | otherwise, error('Unknown destination ''%s''.', destination) ;
19 | end
20 | for l=1:numel(net.layers)
21 | switch net.layers{l}.type
22 | case {'conv', 'convt', 'bnorm'}
23 | for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'}
24 | f = char(f) ;
25 | if isfield(net.layers{l}, f)
26 | net.layers{l}.(f) = moveop(net.layers{l}.(f)) ;
27 | end
28 | end
29 | for f = {'weights', 'momentum'}
30 | f = char(f) ;
31 | if isfield(net.layers{l}, f)
32 | for j=1:numel(net.layers{l}.(f))
33 | net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ;
34 | end
35 | end
36 | end
37 | otherwise
38 |       % nothing to do for this layer type
39 | end
40 | end
41 |
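A typical round trip, assuming `net` is a SimpleNN structure and a GPU is available:

    net = vl_simplenn_move(net, 'gpu') ; % weights become gpuArray
    % ... evaluate or train on gpuArray inputs ...
    net = vl_simplenn_move(net, 'cpu') ; % gather before saving to disk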
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnnormalize.m:
--------------------------------------------------------------------------------
1 | classdef nnnormalize < nntest
2 | properties (TestParameter)
3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17}
4 | sgroup = {2 3 4 5 6 7}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, group)
9 | param = [group, .1, .5, .75] ;
10 | x = test.randn(3,2,10,4) ;
11 | y = vl_nnnormalize(x,param) ;
12 | dzdy = test.rand(size(y))-0.5 ;
13 | dzdx = vl_nnnormalize(x,param,dzdy) ;
14 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ;
15 | end
16 |
17 | function compare_to_naive(test, sgroup)
18 | param = [sgroup, .1, .5, .75] ;
19 | x = test.randn(3,2,10,4) ;
20 | y = vl_nnnormalize(gather(x),param) ;
21 | y_ = test.zeros(size(y)) ;
22 | x_ = gather(x) ;
23 | for i=1:size(x,1)
24 | for j=1:size(x,2)
25 | for n=1:size(x,4)
26 | t = test.zeros(1,1,size(x,3),1) ;
27 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ...
28 | ones(param(1),1), 'same')).^(-param(4)) ;
29 | y_(i,j,:,n) = x_(i,j,:,n) .* t ;
30 | end
31 | end
32 | end
33 | test.eq(y,y_) ;
34 | end
35 |
36 | function l2(test)
37 | x = test.randn(1,1,10,1) ;
38 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ;
39 | test.eq(sum(y(:).^2), test.toDataType(1), 1e-2) ;
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnpooling.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling.hpp
2 | // @brief Pooling block
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling__
14 | #define __vl__nnpooling__
15 |
16 | #include "data.hpp"
17 | #include <cstddef>
18 |
19 | namespace vl {
20 |
21 | enum PoolingMethod { vlPoolingMax, vlPoolingAverage } ;
22 |
23 | vl::Error
24 | nnpooling_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor data,
27 | PoolingMethod method,
28 | int poolHeight, int poolWidth,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::Error
34 | nnpooling_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor data,
37 | vl::Tensor derOutput,
38 | PoolingMethod method,
39 | int poolHeight, int poolWidth,
40 | int strideY, int strideX,
41 | int padTop, int padBottom,
42 | int padLeft, int padRight) ;
43 | }
44 |
45 | #endif /* defined(__vl__nnpooling__) */
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbias.cpp:
--------------------------------------------------------------------------------
1 | #ifdef ENABLE_GPU
2 | #error "The file nnbias.cu should be compiled instead"
3 | #endif
4 | #include "nnbias.cu"
5 |
6 | /**
7 | @brief nnbias_forward
8 | @param context context.
9 | @param output output tensor $\by$ [output].
10 | @param outputMult output tensor multiplier $\alpha$.
11 | @param data data tensor $\bx$.
12 | @param dataMult data tensor multiplier $\beta$.
13 | @param biases biases tensor $\bb$.
14 | @param biasesMult biases tensor multiplier $\gamma$.
15 |
16 | The function computes
17 | @f[
18 | y_{ijkd} \leftarrow
19 | \alpha y_{ijkd} +
20 | \beta x_{ijkd} +
21 | \gamma b_k.
22 | @f]
23 |
24 | @a data can be the null tensor, in which case this tensor
25 | is dropped in the summation.
26 | */
27 |
28 | /**
29 | @brief nnbias_backward
30 | @param context context.
31 | @param derData data derivative tensor $d\bx$ [output].
32 | @param derDataMult data derivative tensor multiplier $\eta$.
33 | @param derBiases biases derivative tensor $d\bb$ [output].
34 | @param derBiasesMult biases derivative tensor multiplier $\tau$.
35 | @param data data tensor $\bx$.
36 | @param dataMult data tensor multiplier $\beta$.
37 | @param biases biases tensor $\bb$.
38 | @param biasesMult biases tensor multiplier $\gamma$.
39 |
40 | If @a derData is the null tensor, this derivative is not computed and
41 | @a biases can also be null.
42 |
43 | If @a derBiases is the null tensor, this derivative is not computed and
44 | @a data can also be null.
45 | */
46 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Filter.m:
--------------------------------------------------------------------------------
1 | classdef Filter < dagnn.Layer
2 | properties
3 | pad = [0 0 0 0]
4 | stride = [1 1]
5 | end
6 | methods
7 | function set.pad(obj, pad)
8 | if numel(pad) == 1
9 | obj.pad = [pad pad pad pad] ;
10 | elseif numel(pad) == 2
11 | obj.pad = pad([1 1 2 2]) ;
12 | else
13 | obj.pad = pad ;
14 | end
15 | end
16 |
17 | function set.stride(obj, stride)
18 | if numel(stride) == 1
19 | obj.stride = [stride stride] ;
20 | else
21 | obj.stride = stride ;
22 | end
23 | end
24 |
25 | function kernelSize = getKernelSize(obj)
26 | kernelSize = [1 1] ;
27 | end
28 |
29 | function outputSizes = getOutputSizes(obj, inputSizes)
30 | ks = obj.getKernelSize() ;
31 | outputSizes{1} = [...
32 | fix((inputSizes{1}(1) + obj.pad(1) + obj.pad(2) - ks(1)) / obj.stride(1)) + 1, ...
33 | fix((inputSizes{1}(2) + obj.pad(3) + obj.pad(4) - ks(2)) / obj.stride(2)) + 1, ...
34 | 1, ...
35 | inputSizes{1}(4)] ;
36 | end
37 |
38 | function rfs = getReceptiveFields(obj)
39 | ks = obj.getKernelSize() ;
40 | y1 = 1 - obj.pad(1) ;
41 | y2 = 1 - obj.pad(1) + ks(1) - 1 ;
42 | x1 = 1 - obj.pad(3) ;
43 | x2 = 1 - obj.pad(3) + ks(2) - 1 ;
44 | h = y2 - y1 + 1 ;
45 | w = x2 - x1 + 1 ;
46 | rfs.size = [h, w] ;
47 | rfs.stride = obj.stride ;
48 | rfs.offset = [y1+y2, x1+x2]/2 ;
49 | end
50 | end
51 | end
52 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/bilinearsampler.hpp:
--------------------------------------------------------------------------------
1 | // @file bilinearsampler.hpp
2 | // @brief Bilinear sampler implementation
3 | // @author Ankush Gupta
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_BILINEARSAMPLER_H
15 | #define VL_BILINEARSAMPLER_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>
19 |
20 | // defines the dispatcher for CUDA kernels:
21 | namespace vl { namespace impl {
22 |
23 | template <vl::Device dev, typename type>
24 | struct bilinearsampler {
25 |
26 | static vl::Error
27 | forward(Context& context,
28 | type* output,
29 | type const* data,
30 | type const* grid,
31 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
32 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
33 |
34 |
35 | static vl::Error
36 | backward(Context& context,
37 | type* derData,
38 | type* derGrid,
39 | type const* data,
40 | type const* grid,
41 | type const* derOutput,
42 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
43 | size_t inHeight, size_t inWidth, size_t inCardinality) ;
44 | } ;
45 |
46 | } }
47 |
48 | #endif /* defined(VL_BILINEARSAMPLER_H) */
49 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Loss.m:
--------------------------------------------------------------------------------
1 | classdef Loss < dagnn.ElementWise
2 | properties
3 | loss = 'softmaxlog'
4 | opts = {}
5 | end
6 |
7 | properties (Transient)
8 | average = 0
9 | numAveraged = 0
10 | end
11 |
12 | methods
13 | function outputs = forward(obj, inputs, params)
14 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss, obj.opts{:}) ;
15 | n = obj.numAveraged ;
16 | m = n + size(inputs{1},4) ;
17 | obj.average = (n * obj.average + gather(outputs{1})) / m ;
18 | obj.numAveraged = m ;
19 | end
20 |
21 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
22 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss, obj.opts{:}) ;
23 | derInputs{2} = [] ;
24 | derParams = {} ;
25 | end
26 |
27 | function reset(obj)
28 | obj.average = 0 ;
29 | obj.numAveraged = 0 ;
30 | end
31 |
32 | function outputSizes = getOutputSizes(obj, inputSizes, paramSizes)
33 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ;
34 | end
35 |
36 | function rfs = getReceptiveFields(obj)
37 |       % the receptive field depends on the dimensions of the variables,
38 |       % which are not known until the network is run
39 | rfs(1,1).size = [NaN NaN] ;
40 | rfs(1,1).stride = [NaN NaN] ;
41 | rfs(1,1).offset = [NaN NaN] ;
42 | rfs(2,1) = rfs(1,1) ;
43 | end
44 |
45 | function obj = Loss(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Sum.m:
--------------------------------------------------------------------------------
1 | classdef Sum < dagnn.ElementWise
2 | %SUM DagNN sum layer
3 | %   The SUM layer takes the sum of all its inputs and stores the result
4 | % as its only output.
5 |
6 | properties (Transient)
7 | numInputs
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | obj.numInputs = numel(inputs) ;
13 | outputs{1} = inputs{1} ;
14 | for k = 2:obj.numInputs
15 | outputs{1} = outputs{1} + inputs{k} ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | for k = 1:obj.numInputs
21 | derInputs{k} = derOutputs{1} ;
22 | end
23 | derParams = {} ;
24 | end
25 |
26 | function outputSizes = getOutputSizes(obj, inputSizes)
27 | outputSizes{1} = inputSizes{1} ;
28 | for k = 2:numel(inputSizes)
29 | if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1}))
30 | if ~isequal(inputSizes{k}, outputSizes{1})
31 |             warning('Sum layer: the dimensions of the input variables are not the same.') ;
32 | end
33 | end
34 | end
35 | end
36 |
37 | function rfs = getReceptiveFields(obj)
38 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
39 | rfs.size = [1 1] ;
40 | rfs.stride = [1 1] ;
41 | rfs.offset = [1 1] ;
42 | rfs = repmat(rfs, numInputs, 1) ;
43 | end
44 |
45 | function obj = Sum(varargin)
46 | obj.load(varargin) ;
47 | end
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/nnpooling_cudnn.hpp:
--------------------------------------------------------------------------------
1 | // @file nnpooling_cudnn.hpp
2 | // @brief Pooling block CuDNN-based implementation.
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__nnpooling_cudnn__
14 | #define __vl__nnpooling_cudnn__
15 |
16 | #include "../nnpooling.hpp"
17 | #include "../data.hpp"
18 | #include "cudnn.h"
19 |
20 |
21 | namespace vl { namespace impl {
22 |
23 | // todo: data type should be handled internally?
24 |
25 | template <typename type>
26 | struct nnpooling_cudnn
27 | {
28 | static vl::Error
29 | forward(Context& context,
30 | Tensor output,
31 | Tensor data,
32 | vl::PoolingMethod method,
33 | int poolHeight, int poolWidth,
34 | int strideY, int strideX,
35 | int padTop, int padBottom,
36 | int padLeft, int padRight) ;
37 |
38 | static vl::Error
39 | backward(Context& context,
40 | Tensor derData,
41 | Tensor data,
42 | Tensor output,
43 | Tensor derOutput,
44 | vl::PoolingMethod method,
45 | int poolHeight, int poolWidth,
46 | int strideY, int strideX,
47 | int padTop, int padBottom,
48 | int padLeft, int padRight) ;
49 | };
50 |
51 | } }
52 |
53 | #endif /* defined(__vl__nnpooling_cudnn__) */
54 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/renameVar.m:
--------------------------------------------------------------------------------
1 | function renameVar(obj, oldName, newName, varargin)
2 | %RENAMEVAR Rename a variable
3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable
4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an
5 | % existing variable.
6 |
7 | opts.quiet = false ;
8 | opts = vl_argparse(opts, varargin) ;
9 |
10 | % Find the variable to rename
11 | v = obj.getVarIndex(oldName) ;
12 | if isnan(v)
13 |   % There is no such variable; nothing to do
14 | if ~opts.quiet
15 | warning('There is no variable ''%s''.', oldName) ;
16 | end
17 | return ;
18 | end
19 |
20 | % Check if newName is an existing variable
21 | newNameExists = any(strcmp(newName, {obj.vars.name})) ;
22 |
23 | % Replace oldName with newName in all the layers
24 | for l = 1:numel(obj.layers)
25 | for f = {'inputs', 'outputs'}
26 | f = char(f) ;
27 | sel = find(strcmp(oldName, obj.layers(l).(f))) ;
28 | [obj.layers(l).(f){sel}] = deal(newName) ;
29 | end
30 | end
31 |
32 | % If newName is already a variable in the graph, then there is
33 | % nothing else to do: obj.rebuild() will remove the slot
34 | % in obj.vars() for oldName as that variable becomes unused.
35 | %
36 | % If, however, newName is not in the graph already, then
37 | % the slot in obj.vars() is preserved and only the variable name
38 | % is changed.
39 |
40 | if ~newNameExists
41 | obj.vars(v).name = newName ;
42 |   % update the variable name hash, otherwise rebuild() won't find
43 |   % this variable correctly
44 | obj.varNames.(newName) = v ;
45 | end
46 |
47 | obj.rebuild() ;
48 |
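A usage sketch; the variable names are illustrative:

    net.renameVar('x3', 'pool1_out') ;             % warns if 'x3' is absent
    net.renameVar('x4', 'fc_out', 'quiet', true) ; % suppress the warning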
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnpdist.m:
--------------------------------------------------------------------------------
1 | classdef nnpdist < nntest
2 | properties (TestParameter)
3 | oneToOne = {false, true}
4 | noRoot = {false, true}
5 | p = {.5 1 2 3}
6 | aggregate = {false, true}
7 | end
8 | methods (Test)
9 | function basic(test,oneToOne, noRoot, p, aggregate)
10 | if aggregate
11 | % make it smaller to avoid numerical derivative issues with
12 | % float
13 | h = 3 ;
14 | w = 2 ;
15 | else
16 | h = 13 ;
17 | w = 17 ;
18 | end
19 | d = 4 ;
20 | n = 5 ;
21 | x = test.randn(h,w,d,n) ;
22 | if oneToOne
23 | x0 = test.randn(h,w,d,n) ;
24 | else
25 | x0 = test.randn(1,1,d,n) ;
26 | end
27 | opts = {'noRoot', noRoot, 'aggregate', aggregate} ;
28 |
29 | y = vl_nnpdist(x, x0, p, opts{:}) ;
30 |
31 |       % make sure x and x0 are not too close in any dimension, as this may
32 |       % be a problem for the finite difference derivatives: the distance
33 |       % could approach 0, where some p-norms are not differentiable
34 |
35 | s = abs(bsxfun(@minus, x, x0)) < test.range*1e-1 ;
36 | x(s) = x(s) + 5*test.range ;
37 |
38 | dzdy = test.rand(size(y)) ;
39 | [dzdx, dzdx0] = vl_nnpdist(x,x0,p,dzdy,opts{:}) ;
40 | test.der(@(x) vl_nnpdist(x,x0,p,opts{:}), x, dzdy, dzdx, test.range * 1e-3) ;
41 | if oneToOne
42 | % Pdist does not implement backprop of the bsxfun
43 | test.der(@(x0) vl_nnpdist(x,x0,p,opts{:}), x0, dzdy, dzdx0, test.range * 1e-3) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, OmrySendik
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/getVarSizes.m:
--------------------------------------------------------------------------------
1 | function sizes = getVarSizes(obj, inputSizes)
2 | %GETVARSIZES Get the size of the variables
3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the
4 | % DagNN variables given the size of the inputs. `inputSizes` is
5 | %   a cell array of the form `{'inputName', inputSize, ...}`.
6 | %   It returns a cell array with the sizes of all network variables.
7 | %
8 | %   Example: compute the storage needed for a batch size of 256 for an
9 | % imagenet-like network:
10 | % ```
11 | % batch_size = 256; single_num_bytes = 4;
12 | % input_size = [net.meta.normalization.imageSize, batch_size];
13 | % var_sizes = net.getVarSizes({'data', input_size});
14 | %      fprintf('Network activations will take %.2fMiB in single.\n', ...
15 | %        sum(cellfun(@prod, var_sizes)) * single_num_bytes ./ 1024^2);
16 | % ```
17 |
18 | % Copyright (C) 2015 Andrea Vedaldi, Karel Lenc.
19 | % All rights reserved.
20 | %
21 | % This file is part of the VLFeat library and is made available under
22 | % the terms of the BSD license (see the COPYING file).
23 |
24 | nv = numel(obj.vars) ;
25 | sizes = num2cell(NaN(nv, 4),2)' ;
26 |
27 | for i = 1:2:numel(inputSizes)
28 | v = obj.getVarIndex(inputSizes{i}) ;
29 | if isnan(v)
30 | error('Variable `%s` not found in the network.', inputSizes{i});
31 | end;
32 | sizes{v} = [inputSizes{i+1}(:)' ones(1, 4 - numel(inputSizes{i+1}))] ;
33 | end
34 |
35 | for layer = obj.layers(obj.executionOrder)
36 | in = layer.inputIndexes ;
37 | out = layer.outputIndexes ;
38 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ;
39 | end
40 |
--------------------------------------------------------------------------------
/L-BFGS-B-C-master/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-clause license
2 | Copyright (c) 2015, Stephen Becker
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | * Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | * Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | classdef nnsoftmaxloss < nntest
2 | properties (TestParameter)
3 | weighed = {false true}
4 | multilab = {false true}
5 | end
6 |
7 | methods (Test)
8 | function basic(test, multilab, weighed)
9 | C = 10 ;
10 | n = 3 ;
11 | if multilab
12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ;
13 | else
14 | c = reshape([7 2 1],1,1,1,[]) ;
15 | end
16 | if weighed
17 | c = cat(3, c, test.rand(size(c))) ;
18 | end
19 |
20 | % compare direct and indirect composition; this cannot
21 | % take large test.ranges
22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative
23 | y = vl_nnsoftmaxloss(x,c) ;
24 | if size(c,3) == 1
25 | opts = {'loss','log'} ;
26 | else
27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ;
28 | end
29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ;
30 | dzdy = test.randn(size(y)) ;
31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ;
33 | test.eq(y,y_) ;
34 | test.eq(dzdx,dzdx_) ;
35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, dzdy, dzdx, 0.001, -5e1) ;
36 |
37 | % now larger input range
38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative
39 | y = vl_nnsoftmaxloss(x,c) ;
40 | dzdy = test.randn(size(y)) ;
41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ;
42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ...
43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ;
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnnormalize.m:
--------------------------------------------------------------------------------
1 | %VL_NNNORMALIZE CNN Local Response Normalization (LRN)
2 | %   Y = VL_NNNORMALIZE(X, PARAM) computes the so-called Local Response
3 | % Normalization (LRN) operator. This operator performs a
4 | % channel-wise sliding window normalization of each column of the
5 | % input array X. The normalized output is given by:
6 | %
7 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA
8 | %
9 | % where the normalization factor is given by
10 | %
11 | %     L(i,j,k) = KAPPA + ALPHA * sum_{q in Q(k)} X(i,j,q)^2,
12 | %
13 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The
14 | % window Q(k) is defined as:
15 | %
16 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))].
17 | %
18 | % where D is the number of feature channels in X. Note in particular
19 | % that, by setting N >= 2D, the function can be used to normalize
20 | % all the channels as a single group (useful to achieve L2
21 | % normalization).
22 | %
23 | %   DZDX = VL_NNNORMALIZE(X, PARAM, DZDY) computes the derivative of
24 | % the block projected onto DZDY. DZDX and DZDY have the same
25 | % dimensions as X and Y respectively.
26 | %
27 | % **Remark:** Some CNN libraries (e.g. Caffe) use a slightly
28 | % different convention for the parameters of the LRN. Caffe in
29 | % particular uses the convention:
30 | %
31 | % PARAM_CAFFE = [N KAPPA N*ALPHA BETA]
32 | %
33 | %   i.e. the ALPHA parameter is multiplied by N.
34 |
35 | % Copyright (C) 2014 Andrea Vedaldi.
36 | % All rights reserved.
37 | %
38 | % This file is part of the VLFeat library and is made available under
39 | % the terms of the BSD license (see the COPYING file).
40 |
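Following the remark above, a Caffe LRN configuration can be mapped to PARAM as in this sketch (the numeric values are illustrative):

    % Caffe: local_size = 5, k = 1, alpha = 1e-4, beta = 0.75
    N = 5 ; kappa = 1 ; alphaCaffe = 1e-4 ; beta = 0.75 ;
    param = [N kappa alphaCaffe/N beta] ; % undo Caffe's N*ALPHA convention
    y = vl_nnnormalize(x, param) ;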
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/saveobj.m:
--------------------------------------------------------------------------------
1 | function s = saveobj(obj)
2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure
3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB
4 | %   structure S. This is particularly convenient to preserve future
5 | %   compatibility and to ship networks as plain structures,
6 | %   without embedding dependencies on code.
7 | %
8 | %   The object can be reconstructed by `obj = DagNN.loadobj(s)`.
9 | %
10 | %   As a side effect, the network is reset (all variables are
11 | %   cleared) and transferred to the CPU.
12 | %
13 | % See Also: dagnn.DagNN.loadobj, dagnn.DagNN.reset
14 |
15 | % Copyright (C) 2015-2016 Karel Lenc and Andrea Vedaldi.
16 | % All rights reserved.
17 | %
18 | % This file is part of the VLFeat library and is made available under
19 | % the terms of the BSD license (see the COPYING file).
20 |
21 | device = obj.device ;
22 | obj.move('cpu') ;
23 | s.vars = struct(...
24 | 'name', {obj.vars.name}, ...
25 | 'precious', {obj.vars.precious}) ;
26 | s.params = struct(...
27 | 'name', {obj.params.name}, ...
28 | 'value', {obj.params.value}, ...
29 | 'learningRate', {obj.params.learningRate}, ...
30 | 'weightDecay', {obj.params.weightDecay}) ;
31 | s.layers = struct(...
32 | 'name', {obj.layers.name}, ...
33 | 'type', {[]}, ...
34 | 'inputs', {obj.layers.inputs}, ...
35 | 'outputs', {obj.layers.outputs}, ...
36 | 'params', {obj.layers.params}, ...
37 | 'block', {[]}) ;
38 | s.meta = obj.meta ;
39 |
40 | for l = 1:numel(obj.layers)
41 | block = obj.layers(l).block ;
42 | slayer = block.save() ;
43 | s.layers(l).type = class(block) ;
44 | s.layers(l).block = slayer ;
45 | end
46 |
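A save/load round trip consistent with the help text above; the file name is illustrative:

    net_ = net.saveobj() ;
    save('mynet.mat', '-struct', 'net_') ; % plain struct on disk
    % later, possibly in another MATLAB session:
    net2 = dagnn.DagNN.loadobj(load('mynet.mat')) ;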
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/imread.hpp:
--------------------------------------------------------------------------------
1 | // @file imread.hpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__imread__
14 | #define __vl__imread__
15 |
16 | #include "data.hpp"
17 |
18 | namespace vl {
19 |
20 | #define VL_IMAGE_ERROR_MSG_MAX_LENGTH 256
21 |
22 | struct ImageShape
23 | {
24 | size_t height ;
25 | size_t width ;
26 | size_t depth ;
27 |
28 | ImageShape() ;
29 | ImageShape(size_t height, size_t width, size_t depth) ;
30 | ImageShape(ImageShape const & im) ;
31 | ImageShape & operator = (ImageShape const & im) ;
32 | bool operator == (ImageShape const & im) ;
33 |
34 | size_t getNumElements() const ;
35 | void clear() ;
36 | } ;
37 |
38 | class Image
39 | {
40 | public:
41 | Image() ;
42 | Image(Image const & im) ;
43 | Image(ImageShape const & shape, float * memory = NULL) ;
44 | ImageShape const & getShape() const ;
45 | float * getMemory() const ;
46 | void clear() ;
47 |
48 | protected:
49 | ImageShape shape ;
50 | float * memory ;
51 | } ;
52 |
53 | class ImageReader
54 | {
55 | public:
56 | ImageReader() ;
57 | ~ImageReader() ;
58 | vl::Error readShape(ImageShape & image, char const * fileName) ;
59 | vl::Error readPixels(float * memory, char const * fileName) ;
60 | char const * getLastErrorMessage() const ;
61 |
62 | private:
63 | class Impl ;
64 | Impl * impl ;
65 | } ;
66 | }
67 |
68 | #endif
69 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing guidelines
2 |
3 | ## How to contribute to MatConvNet
4 |
5 | For a description of how the library is structured, take a look at the
6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on
7 | the MatConvNet website.
8 |
9 | ### Issues
10 |
11 | We are grateful for any reported issues which help to remove bugs and
12 | improve the overall quality of the library. In particular, you can use
13 | the issue tracker to:
14 |
15 | * report bugs and unexpected crashes
16 | * discuss library design decisions
17 | * request new features
18 |
19 | When reporting bugs, it really helps if you can provide the following:
20 |
21 | * Which steps are needed to reproduce the issue
22 | * MATLAB, compiler and CUDA version (where appropriate)
23 |
24 | Before opening an issue to report a bug, please make sure that the bug
25 | is reproducible on the latest version of the master branch.
26 |
27 | The most difficult bugs to remove are those which cause crashes of the
28 | core functions (e.g. CUDA errors etc.). In those cases, it is really
29 | useful to create a *minimal example* which is able to reproduce the
30 | issue. We know that this may mean a bit of work, but it helps us to
31 | remove the bug more quickly.
32 |
33 | ### Pull requests
34 |
35 | Please make any Pull Requests against the `devel` branch rather than
36 | the `master` branch which is maintained as the latest stable release
37 | of the library.
38 |
39 | As a general rule, it is much easier to accept small Pull Requests
40 | that make a single improvement to the library than complex code
41 | changes that affect multiple parts of the library. When submitting
42 | substantial changes, it is useful if unit tests are provided with the
43 | code.
44 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/copy_gpu.cu:
--------------------------------------------------------------------------------
1 | // @file copy_gpu.cu
2 | // @brief Copy and other data operations (GPU)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "copy.hpp"
14 | #include "../datacu.hpp"
15 | #include <string.h>
16 |
17 | template <typename type> __global__ void
18 | fill_kernel (type * data, type value, size_t size)
19 | {
20 | int index = threadIdx.x + blockIdx.x * blockDim.x ;
21 | if (index < size) data[index] = value ;
22 | }
23 |
24 | namespace vl { namespace impl {
25 |
26 |   template <typename type>
27 |   struct operations<vl::GPU, type>
28 | {
29 | typedef type data_type ;
30 |
31 | static vl::Error
32 | copy(data_type * dest,
33 | data_type const * src,
34 | size_t numElements)
35 | {
36 | cudaMemcpy(dest, src, numElements * sizeof(data_type), cudaMemcpyDeviceToDevice) ;
37 | return vlSuccess ;
38 | }
39 |
40 | static vl::Error
41 | fill(data_type * dest,
42 | size_t numElements,
43 | data_type value)
44 | {
45 |       fill_kernel <type>
46 |       <<< divideUpwards(numElements, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
47 |       (dest, value, numElements) ; // argument order matches (data, value, size)
48 |
49 | cudaError_t error = cudaGetLastError() ;
50 | if (error != cudaSuccess) {
51 | return vlErrorCuda ;
52 | }
53 | return vlSuccess ;
54 | }
55 | } ;
56 |
57 | } }
58 |
59 | template struct vl::impl::operations<vl::GPU, float> ;
60 |
61 | #ifdef ENABLE_DOUBLE
62 | template struct vl::impl::operations<vl::GPU, double> ;
63 | #endif
--------------------------------------------------------------------------------
/Synth.m:
--------------------------------------------------------------------------------
1 | % Copyright (C) 2017 Omry Sendik
2 | % All rights reserved.
3 | %
4 | % This file is part of the Deep Correlations for Texture Synthesis code and is made available under
5 | % the terms of the BSD license (see the COPYING file).
6 |
7 | clc;
8 | clear all;
9 | close all;
10 |
11 | %%
12 | FirstTime = 0;
13 |
14 | if(FirstTime) %% Change this flag to 1 if this is the first time you run the code and need to compile it
15 | DCor_compile();
16 | end
17 | DCor_addpaths();
18 |
19 | %% Load the images and the net
20 | origSrcImg = imread(strrep('.\Data\Texture13.png','\',filesep));
21 |
22 | params = GetSynthParams();
23 | net = GenNet(params.lastLayerNum);
24 | params.netImgSz = [225 225 3];
25 |
26 | params.OutString = strrep('.\Data\Output\Texture13_Result','\',filesep);
27 | params.USFac = 1; %% Upscaling factor
28 |
29 | SrcImg = ImNorm(imresize(origSrcImg,params.netImgSz(1:2)),net.meta.normalization.averageImage);
30 |
31 | %% Toroidal Expansion
32 | % SrcImg = ImExpand(SrcImg,params);
33 | % params.USFac = 1; %% For the toroidal expansion only
34 |
35 | %% Synthesis
36 | Verify(params);
37 |
38 | initNoise = 0.1*randn(round(params.USFac*size(SrcImg,1)),round(params.USFac*size(SrcImg,2)),size(SrcImg,3));
39 | curTgtImg = initNoise;
40 | clear errorLoss;
41 | errorLoss = [inf];
42 |
43 | SrcMats = GetSRCMats(SrcImg,initNoise,net,params);
44 |
45 | funloss = @(x) (GetLossFromImg(net,x,SrcImg,SrcMats,params));
46 | fungrad = @(x) (GetGrad(net,x,SrcImg,SrcMats,params));
47 | fun = @(x)fminunc_wrapper( x, funloss, fungrad);
48 |
49 | tgtImg = lbfgsb_mywrapper(curTgtImg,initNoise,fun,net,params);
50 |
51 | %% Save outputs
52 | WriteResults(tgtImg,ImDenorm(SrcImg,net.meta.normalization.averageImage),params);
53 |
--------------------------------------------------------------------------------
/CalcErrorLoss.m:
--------------------------------------------------------------------------------
1 | function [ErrorLoss, Grads] = CalcErrorLoss(srcMats, tgtFeatures, params, verbose)
2 | [StyleErrorLoss,StyleGrads] = CalcStyleErrorLoss(tgtFeatures.styleFeatures, srcMats.GramMats,params);
3 | if(verbose) disp(['Alpha*StyleErrorLoss=',num2str(params.styleLossWeight*StyleErrorLoss)]); end
4 |
5 | if(params.ACorrLossWeight~=0)
6 | [ACorrerrorLoss,ACorrGrads] = CalcACorrErrorLoss(tgtFeatures.ACorrFeatures, srcMats.ACorrMats,params);
7 | if(verbose) disp(['Beta*ACorrerrorLoss=',num2str(params.ACorrLossWeight*ACorrerrorLoss)]); end
8 | else
9 | ACorrerrorLoss=0;ACorrGrads=[];
10 | end
11 |
12 | if(params.DiversityLossWeight~=0)
13 | [DiversityErrorLoss,DiversityGrads] = CalcStructureErrorLoss(tgtFeatures.DiversityFeatures, srcMats.DiversityMats,params);
14 | if(verbose) disp(['Delta*DiversityErrorLoss=',num2str(params.DiversityLossWeight*DiversityErrorLoss)]); end
15 | else
16 | DiversityErrorLoss=0;DiversityGrads=[];
17 | end
18 |
19 | if(params.SmoothnessLossWeight~=0)
20 | [SmoothnessErrorLoss,SmoothnessGrads] = CalcSoftMinSmoothnessErrorLoss(tgtFeatures.SmoothnessFeatures, params);
21 | if(verbose) disp(['Gamma*SmoothnessErrorLoss=',num2str(params.SmoothnessLossWeight*SmoothnessErrorLoss)]); end
22 | else
23 | SmoothnessErrorLoss=0;SmoothnessGrads=[];
24 | end
25 |
26 | ErrorLoss = params.styleLossWeight*StyleErrorLoss+...
27 | params.ACorrLossWeight*ACorrerrorLoss+...
28 | params.DiversityLossWeight*DiversityErrorLoss+...
29 | params.SmoothnessLossWeight*SmoothnessErrorLoss;
30 | Grads = CombineGrads(StyleGrads,ACorrGrads,DiversityGrads,SmoothnessGrads,params);
31 | end
32 |
33 |
34 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnrelu.m:
--------------------------------------------------------------------------------
1 | function y = vl_nnrelu(x,dzdy,varargin)
2 | %VL_NNRELU CNN rectified linear unit.
3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data
4 | % X. X can have arbitrary size.
5 | %
6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block
7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as
8 | % X and Y respectively.
9 | %
10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options:
11 | %
12 | % `Leak`:: 0
13 | % Set the leak factor, a non-negative number. Y is equal to X if
14 | % X is not smaller than zero; otherwise, Y is equal to X
15 | %     multiplied by the leak factor. By default, the leak factor is
16 | %     zero; for positive values one obtains the leaky ReLU
17 | % unit.
18 | %
19 | % ADVANCED USAGE
20 | %
21 | % As a further optimization, in the backward computation it is
22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then
23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY).
24 | % This is useful because it means that the buffer X does not need to
25 | % be remembered in the backward pass.
26 |
27 | % Copyright (C) 2014-15 Andrea Vedaldi.
28 | % All rights reserved.
29 | %
30 | % This file is part of the VLFeat library and is made available under
31 | % the terms of the BSD license (see the COPYING file).
32 |
33 | opts.leak = 0 ;
34 | opts = vl_argparse(opts, varargin, 'nonrecursive') ;
35 |
36 | if opts.leak == 0
37 | if nargin <= 1 || isempty(dzdy)
38 | y = max(x, 0) ;
39 | else
40 | y = dzdy .* (x > 0) ;
41 | end
42 | else
43 | if nargin <= 1 || isempty(dzdy)
44 | y = x .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
45 | else
46 | y = dzdy .* (opts.leak + (1 - opts.leak) * (x > 0)) ;
47 | end
48 | end
49 |
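Two usage sketches based on the options above:

    y = vl_nnrelu(x, [], 'leak', 0.1) ; % leaky ReLU, slope 0.1 for x < 0
    % memory-saving backward pass: Y may replace X as the first argument
    dzdx = vl_nnrelu(y, dzdy) ;         % same result as vl_nnrelu(x, dzdy)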
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Concat.m:
--------------------------------------------------------------------------------
1 | classdef Concat < dagnn.ElementWise
2 | properties
3 | dim = 3
4 | end
5 |
6 | properties (Transient)
7 | inputSizes = {}
8 | end
9 |
10 | methods
11 | function outputs = forward(obj, inputs, params)
12 | outputs{1} = vl_nnconcat(inputs, obj.dim) ;
13 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
14 | end
15 |
16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
17 | derInputs = vl_nnconcat(inputs, obj.dim, derOutputs{1}, 'inputSizes', obj.inputSizes) ;
18 | derParams = {} ;
19 | end
20 |
21 | function reset(obj)
22 | obj.inputSizes = {} ;
23 | end
24 |
25 | function outputSizes = getOutputSizes(obj, inputSizes)
26 | sz = inputSizes{1} ;
27 | for k = 2:numel(inputSizes)
28 | sz(obj.dim) = sz(obj.dim) + inputSizes{k}(obj.dim) ;
29 | end
30 | outputSizes{1} = sz ;
31 | end
32 |
33 | function rfs = getReceptiveFields(obj)
34 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ;
35 | if obj.dim == 3 || obj.dim == 4
36 | rfs = getReceptiveFields@dagnn.ElementWise(obj) ;
37 | rfs = repmat(rfs, numInputs, 1) ;
38 | else
39 | for i = 1:numInputs
40 | rfs(i,1).size = [NaN NaN] ;
41 | rfs(i,1).stride = [NaN NaN] ;
42 | rfs(i,1).offset = [NaN NaN] ;
43 | end
44 | end
45 | end
46 |
47 | function load(obj, varargin)
48 | s = dagnn.Layer.argsToStruct(varargin{:}) ;
49 | % backward file compatibility
50 | if isfield(s, 'numInputs'), s = rmfield(s, 'numInputs') ; end
51 | load@dagnn.Layer(obj, s) ;
52 | end
53 |
54 | function obj = Concat(varargin)
55 | obj.load(varargin{:}) ;
56 | end
57 | end
58 | end
59 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/imread.cpp:
--------------------------------------------------------------------------------
1 | // @file imread.cpp
2 | // @brief Image reader
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #include "imread.hpp"
14 | #include <string.h>
15 |
16 | vl::ImageShape::ImageShape()
17 | : height(0), width(0), depth(0)
18 | { }
19 |
20 | vl::ImageShape::ImageShape(size_t height, size_t width, size_t depth)
21 | : height(height), width(width), depth(depth)
22 | { }
23 |
24 | vl::ImageShape::ImageShape(ImageShape const & im)
25 | : height(im.height), width(im.width), depth(im.depth)
26 | { }
27 |
28 | vl::ImageShape & vl::ImageShape::operator =(vl::ImageShape const & im)
29 | {
30 | height = im.height ;
31 | width = im.width ;
32 | depth = im.depth ;
33 | return *this ;
34 | }
35 |
36 | bool vl::ImageShape::operator == (vl::ImageShape const & im)
37 | {
38 | return
39 | (height == im.height) &
40 | (width == im.width) &
41 | (depth == im.depth) ;
42 | }
43 |
44 | size_t vl::ImageShape::getNumElements() const
45 | {
46 | return height*width*depth ;
47 | }
48 |
49 | void vl::ImageShape::clear()
50 | {
51 | height = 0 ;
52 | width = 0 ;
53 | depth = 0 ;
54 | }
55 |
56 | vl::Image::Image()
57 | : shape(), memory(NULL)
58 | { }
59 |
60 | vl::Image::Image(Image const & im)
61 | : shape(im.shape), memory(im.memory)
62 | { }
63 |
64 | vl::Image::Image(vl::ImageShape const & shape, float * memory)
65 | : shape(shape), memory(memory)
66 | { }
67 |
68 | vl::ImageShape const & vl::Image::getShape() const { return shape ; }
69 | float * vl::Image::getMemory() const { return memory ; }
70 |
71 | void vl::Image::clear()
72 | {
73 | shape.clear() ;
74 | memory = 0 ;
75 | }
76 |
--------------------------------------------------------------------------------
/GetSynthParams.m:
--------------------------------------------------------------------------------
1 | function [ params ] = GetSynthParams()
2 |
3 | params.styleMatchLayerInds = [5 10 19 28];
4 | params.styleFeatureWeights = [1 1 1 1];
5 |
6 | params.ACorrMatchLayerInds = [10];
7 | params.ACorrFeatureWeights = [1];
8 |
9 | params.SmoothnessMatchLayerInds = [1];
10 | params.SmoothnessFeatureWeights = [1];
11 |
12 | params.DiversityMatchLayerInds = [10];
13 | params.DiversityFeatureWeights = [1];
14 |
15 | params.styleFeatureWeights = params.styleFeatureWeights./sum(params.styleFeatureWeights);
16 | params.ACorrFeatureWeights = params.ACorrFeatureWeights./sum(params.ACorrFeatureWeights);
17 | params.DiversityFeatureWeights = params.DiversityFeatureWeights./sum(params.DiversityFeatureWeights);
18 | params.SmoothnessFeatureWeights = params.SmoothnessFeatureWeights./sum(params.SmoothnessFeatureWeights);
19 | params.SmoothnessSigma = 0.0001;
20 | %%
21 | params.lastLayerNum = max([params.styleMatchLayerInds params.ACorrMatchLayerInds params.SmoothnessMatchLayerInds params.DiversityMatchLayerInds]);
22 | params.unitedLayerInds = unique(sort([params.styleMatchLayerInds params.ACorrMatchLayerInds params.SmoothnessMatchLayerInds params.DiversityMatchLayerInds]));
23 |
24 | % params.NormGrads = 0;
25 | % params.NormLosses = 0;
26 | params.Verbose = 1;
27 | params.NNVerbose = 1;
28 |
29 | %% Hyper Params
30 | params.styleLossWeight = 0.5;
31 | params.ACorrLossWeight = 0.5*1E-4;
32 | params.SmoothnessLossWeight = -0.0000075;
33 | params.DiversityLossWeight = -1*1E-6;
34 |
35 | %% LBFGSB opts
36 | params.lbfgs_opts.factr = 1e7;
37 | params.lbfgs_opts.pgtol = 1e-5;
38 | params.lbfgs_opts.printEvery = 5;
39 | params.lbfgs_opts.maxIts = 300;
40 | params.lbfgs_opts.m = 5;
41 | params.lbfgs_opts.autoconverge = 1;
42 | params.lbfgs_opts.autoconvergelen = 50;
43 | params.lbfgs_opts.autoconvergethresh = 0.5;
44 |
45 |
46 | end
47 |
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/BatchNorm.m:
--------------------------------------------------------------------------------
1 | classdef BatchNorm < dagnn.ElementWise
2 | properties
3 | numChannels
4 | epsilon = 1e-4
5 | end
6 |
7 | methods
8 | function outputs = forward(obj, inputs, params)
9 | if strcmp(obj.net.mode, 'test')
10 | outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
11 | 'moments', params{3}, ...
12 | 'epsilon', obj.epsilon) ;
13 | else
14 | outputs{1} = vl_nnbnorm(inputs{1}, params{1}, params{2}, ...
15 | 'epsilon', obj.epsilon) ;
16 | end
17 | end
18 |
19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
20 | [derInputs{1}, derParams{1}, derParams{2}, derParams{3}] = ...
21 | vl_nnbnorm(inputs{1}, params{1}, params{2}, derOutputs{1}, ...
22 | 'epsilon', obj.epsilon) ;
23 | % multiply the moments update by the number of images in the batch
24 | % this is required to make the update additive for subbatches
25 | % and will eventually be normalized away
26 | derParams{3} = derParams{3} * size(inputs{1},4) ;
27 | end
28 |
29 | % ---------------------------------------------------------------------
30 | function obj = BatchNorm(varargin)
31 | obj.load(varargin{:}) ;
32 | end
33 |
34 | function params = initParams(obj)
35 | params{1} = ones(obj.numChannels,1,'single') ;
36 | params{2} = zeros(obj.numChannels,1,'single') ;
37 | params{3} = zeros(obj.numChannels,2,'single') ;
38 | end
39 |
40 | function attach(obj, net, index)
41 | attach@dagnn.ElementWise(obj, net, index) ;
42 | p = net.getParamIndex(net.layers(index).params{3}) ;
43 | net.params(p).trainMethod = 'average' ;
44 | net.params(p).learningRate = 0.01 ;
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/nnconcat.m:
--------------------------------------------------------------------------------
1 | classdef nnconcat < nntest
2 | methods (Test)
3 | function basic(test)
4 | pick = @(i,x) x{i} ;
5 | sz = [4,5,10,3] ;
6 | for dim = 1:3
7 | sz1 = sz ; sz1(dim) = 3 ;
8 | sz2 = sz ; sz2(dim) = 7 ;
9 | sz3 = sz ; sz3(dim) = 2 ;
10 | x1 = test.randn(sz1) ;
11 | x2 = test.randn(sz2) ;
12 | x3 = test.randn(sz3) ;
13 |
14 | y = vl_nnconcat({x1, x2, x3}, dim) ;
15 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
16 | dzdy = test.randn(size(y)) ;
17 | dzdx = vl_nnconcat({x1, x2, x3} ,dim, dzdy) ;
18 |
19 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
20 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
21 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
22 | end
23 | end
24 |
25 | function by_size(test)
26 | pick = @(i,x) x{i} ;
27 | sz = [4,5,10,3] ;
28 | for dim = 1:3
29 | sz1 = sz ; sz1(dim) = 3 ;
30 | sz2 = sz ; sz2(dim) = 7 ;
31 | sz3 = sz ; sz3(dim) = 2 ;
32 | x1 = test.randn(sz1) ;
33 | x2 = test.randn(sz2) ;
34 | x3 = test.randn(sz3) ;
35 |
36 | y = vl_nnconcat({x1, x2, x3}, dim) ;
37 | test.verifyEqual(size(y,dim), size(x1,dim)+size(x2,dim)+size(x3,dim)) ;
38 | dzdy = test.randn(size(y)) ;
39 | dzdx = vl_nnconcat({}, dim, dzdy, 'inputSizes', {sz1, sz2, sz3}) ;
40 |
41 | test.der(@(x1) vl_nnconcat({x1, x2, x3},dim), x1, dzdy, dzdx{1}, 1e-3*test.range) ;
42 | test.der(@(x2) vl_nnconcat({x1, x2, x3},dim), x2, dzdy, dzdx{2}, 1e-3*test.range) ;
43 | test.der(@(x3) vl_nnconcat({x1, x2, x3},dim), x3, dzdy, dzdx{3}, 1e-3*test.range) ;
44 | end
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Conv.m:
--------------------------------------------------------------------------------
1 | classdef Conv < dagnn.Filter
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | opts = {'cuDNN'}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | if ~obj.hasBias, params{2} = [] ; end
11 | outputs{1} = vl_nnconv(...
12 | inputs{1}, params{1}, params{2}, ...
13 | 'pad', obj.pad, ...
14 | 'stride', obj.stride, ...
15 | obj.opts{:}) ;
16 | end
17 |
18 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
19 | if ~obj.hasBias, params{2} = [] ; end
20 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconv(...
21 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
22 | 'pad', obj.pad, ...
23 | 'stride', obj.stride, ...
24 | obj.opts{:}) ;
25 | end
26 |
27 | function kernelSize = getKernelSize(obj)
28 | kernelSize = obj.size(1:2) ;
29 | end
30 |
31 | function outputSizes = getOutputSizes(obj, inputSizes)
32 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ;
33 | outputSizes{1}(3) = obj.size(4) ;
34 | end
35 |
36 | function params = initParams(obj)
37 | sc = sqrt(2 / prod(obj.size(1:3))) ;
38 | params{1} = randn(obj.size,'single') * sc ;
39 | if obj.hasBias
40 | params{2} = zeros(obj.size(4),1,'single') * sc ;
41 | end
42 | end
43 |
44 | function set.size(obj, ksize)
45 | % make sure that ksize has 4 dimensions
46 | ksize = [ksize(:)' 1 1 1 1] ;
47 | obj.size = ksize(1:4) ;
48 | end
49 |
50 | function obj = Conv(varargin)
51 | obj.load(varargin) ;
52 | % normalize field by implicitly calling setters defined in
53 | % dagnn.Filter and here
54 | obj.size = obj.size ;
55 | obj.stride = obj.stride ;
56 | obj.pad = obj.pad ;
57 | end
58 | end
59 | end
60 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_bench_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | % VL_BENCH_IMREADJPEG Evaluates the speed of imreadjpeg
2 |
3 | numThreads = 4 ;
4 | base = 'data/bench-imreadjpeg' ;
5 |
6 | files = {} ;
7 | files = dir(fullfile(base,'*.jpg')) ;
8 | files = fullfile(base, {files.name}) ;
9 | if numel(files) > 256, files = files(1:256) ; end
10 |
11 | for preallocate = [true, false]
12 | opts={'verbose','verbose', 'preallocate', preallocate} ;
13 | for t=1:4
14 | % simple read
15 | fprintf('direct read single thread\n') ;
16 | clear ims ;
17 | tic ;
18 | ims = vl_imreadjpeg(files, 'numThreads', 1, opts{:}) ;
19 | directSingle(t) = toc ;
20 | fprintf(' done\n') ;
21 | pause(1) ;
22 |
23 | % simple read
24 | fprintf('direct read multi thread\n') ;
25 | clear ims ;
26 | tic ;
27 | ims = vl_imreadjpeg(files, 'numThreads', numThreads, opts{:}) ;
28 | direct(t) = toc ;
29 | fprintf(' done\n') ;
30 | pause(1) ;
31 |
32 | % threaded read
33 | fprintf('issue prefetch\n') ;
34 | tic ;
35 | vl_imreadjpeg(files, 'prefetch', opts{:}) ;
36 | prefetch(t) = toc ;
37 | fprintf(' done [pause 6]\n') ;
38 | pause(6)
39 |
40 | fprintf('prefetched read\n') ;
41 | clear ims_ ; % do not account for the time required to delete this
42 | tic ;
43 | ims_ = vl_imreadjpeg(files, opts{:}) ;
44 | indirect(t) = toc ;
45 | pause(1) ;
46 | end
47 |
48 | n = numel(ims) ;
49 | fprintf('** test results preallocate %d\n', preallocate) ;
50 | fprintf('\tsingle thread: %.1f pm %.1f\n', mean(n./directSingle), std(n./directSingle)) ;
51 | fprintf('\t%d threads: %.1f pm %.1f\n', numThreads, mean(n./direct), std(n./direct)) ;
52 | fprintf('\tissue prefetch: %.1f pm %.1f\n', mean(n./prefetch), std(n./prefetch)) ;
53 | fprintf('\tretrieve prefetched: %.1f pm %.1f\n', mean(n./indirect), std(n./indirect)) ;
54 | fprintf('\n\n') ;
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/Crop.m:
--------------------------------------------------------------------------------
1 | classdef Crop < dagnn.ElementWise
2 | %CROP DagNN cropping layer.
3 | % This is a peculiar layer from FCN. It crops inputs{1} to
4 | % match the size of inputs{2} (starting with a base crop amount).
5 | % The crop amounts are recomputed from the input sizes on each forward pass.
6 |
7 | properties
8 | crop = [0 0]
9 | end
10 |
11 | properties (Transient)
12 | inputSizes = {}
13 | end
14 |
15 | methods
16 | function crop = getAdaptedCrops(obj)
17 | cropv = obj.inputSizes{1}(1) - obj.inputSizes{2}(1) ;
18 | cropu = obj.inputSizes{1}(2) - obj.inputSizes{2}(2) ;
19 | cropv1 = max(0, cropv - obj.crop(1)) ;
20 | cropu1 = max(0, cropu - obj.crop(2)) ;
21 | crop = [cropv - cropv1, cropv1, cropu - cropu1, cropu1] ;
22 | end
23 |
24 | function outputs = forward(obj, inputs, params)
25 | obj.inputSizes = cellfun(@size, inputs, 'UniformOutput', false) ;
26 | adjCrop = obj.getAdaptedCrops() ;
27 | outputs{1} = vl_nncrop(inputs{1}, adjCrop) ;
28 | end
29 |
30 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
31 | adjCrop = obj.getAdaptedCrops() ;
32 | derInputs{1} = vl_nncrop(inputs{1}, adjCrop, derOutputs{1}, obj.inputSizes{1}) ;
33 | derInputs{2} = [] ;
34 | derParams = {} ;
35 | end
36 |
37 | function reset(obj)
38 | obj.inputSizes = {} ;
39 | end
40 |
41 | function outputSizes = getOutputSizes(obj, inputSizes)
42 | obj.inputSizes = inputSizes ;
43 | crop = obj.getAdaptedCrops() ;
44 | outputSizes{1} = inputSizes{1} - [crop(1)+crop(2), crop(3)+crop(4), 0, 0] ;
45 | end
46 |
47 | function rfs = getReceptiveFields(obj)
48 | rfs(1,1).size = [1 1] ;
49 | rfs(1,1).stride = [1 1] ;
50 | rfs(1,1).offset = 1 + obj.crop ;
51 | rfs(2,1).size = [] ;
52 | rfs(2,1).stride = [] ;
53 | rfs(2,1).offset = [] ;
54 | end
55 |
56 | function obj = Crop(varargin)
57 | obj.load(varargin) ;
58 | end
59 | end
60 | end
61 |
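62 | % Worked example for getAdaptedCrops (illustrative): if inputs{1} is
63 | % 100x100, inputs{2} is 90x90 and obj.crop = [4 4], then
64 | % cropv = cropu = 10 and cropv1 = cropu1 = max(0, 10-4) = 6, so the
65 | % adapted crop is [4 6 4 6]: 4 pixels are removed from the top/left and
66 | % 6 from the bottom/right.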
--------------------------------------------------------------------------------
/GetLossFromImg.m:
--------------------------------------------------------------------------------
1 | function [errorLoss] = GetLossFromImg(net,curTgtImg,SrcImg,srcMats,params)
2 | global SavedDataPrevErrors;
3 | global SavedDataPrevDeltas;
4 | global SavedDataPrevImg;
5 |
6 | if(params.lbfgs_opts.autoconverge==1 && numel(SavedDataPrevDeltas)>=params.lbfgs_opts.autoconvergelen+1)
7 | if(prod(SavedDataPrevDeltas(end-params.lbfgs_opts.autoconvergelen:end)<=params.lbfgs_opts.autoconvergethresh))
8 | errorLoss = 0;
9 | disp('Auto Converge');
10 | return;
11 | end
12 | end
13 |
14 |
15 | curTgtImg = reshape(curTgtImg,[round(params.USFac*size(SrcImg,1)),round(params.USFac*size(SrcImg,2)),size(SrcImg,3)]);
16 | [tgtFeatures.styleFeatures, tgtFeatures.ACorrFeatures, tgtFeatures.DiversityFeatures, tgtFeatures.SmoothnessFeatures] = CalcNetFeatures(net, curTgtImg, params);
17 |
18 | [errorLoss] = CalcErrorLoss(srcMats,tgtFeatures,params, 1);
19 | disp(['Total Loss=',num2str(errorLoss)]);
20 |
21 | if(~isempty(SavedDataPrevImg))
22 | CurrMaxPixDiff = max(abs(SavedDataPrevImg(:)-curTgtImg(:)));
23 | else
24 | CurrMaxPixDiff = 0;
25 | end
26 | SavedDataPrevErrors = [SavedDataPrevErrors errorLoss];
27 | SavedDataPrevDeltas = [SavedDataPrevDeltas CurrMaxPixDiff];
28 | SavedDataPrevImg = curTgtImg;
29 |
30 | figure(100);
31 | subplot(2,2,1); imshow(uint8(ImDenorm(curTgtImg,net.meta.normalization.averageImage))); title('Result');
32 | subplot(2,2,2); imshow(uint8(ImDenorm(SrcImg,net.meta.normalization.averageImage))); title('Input');
33 | subplot(2,2,3); plot(1:numel(SavedDataPrevErrors),log10(SavedDataPrevErrors+1)); title('Error');
34 | subplot(2,2,4); plot(1:numel(SavedDataPrevDeltas),SavedDataPrevDeltas); title('Max Pixel Diffs'); ylim([0 30]);
35 |
36 | drawnow
37 | save([params.OutString,'__CurTgtImg.mat'],'curTgtImg');
38 |
39 |
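40 | % Summary (inferred from the code above): this function evaluates the
41 | % synthesis loss of the current target image against the precomputed
42 | % source statistics, records the loss and the maximum per-pixel change
43 | % for the auto-convergence test, and updates the progress figure.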
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/suite/Scale.m:
--------------------------------------------------------------------------------
1 | classdef Scale < nntest
2 | properties
3 | x
4 | a
5 | b
6 | end
7 |
8 | properties (TestParameter)
9 | dim = {1 2 3 4}
10 | end
11 |
12 | methods (TestClassSetup)
13 | function data(test,device)
14 | test.x = test.randn(15,14,3,2) ;
15 | test.a = test.randn(15,14,3,2) ;
16 | test.b = test.randn(15,14,3,2) ;
17 | end
18 | end
19 |
20 | methods (Test)
21 | function data_and_parameters(test, dim)
22 | x = test.x ;
23 | a = test.a ;
24 | b = test.b ;
25 |
26 | a = sum(a, dim) ;
27 | b = sum(b, dim) ;
28 |
29 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
30 |
31 | output = scale.forward({x}, {a,b}) ;
32 | dzdy = test.randn(size(output{1})) ;
33 | [derInputs, derParams] = scale.backward({x}, {a,b}, {dzdy}) ;
34 |
35 | pick = @(x) x{1} ;
36 | dzdx = derInputs{1} ;
37 | dzda = derParams{1} ;
38 | dzdb = derParams{2} ;
39 |
40 | test.der(@(x) pick(scale.forward({x},{a,b})), x, dzdy, dzdx, 1e-2 * test.range) ;
41 | test.der(@(a) pick(scale.forward({x},{a,b})), a, dzdy, dzda, 1e-2 * test.range) ;
42 | test.der(@(b) pick(scale.forward({x},{a,b})), b, dzdy, dzdb, 1e-2 * test.range) ;
43 | end
44 |
45 | function data_only(test, dim)
46 | x = test.x ;
47 | a = test.a ;
48 | b = test.b ;
49 |
50 | a = sum(a, dim) ;
51 | b = sum(b, dim) ;
52 |
53 | scale = dagnn.Scale('size', size(a), 'hasBias', true) ;
54 |
55 | output = scale.forward({x,a,b}, {}) ;
56 | dzdy = test.randn(size(output{1})) ;
57 | [derInputs, derParams] = scale.backward({x,a,b}, {}, {dzdy}) ;
58 |
59 | pick = @(x) x{1} ;
60 | dzdx = derInputs{1} ;
61 | dzda = derInputs{2} ;
62 | dzdb = derInputs{3} ;
63 |
64 | test.der(@(x) pick(scale.forward({x,a,b},{})), x, dzdy, dzdx, 1e-2 * test.range) ;
65 | test.der(@(a) pick(scale.forward({x,a,b},{})), a, dzdy, dzda, 1e-2 * test.range) ;
66 | test.der(@(b) pick(scale.forward({x,a,b},{})), b, dzdy, dzdb, 1e-2 * test.range) ;
67 | end
68 | end
69 | end
70 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnbnorm.m:
--------------------------------------------------------------------------------
1 | %VL_NNBNORM CNN batch normalisation.
2 | % Y = VL_NNBNORM(X,G,B) applies batch normalization to the input
3 | % X. Batch normalization is defined as:
4 | %
5 | % Y(i,j,k,t) = G(k) * (X(i,j,k,t) - mu(k)) / sigma(k) + B(k)
6 | %
7 | % where:
8 | %
9 | % mu(k) = mean_ijt X(i,j,k,t),
10 | % sigma2(k) = mean_ijt (X(i,j,k,t) - mu(k))^2,
11 | % sigma(k) = sqrt(sigma2(k) + EPSILON)
12 | %
13 | % are respectively the per-channel mean, variance, and standard
14 | % deviation of each feature channel in the data X. The parameters
15 | % G(k) and B(k) are multiplicative and additive constants used to
16 | % scale each data channel.
17 | %
18 | % Means and variances are accumulated across all the data items
19 | % (images) stored in the 4D tensor X (from which the name batch
20 | % normalization is derived). The constant EPSILON is used to
21 | % regularize the computation of sigma(k) and to avoid division by
22 | % zero.
23 | %
24 | % [DZDX,DZDG,DZDB] = VL_NNBNORM(X,G,B,DZDY) computes the derivatives
25 | % of the block projected onto DZDY. DZDX, DZDG, DZDB and DZDY have
26 | % the same dimensions as X, G, B, and Y respectively.
27 | %
28 | % Optionally, [Y,MOMENTS] = VL_NNBNORM(...) and
29 | % [DZDX,DZDG,DZDB,MOMENTS] = VL_NNBNORM(...,DZDY) return the values
30 | % of the vectors mu and sigma in the formulas above. Here, MOMENTS
31 | % is a DEPTH x 2 array [MU, SIGMA].
32 | %
33 | % VL_NNBNORM(..., 'Option', value) takes the following options:
34 | %
35 | % `Epsilon`:: 1e-4
36 | % Specifies the constant EPSILON in the formulas above.
37 | %
38 | % `Moments`:: unspecified
39 | % Specifies an array MOMENTS with the values of mu and sigma to
40 | % use instead of computing them according to the equations
41 | % above. This is useful to disable batch normalization during
42 | % testing.
43 | %
44 | % See also: VL_NNNORMALIZE().
45 |
46 | % Copyright (C) 2015 Sébastien Ehrhardt, Karel Lenc and Andrea Vedaldi.
47 | % All rights reserved.
48 | %
49 | % This file is part of the VLFeat library and is made available under
50 | % the terms of the BSD license (see the COPYING file).
51 |
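52 | % Example (a minimal numerical check of the formulas above; the data is
53 | % random and the MEX file performs the actual computation):
54 | %
55 | %   x = randn(8,8,4,10,'single') ;
56 | %   g = randn(4,1,'single') ; b = randn(4,1,'single') ;
57 | %   epsilon = 1e-4 ;
58 | %   mu = mean(mean(mean(x,1),2),4) ;
59 | %   sigma2 = mean(mean(mean(bsxfun(@minus,x,mu).^2,1),2),4) ;
60 | %   y_ref = bsxfun(@plus, bsxfun(@times, reshape(g,1,1,[]), ...
61 | %     bsxfun(@rdivide, bsxfun(@minus,x,mu), sqrt(sigma2+epsilon))), ...
62 | %     reshape(b,1,1,[])) ;
63 | %   y = vl_nnbnorm(x, g, b, 'Epsilon', epsilon) ;  % y should match y_ref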
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/datamex.hpp:
--------------------------------------------------------------------------------
1 | // @file datamex.hpp
2 | // @brief Basic data structures (MEX support)
3 | // @author Andrea Vedaldi
4 |
5 | /*
6 | Copyright (C) 2015-16 Andrea Vedaldi.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__datamex__
14 | #define __vl__datamex__
15 |
16 | #include "mex.h"
17 |
18 | #if ENABLE_GPU
19 | #include "gpu/mxGPUArray.h"
20 | #endif
21 |
22 | #include "data.hpp"
23 |
24 | namespace vl {
25 |
26 | class MexTensor ;
27 |
28 | class MexContext : public Context
29 | {
30 | public:
31 | MexContext() ;
32 | ~MexContext() ;
33 |
34 | protected:
35 | #if ENABLE_GPU
36 | vl::Error initGpu() ;
37 | vl::Error validateGpu() ;
38 | mxArray * canary ; // if it breathes, the GPU state is valid
39 | bool gpuIsInitialized ;
40 | #endif
41 |
42 | friend class MexTensor ;
43 | } ;
44 |
45 | class MexTensor : public Tensor
46 | {
47 | public:
48 | MexTensor(MexContext & context) ;
49 | vl::Error init(mxArray const * array) ;
50 | vl::Error init(Device deviceType, Type dataType, TensorShape const & shape) ;
51 | vl::Error initWithZeros(Device deviceType, Type dataType, TensorShape const & shape) ;
52 | vl::Error initWithValue(Device deviceType, Type dataType, TensorShape const & shape, double value) ;
53 |
54 | mxArray * relinquish() ;
55 | void clear() ;
56 | ~MexTensor() ;
57 |
58 | size_t getMemorySize() const ;
59 |
60 | protected:
61 | MexContext & context ;
62 | mxArray const * array ;
63 | #ifdef ENABLE_GPU
64 | mxGPUArray const * gpuArray ;
65 | #endif
66 | bool isArrayOwner ;
67 |
68 | private: // prevention
69 | MexTensor(MexTensor const &) ;
70 | MexTensor & operator= (MexTensor & tensor) ;
71 | vl::Error initHelper(Device deviceType, Type dataType, TensorShape const & shape, bool fillWithZeros = false) ;
72 | } ;
73 |
74 | void print(char const * str, MexTensor const & tensor) ;
75 |
76 | void mexThrowError(Context const& context, vl::Error error) ;
77 | }
78 |
79 |
80 | #endif /* defined(__vl__datamex__) */
81 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nndropout.m:
--------------------------------------------------------------------------------
1 | function [y,mask] = vl_nndropout(x,varargin)
2 | %VL_NNDROPOUT CNN dropout.
3 | % [Y,MASK] = VL_NNDROPOUT(X) applies dropout to the data X. MASK
4 | % is the randomly sampled dropout mask. Both Y and MASK have the
5 | % same size as X.
6 | %
7 | % VL_NNDROPOUT(X, 'rate', R) sets the dropout rate to R, the
8 | % probability that a variable is zeroed. The surviving entries of MASK
9 | % are scaled by 1/(1-R), so that MASK has unit expected value.
10 | %
11 | % [DZDX] = VL_NNDROPOUT(X, DZDY, 'mask', MASK) computes the
12 | % derivatives of the blocks projected onto DZDY. Note that MASK must
13 | % be specified in order to compute the derivative consistently with
14 | % the MASK randomly sampled in the forward pass. DZDX and DZDY have
15 | % the same dimensions as X and Y respectively.
16 | %
17 | % Note that in the original paper on dropout, at test time the
18 | % network weights for the dropout layers are scaled down to
19 | % compensate for having all the neurons active. In this
20 | % implementation the dropout function itself already does this
21 | % compensation during training. So at test time no alterations are
22 | % required.
23 |
24 | % Copyright (C) 2014-16 Andrea Vedaldi, Karel Lenc.
25 | % All rights reserved.
26 | %
27 | % This file is part of the VLFeat library and is made available under
28 | % the terms of the BSD license (see the COPYING file).
29 |
30 | opts.rate = 0.5 ;
31 | opts.mask = [] ;
32 |
33 | backMode = numel(varargin) > 0 && ~ischar(varargin{1}) ;
34 | if backMode
35 | dzdy = varargin{1} ;
36 | opts = vl_argparse(opts, varargin(2:end)) ;
37 | else
38 | opts = vl_argparse(opts, varargin) ;
39 | end
40 |
41 | % determine mask
42 | mask = opts.mask ;
43 | scale = cast(1 / (1 - opts.rate), 'like', x) ;
44 | if backMode && isempty(mask)
45 | warning('vl_nndropout: in backward mode the mask should be specified') ;
46 | end
47 | if isempty(mask)
48 | if isa(x,'gpuArray')
49 | mask = scale * (gpuArray.rand(size(x), classUnderlying(x)) >= opts.rate) ;
50 | else
51 | mask = scale * (rand(size(x), 'like', x) >= opts.rate) ;
52 | end
53 | end
54 |
55 | % do job
56 | if ~backMode
57 | y = mask .* x ;
58 | else
59 | y = mask .* dzdy ;
60 | end
61 |
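62 | % Example (a minimal forward/backward round trip on random data):
63 | %
64 | %   x = randn(4,4,2,3,'single') ;
65 | %   [y, mask] = vl_nndropout(x, 'rate', 0.5) ;   % forward: y = mask .* x
66 | %   dzdy = randn(size(y), 'like', y) ;
67 | %   dzdx = vl_nndropout(x, dzdy, 'mask', mask) ; % backward reuses the mask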
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/pooling.hpp:
--------------------------------------------------------------------------------
1 | // @file pooling.hpp
2 | // @brief Pooling block implementation
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef VL_POOLING_H
15 | #define VL_POOLING_H
16 |
17 | #include "../data.hpp"
18 | #include <cstddef>  // size_t (include name reconstructed; lost in extraction)
19 |
20 | namespace vl { namespace impl {
21 |
22 | template <typename type>
23 | struct pooling_max {
24 | typedef type data_type ;
25 |
26 | static vl::Error
27 | forward(data_type* output,
28 | data_type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t poolHeight, size_t poolWidth,
31 | size_t strideY, size_t strideX,
32 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
33 |
34 | static vl::Error
35 | backward(data_type* derData,
36 | data_type const* data,
37 | data_type const* derOutput,
38 | size_t height, size_t width, size_t depth,
39 | size_t poolHeight, size_t poolWidth,
40 | size_t strideY, size_t strideX,
41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
42 | } ;
43 |
44 | template <typename type>
45 | struct pooling_average {
46 | typedef type data_type ;
47 |
48 | static vl::Error
49 | forward(data_type* output,
50 | data_type const* data,
51 | size_t height, size_t width, size_t depth,
52 | size_t poolHeight, size_t poolWidth,
53 | size_t strideY, size_t strideX,
54 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
55 |
56 | static vl::Error
57 | backward(type* derData,
58 | type const* derOutput,
59 | size_t height, size_t width, size_t depth,
60 | size_t poolHeight, size_t poolWidth,
61 | size_t strideY, size_t strideX,
62 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ;
63 | } ;
64 |
65 | } }
66 |
67 | #endif /* defined(VL_POOLING_H) */
68 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/@DagNN/loadobj.m:
--------------------------------------------------------------------------------
1 | function obj = loadobj(s)
2 | % LOADOBJ Initialize a DagNN object from a structure.
3 | % OBJ = LOADOBJ(S) initializes a DagNN object from the structure
4 | % S. It is the opposite of S = OBJ.SAVEOBJ().
5 | % If S is a string, initializes the DagNN object with data
6 | % from a mat-file S. Otherwise, if S is an instance of `dagnn.DagNN`,
7 | % returns S.
8 |
9 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi.
10 | % All rights reserved.
11 | %
12 | % This file is part of the VLFeat library and is made available under
13 | % the terms of the BSD license (see the COPYING file).
14 |
15 | if ischar(s), s = load(s); end
16 | if isstruct(s)
17 | assert(isfield(s, 'layers'), 'Invalid model.');
18 | if ~isstruct(s.layers)
19 | warning('The model appears to be a `simplenn` model. Using `fromSimpleNN` instead.');
20 | obj = dagnn.DagNN.fromSimpleNN(s);
21 | return;
22 | end
23 | obj = dagnn.DagNN() ;
24 | try
25 | for l = 1:numel(s.layers)
26 | constr = str2func(s.layers(l).type) ;
27 | block = constr() ;
28 | block.load(struct(s.layers(l).block)) ;
29 | obj.addLayer(...
30 | s.layers(l).name, ...
31 | block, ...
32 | s.layers(l).inputs, ...
33 | s.layers(l).outputs, ...
34 | s.layers(l).params,...
35 | 'skipRebuild', true) ;
36 | end
37 | catch e % Make sure the DagNN object is in valid state
38 | obj.rebuild();
39 | rethrow(e);
40 | end
41 | obj.rebuild();
42 | if isfield(s, 'params')
43 | for f = setdiff(fieldnames(s.params)','name')
44 | f = char(f) ;
45 | for i = 1:numel(s.params)
46 | p = obj.getParamIndex(s.params(i).name) ;
47 | obj.params(p).(f) = s.params(i).(f) ;
48 | end
49 | end
50 | end
51 | if isfield(s, 'vars')
52 | for f = setdiff(fieldnames(s.vars)','name')
53 | f = char(f) ;
54 | for i = 1:numel(s.vars)
55 | p = obj.getVarIndex(s.vars(i).name) ;
56 | obj.vars(p).(f) = s.vars(i).(f) ;
57 | end
58 | end
59 | end
60 | for f = setdiff(fieldnames(s)', {'vars','params','layers'})
61 | f = char(f) ;
62 | obj.(f) = s.(f) ;
63 | end
64 | elseif isa(s, 'dagnn.DagNN')
65 | obj = s ;
66 | else
67 | error('Unknown data type %s for `loadobj`.', class(s));
68 | end
69 |
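70 | % Example (illustrative; 'net.mat' is a hypothetical file saved with
71 | % S = NET.SAVEOBJ() via save('net.mat', '-struct', 's')):
72 | %   net = dagnn.DagNN.loadobj('net.mat') ;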
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnconv.hpp:
--------------------------------------------------------------------------------
1 | // @file nnconv.hpp
2 | // @brief Convolution block
3 | // @author Andrea Vedaldi
4 | // @author Max Jaderberg
5 |
6 | /*
7 | Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg
8 | Copyright (C) 2015 Andrea Vedaldi.
9 |
10 | All rights reserved.
11 |
12 | This file is part of the VLFeat library and is made available under
13 | the terms of the BSD license (see the COPYING file).
14 | */
15 |
16 | #ifndef __vl__nnconv__
17 | #define __vl__nnconv__
18 |
19 | #include "data.hpp"
20 |
21 | namespace vl {
22 |
23 | vl::Error
24 | nnconv_forward(vl::Context& context,
25 | vl::Tensor output, double outputMult,
26 | vl::Tensor data, double dataMult,
27 | vl::Tensor filters,
28 | vl::Tensor biases,
29 | int strideY, int strideX,
30 | int padTop, int padBottom,
31 | int padLeft, int padRight) ;
32 |
33 | vl::Error
34 | nnconv_backward(vl::Context& context,
35 | vl::Tensor derData,
36 | vl::Tensor derFilters,
37 | vl::Tensor derBiases,
38 | vl::Tensor data,
39 | vl::Tensor filters,
40 | vl::Tensor derOutput,
41 | int strideY, int strideX,
42 | int padTop, int padBottom,
43 | int padLeft, int padRight) ;
44 |
45 | vl::Error
46 | nnconvt_forward(vl::Context& context,
47 | vl::Tensor output,
48 | vl::Tensor data,
49 | vl::Tensor filters,
50 | vl::Tensor biases,
51 | int upsampleY, int upsampleX,
52 | int cropTop, int cropBottom,
53 | int cropLeft, int cropRight) ;
54 |
55 | vl::Error
56 | nnconvt_backward(vl::Context& context,
57 | vl::Tensor derData,
58 | vl::Tensor derFilters,
59 | vl::Tensor derBiases,
60 | vl::Tensor data,
61 | vl::Tensor filters,
62 | vl::Tensor derOutput,
63 | int upsampleY, int upsampleX,
64 | int cropTop, int cropBottom,
65 | int cropLeft, int cropRight) ;
66 | }
67 |
68 |
69 | #endif /* defined(__vl__nnconv__) */
70 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_imreadjpeg.m:
--------------------------------------------------------------------------------
1 | %VL_IMREADJPEG (A)synchronous multithreaded JPEG image loader.
2 | % IMAGES = VL_IMREADJPEG(FILES) reads the specified cell array
3 | % FILES of JPEG files into the cell array of images IMAGES.
4 | %
5 | % IMAGES = VL_IMREADJPEG(FILES, 'NumThreads', T) uses T parallel
6 | % threads to accelerate the operation. Note that this is
7 | % independent of the number of computational threads used by
8 | % MATLAB.
9 | %
10 | % VL_IMREADJPEG(FILES, 'Prefetch') starts reading the specified
11 | % images but returns immediately to MATLAB. Reading happens
12 | % concurrently with MATLAB in one or more separate threads. A
13 | % subsequent call IMAGES=VL_IMREADJPEG(FILES) *specifying exactly
14 | % the same files in the same order* will then return the loaded
15 | % images. This can be used to quickly load a batch of JPEG images
16 | % as MATLAB is busy doing something else.
17 | %
18 | % The function takes the following options:
19 | %
20 | % `Prefetch`:: not specified
21 | % If specified, run without blocking (see above).
22 | %
23 | % `Verbose`:: not specified
24 | % Increase the verbosity level.
25 | %
26 | % `NumThreads`:: `1`
27 | % Specify the number of threads used to read images. This number
28 | % must be at least 1. Note that it does not make sense to specify
29 | % a number larger than the number of available CPU cores, and
30 | % often fewer threads are sufficient as reading images is memory
31 | % access bound rather than CPU bound.
32 | %
33 | % `Resize`:: not specified
34 | % If specified, turn on image resizing. The argument can either
35 | % specify the desired [HEIGHT, WIDTH] or be simply a scalar
36 | % SIZE. In the latter case, the image is resized isotropically so
37 | % that the shorter side is equal to SIZE.
38 | %
39 | % Resizing uses bilinear interpolation. When shrinking, a simple
40 | % form of antialiasing is used by stretching the bilinear filter
41 | % over several input pixels to average them. The method is the
42 | % same as MATLAB IMRESIZE() function (the two functions are
43 | % numerically equivalent).
44 |
45 | % Copyright (C) 2014-16 Andrea Vedaldi.
46 | % All rights reserved.
47 | %
48 | % This file is part of the VLFeat library and is made available under
49 | % the terms of the BSD license (see the COPYING file).
50 |
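51 | % Example (an illustrative prefetch pattern; the file names are
52 | % hypothetical):
53 | %
54 | %   files = {'im1.jpg', 'im2.jpg'} ;
55 | %   vl_imreadjpeg(files, 'NumThreads', 4, 'Prefetch') ; % returns at once
56 | %   % ... do other work in MATLAB while the images are read ...
57 | %   images = vl_imreadjpeg(files, 'NumThreads', 4) ;    % collect the batch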
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/ReLU.m:
--------------------------------------------------------------------------------
1 | classdef ReLU < dagnn.ElementWise
2 | properties
3 | useShortCircuit = true
4 | leak = 0
5 | opts = {}
6 | end
7 |
8 | methods
9 | function outputs = forward(obj, inputs, params)
10 | outputs{1} = vl_nnrelu(inputs{1}, [], ...
11 | 'leak', obj.leak, obj.opts{:}) ;
12 | end
13 |
14 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
15 | derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}, ...
16 | 'leak', obj.leak, ...
17 | obj.opts{:}) ;
18 | derParams = {} ;
19 | end
20 |
21 | function forwardAdvanced(obj, layer)
22 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
23 | forwardAdvanced@dagnn.Layer(obj, layer) ;
24 | return ;
25 | end
26 | net = obj.net ;
27 | in = layer.inputIndexes ;
28 | out = layer.outputIndexes ;
29 | net.vars(out).value = vl_nnrelu(net.vars(in).value, [], ...
30 | 'leak', obj.leak, ...
31 | obj.opts{:}) ;
32 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) - 1;
33 | if ~net.vars(in).precious & net.numPendingVarRefs(in) == 0
34 | net.vars(in).value = [] ;
35 | end
36 | end
37 |
38 | function backwardAdvanced(obj, layer)
39 | if ~obj.useShortCircuit || ~obj.net.conserveMemory
40 | backwardAdvanced@dagnn.Layer(obj, layer) ;
41 | return ;
42 | end
43 | net = obj.net ;
44 | in = layer.inputIndexes ;
45 | out = layer.outputIndexes ;
46 |
47 | if isempty(net.vars(out).der), return ; end
48 |
49 | derInput = vl_nnrelu(net.vars(out).value, net.vars(out).der, ...
50 | 'leak', obj.leak, obj.opts{:}) ;
51 |
52 | if ~net.vars(out).precious
53 | net.vars(out).der = [] ;
54 | net.vars(out).value = [] ;
55 | end
56 |
57 | if net.numPendingVarRefs(in) == 0
58 | net.vars(in).der = derInput ;
59 | else
60 | net.vars(in).der = net.vars(in).der + derInput ;
61 | end
62 | net.numPendingVarRefs(in) = net.numPendingVarRefs(in) + 1 ;
63 | end
64 |
65 | function obj = ReLU(varargin)
66 | obj.load(varargin) ;
67 | end
68 | end
69 | end
70 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/nnbnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file nnbnorm.hpp
2 | // @brief Batch normalization block
3 | // @author Sebastien Ehrhardt
4 | // @author Andrea Vedaldi
5 |
6 | /*
7 | Copyright (C) 2015-16 Sebastien Ehrhardt and Andrea Vedaldi.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #ifndef __vl__nnbnorm__
15 | #define __vl__nnbnorm__
16 |
17 | #include "data.hpp"
18 | #include <cstddef>  // reconstructed; original include name lost in extraction
19 |
20 | namespace vl {
21 |
22 | // This version computes mean and sigma
23 | vl::Error
24 | nnbnorm_forward(vl::Context& context,
25 | vl::Tensor output,
26 | vl::Tensor moments, // [output: can pass null]
27 | vl::Tensor data,
28 | vl::Tensor filters,
29 | vl::Tensor biases,
30 | double epsilon) ;
31 |
32 | // This version uses the mean and sigma specified
33 | vl::Error
34 | nnbnorm_forward_given_moments(vl::Context& context,
35 | vl::Tensor output,
36 | vl::Tensor moments, // input
37 | vl::Tensor data,
38 | vl::Tensor filters,
39 | vl::Tensor biases) ;
40 |
41 | vl::Error
42 | nnbnorm_backward(vl::Context& context,
43 | vl::Tensor derData,
44 | vl::Tensor derFilters,
45 | vl::Tensor derBiases,
46 | vl::Tensor moments,
47 | vl::Tensor data,
48 | vl::Tensor filters,
49 | vl::Tensor biases,
50 | vl::Tensor derOutput,
51 | double epsilon) ;
52 |
53 | vl::Error
54 | nnbnorm_backward_given_moments(vl::Context& context,
55 | vl::Tensor derData,
56 | vl::Tensor derFilters,
57 | vl::Tensor derBiases,
58 | vl::Tensor moments,
59 | vl::Tensor data,
60 | vl::Tensor filters,
61 | vl::Tensor biases,
62 | vl::Tensor derOutput,
63 | double epsilon) ;
64 | }
65 |
66 | #endif /* defined(__vl__nnbnorm__) */
67 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnsoftmaxloss.m:
--------------------------------------------------------------------------------
1 | function Y = vl_nnsoftmaxloss(X,c,dzdy)
2 | %VL_NNSOFTMAXLOSS CNN combined softmax and logistic loss.
3 | % **Deprecated: use `vl_nnloss` instead**
4 | %
5 | % Y = VL_NNSOFTMAXLOSS(X, C) applies the softmax operator followed by
6 | % the logistic loss to the data X. X has dimension H x W x D x N,
7 | % packing N arrays of W x H D-dimensional vectors.
8 | %
9 | % C contains the class labels, which should be integers in the range
10 | % 1 to D. C can be an array with either N elements or with dimensions
11 | % H x W x 1 x N. In the first case, a given class label is
12 | % applied at all spatial locations; in the second case, different
13 | % class labels can be specified for different locations.
14 | %
15 | % DZDX = VL_NNSOFTMAXLOSS(X, C, DZDY) computes the derivative of the
16 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
17 | % as X and Y respectively.
18 |
19 | % Copyright (C) 2014-15 Andrea Vedaldi.
20 | % All rights reserved.
21 | %
22 | % This file is part of the VLFeat library and is made available under
23 | % the terms of the BSD license (see the COPYING file).
24 |
25 | %X = X + 1e-6 ;
26 | sz = [size(X,1) size(X,2) size(X,3) size(X,4)] ;
27 |
28 | if numel(c) == sz(4)
29 | % one label per image
30 | c = reshape(c, [1 1 1 sz(4)]) ;
31 | end
32 | if size(c,1) == 1 & size(c,2) == 1
33 | c = repmat(c, [sz(1) sz(2)]) ;
34 | end
35 |
36 | % one label per spatial location
37 | sz_ = [size(c,1) size(c,2) size(c,3) size(c,4)] ;
38 | assert(isequal(sz_, [sz(1) sz(2) sz_(3) sz(4)])) ;
39 | assert(sz_(3)==1 | sz_(3)==2) ;
40 |
41 | % class c = 0 skips a spatial location
42 | mass = cast(c(:,:,1,:) > 0, 'like', c) ;
43 | if sz_(3) == 2
44 | % the second channel of c (if present) is used as weights
45 | mass = mass .* c(:,:,2,:) ;
46 | c(:,:,2,:) = [] ;
47 | end
48 |
49 | % convert to indexes
50 | c = c - 1 ;
51 | c_ = 0:numel(c)-1 ;
52 | c_ = 1 + ...
53 | mod(c_, sz(1)*sz(2)) + ...
54 | (sz(1)*sz(2)) * max(c(:), 0)' + ...
55 | (sz(1)*sz(2)*sz(3)) * floor(c_/(sz(1)*sz(2))) ;
56 |
57 | % compute softmaxloss
58 | Xmax = max(X,[],3) ;
59 | ex = exp(bsxfun(@minus, X, Xmax)) ;
60 |
61 | %n = sz(1)*sz(2) ;
62 | if nargin <= 2
63 | t = Xmax + log(sum(ex,3)) - reshape(X(c_), [sz(1:2) 1 sz(4)]) ;
64 | Y = sum(sum(sum(mass .* t,1),2),4) ;
65 | else
66 | Y = bsxfun(@rdivide, ex, sum(ex,3)) ;
67 | Y(c_) = Y(c_) - 1;
68 | Y = bsxfun(@times, Y, bsxfun(@times, mass, dzdy)) ;
69 | end
70 |
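71 | % Example (a minimal sanity check: with one spatial location and one
72 | % image, the loss reduces to the negative log-softmax of the true class):
73 | %   x = randn(1,1,5,1,'single') ; c = 3 ;
74 | %   y = vl_nnsoftmaxloss(x, c) ;
75 | %   p = exp(x) ./ sum(exp(x), 3) ;
76 | %   % y should equal -log(p(1,1,c,1))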
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep Correlations for Texture Synthesis
2 |
3 |
4 |
5 |
6 |
7 | ### [[Code]](https://github.com/omrysendik/DCor/) [[Project page]](https://www.omrysendik.com/texturesynth2017/) [[Paper]](https://docs.wixstatic.com/ugd/b1fe6d_f4f1684f6ba647ffbf1148c3721fdfc4.pdf) [[Results]](https://www.omrysendik.com/texturesynth2017results)
8 |
9 | Matlab implementation for **Deep Correlations for Texture Synthesis**.
10 |
11 | This package includes our implementation for **Deep Correlations for Texture Synthesis**.
12 | We did our best to make it a stand-alone package, meaning that we included ALL of the dependencies in this repository.
13 | The code was written by [Omry Sendik](https://www.omrysendik.com).
14 |
15 | ## Prerequisites
16 | - Linux (Ubuntu 16.04)
17 | - Matlab R2015b
18 |
19 | ## Getting Started
20 | ### Installation
21 | - Clone this repository
22 | - Run Synth.m with the 'First' flag set to 1. This will compile everything required and download the pre-trained CNN for you. After the first successful run, you may change 'First' back to 0.
23 |
24 | ### Running
25 | - Change the input texture filename on line 20 of Synth.m to point at your desired input. For example:
26 | ```
27 | origSrcImg = imread(strrep('.\Data\Texture13.png','\',filesep));
28 | ```
29 | - Note that the choice of Hyper-Parameters as described in the paper can be tweaked through GetSynthParams.m
30 |
31 | - Now, patiently wait for the result to converge. If things are working properly, you should get a plot similar to this:
32 |
33 |
34 |
35 | - The results will be saved to `./Data/Output/`
36 |
37 | ## Dataset
38 | The dataset can also be downloaded from this repository: [[Data]](https://github.com/omrysendik/DCor/tree/master/Data)
39 |
40 |
41 | ## Citation
42 | If you use this code for your research, please cite our [paper](https://docs.wixstatic.com/ugd/b1fe6d_f4f1684f6ba647ffbf1148c3721fdfc4.pdf):
43 |
44 | ```
45 | @article{sendik2017deep,
46 | title={Deep correlations for texture synthesis},
47 | author={Sendik, Omry and Cohen-Or, Daniel},
48 | journal={ACM Transactions on Graphics (TOG)},
49 | volume={36},
50 | number={5},
51 | pages={161},
52 | year={2017},
53 | publisher={ACM}
54 | }
55 | ```
56 | ## Acknowledgments
57 | The code relies heavily on:
58 | - [MatConvNet](http://www.vlfeat.org/matconvnet/)
59 | - [L-BFGS-B](http://users.eecs.northwestern.edu/~nocedal/lbfgsb.html)
60 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/xtest/vl_testnn.m:
--------------------------------------------------------------------------------
1 | function vl_testnn(varargin)
2 | %VL_TESTNN Run MatConvNet test suite
3 | % VL_TESTNN('option', value, ...) takes the following options:
4 | % `cpu`:: true
5 | % Run the CPU tests.
6 | %
7 | % `gpu`:: false
8 | % Run the GPU tests.
9 | %
10 | % `single`:: true
11 | % Perform tests in single precision.
12 | %
13 | % `double`:: false
14 | % Perform tests in double precision.
15 | %
16 | % `command`:: 'nn'
17 | % Run only the tests whose name starts with the specified substring.
18 | % E.g. `vl_testnn('command', 'nnloss')` runs only the nnloss tests.
19 | %
20 | % `break`:: false
21 | % Stop tests in case of error.
22 | %
23 | % `tapFile`:: ''
24 | % Output the test results to a file. If the specified file already
25 | % exists, it is overwritten.
26 | %
27 | % This function uses the Matlab unit testing framework which was
28 | % introduced in Matlab R2013a (v8.1).
29 |
30 | % Copyright (C) 2015-16 Andrea Vedaldi, Karel Lenc.
31 | % All rights reserved.
32 | %
33 | % This file is part of the VLFeat library and is made available under
34 | % the terms of the BSD license (see the COPYING file).
35 |
36 | opts.cpu = true ;
37 | opts.gpu = false ;
38 | opts.single = true ;
39 | opts.double = false ;
40 | opts.command = 'nn' ;
41 | opts.break = false ;
42 | opts.tapFile = '';
43 | opts = vl_argparse(opts, varargin) ;
44 |
45 | import matlab.unittest.constraints.* ;
46 | import matlab.unittest.selectors.* ;
47 | import matlab.unittest.plugins.TAPPlugin;
48 | import matlab.unittest.plugins.ToFile;
49 |
50 | % Choose which tests to run
51 | sel = HasName(StartsWithSubstring(opts.command)) ;
52 | if opts.cpu & ~opts.gpu
53 | sel = sel & HasName(ContainsSubstring('cpu')) ;
54 | end
55 | if opts.gpu & ~opts.cpu
56 | sel = sel & HasName(ContainsSubstring('gpu')) ;
57 | end
58 | if opts.single & ~opts.double
59 | sel = sel & HasName(ContainsSubstring('single')) ;
60 | end
61 | if opts.double & ~opts.single
62 | sel = sel & HasName(ContainsSubstring('double')) ;
63 | end
64 |
65 | % Run tests
66 | root = fileparts(mfilename('fullpath')) ;
67 | suite = matlab.unittest.TestSuite.fromFolder(fullfile(root, 'suite'), sel) ;
68 | runner = matlab.unittest.TestRunner.withTextOutput('Verbosity',3);
69 | if opts.break
70 | runner.addPlugin(matlab.unittest.plugins.StopOnFailuresPlugin) ;
71 | end
72 | if ~isempty(opts.tapFile)
73 | if exist(opts.tapFile, 'file')
74 | delete(opts.tapFile);
75 | end
76 | runner.addPlugin(TAPPlugin.producingOriginalFormat(ToFile(opts.tapFile)));
77 | end
78 | result = runner.run(suite);
79 | display(result)
80 |
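81 | % Example invocations (illustrative):
82 | %   vl_testnn('command', 'nnconv') ;          % only the convolution tests
83 | %   vl_testnn('gpu', true, 'double', true) ;  % GPU tests, double precision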
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/bnorm.hpp:
--------------------------------------------------------------------------------
1 | // @file bnorm.hpp
2 | // @brief Batch Normalization block implementation
3 | // @author Sebastien Ehrhardt
4 |
5 | /*
6 | Copyright (C) 2015-16 Sebastien Ehrhardt.
7 | All rights reserved.
8 |
9 | This file is part of the VLFeat library and is made available under
10 | the terms of the BSD license (see the COPYING file).
11 | */
12 |
13 | #ifndef __vl__bnorm__
14 | #define __vl__bnorm__
15 |
16 | #include "../data.hpp"
17 | #include <cstddef>  // size_t (include name reconstructed; lost in extraction)
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <typename type>
22 | struct bnorm
23 | {
24 | static vl::Error
25 | forward(Context& context,
26 | type* output,
27 | type* moments, // can be null and it will be allocated internally
28 | type const* data,
29 | type const* multipliers,
30 | type const* biases,
31 | size_t height, size_t width, size_t depth, size_t size,
32 | type epsilon) ;
33 |
34 | static vl::Error
35 | forward_given_moments(Context& context,
36 | type* output,
37 | type const* moments,
38 | type const* data,
39 | type const* multipliers,
40 | type const* biases,
41 | size_t height, size_t width, size_t depth, size_t size) ;
42 |
43 | static vl::Error
44 | backward(Context& context,
45 | type* derData,
46 | type* derMultipliers,
47 | type* derBiases,
48 | type* moments, // can be null and it will be allocated internally
49 | type const* data,
50 | type const* multipliers,
51 | type const* biases,
52 | type const* derOutput,
53 | size_t height, size_t width, size_t depth, size_t size,
54 | type epsilon) ;
55 |
56 | static vl::Error
57 | backward_given_moments(Context& context,
58 | type* derData,
59 | type* derMultipliers,
60 | type* derBiases,
61 | type const* moments,
62 | type const* data,
63 | type const* multipliers,
64 | type const* biases,
65 | type const* derOutput,
66 | size_t height, size_t width, size_t depth, size_t size,
67 | type epsilon) ;
68 | } ;
69 |
70 | } }
71 | #endif /* __vl__bnorm__ */
72 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnbilinearsampler.m:
--------------------------------------------------------------------------------
1 | %VL_NNBILINEARSAMPLER CNN spatial bilinear resampling
2 | % Y = VL_NNBILINEARSAMPLER(X,GRID) resamples image X at the spatial
3 | % locations specified by GRID using bilinear interpolation.
4 | %
5 | % X is an array of dimension H x W x C x N, where (H,W) are the
6 | % height and width of the image, C is the number of feature
7 | % channels, and N is the number of images in the batch.
8 | %
9 | % GRID is an array of dimension 2 x Ho x Wo x No, where (Ho,Wo) are
10 | % the height and width of the output image and No the number of
11 | % output images in the output batch Y. The output array Y has
12 | % dimensions Ho x Wo x C x No. The same resampling grid is used for
13 | % all input feature channels, but each output image in the batch Y
14 | % uses its own grid.
15 | %
16 | % For output image n, GRID(1,:,:,n) specifies the vertical location
17 | % v of a sample in the input image X and GRID(2,:,:,n) the
18 | % horizontal location u. The convention follows standard
19 | % implementations of this operator in the literature. Namely:
20 | %
21 | % 1. The grid coordinates are normalized in the range [-1,1]. This
22 | % means that (-1,-1) is the center of the upper-left pixel in the
23 | % input image and (+1,+1) the center of the bottom-right pixel.
24 | %
25 | % 2. The V,U coordinate planes are stacked in the first dimension of
26 | % GRID instead of in the third, as it would be more natural in
27 | % MatConvNet (as these could be interpreted as 'channels' in
28 | % GRID).
29 | %
30 | % Further, No can be a multiple of N; in this case, it is assumed
31 | % that there are No/N transforms per input image, hence, the
32 | % transforms [1 ... No/N] are applied to the first image, [No/N+1
33 | % ... 2*No/N] are applied to the second image, etc.
34 | %
35 | % [DX, DGRID] = VL_NNBILINEARSAMPLER(X, GRID, DY) computes the
36 | % derivatives of the block projected onto DY. DX, DGRID, DY have the
37 | % same dimensions as X, GRID and Y, respectively.
38 | %
39 | % ## CUDNN SUPPORT
40 | %
41 | % If compiled in, the function will use cuDNN's
42 | % implementation. Note, cuDNN v5 or higher is required.
43 | % You can use the 'NoCudnn' option to disable
44 | % cuDNN or 'CuDNN' to activate it back again (the
45 | % choice sticks until MATLAB purges the MEX files for any reason).
46 |
47 | % Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi.
48 | % All rights reserved.
49 | %
50 | % This file is part of the VLFeat library and is made available under
51 | % the terms of the BSD license (see the COPYING file).
52 |
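53 | % Example (an identity warp on an 8x8 input; with the [-1,1] convention
54 | % above, sampling at the pixel centres reproduces X up to rounding):
55 | %
56 | %   x = randn(8,8,3,1,'single') ;
57 | %   [u,v] = meshgrid(linspace(-1,1,8), linspace(-1,1,8)) ;
58 | %   grid = zeros(2,8,8,1,'single') ;
59 | %   grid(1,:,:,1) = reshape(single(v), [1 8 8]) ;  % vertical coordinates
60 | %   grid(2,:,:,1) = reshape(single(u), [1 8 8]) ;  % horizontal coordinates
61 | %   y = vl_nnbilinearsampler(x, grid) ;  % y is approximately equal to x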
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/vl_nnpool.m:
--------------------------------------------------------------------------------
1 | %VL_NNPOOL CNN pooling.
2 | % Y = VL_NNPOOL(X, POOL) applies the pooling operator to all
3 | % channels of the data X using a square filter of size POOL. X is a
4 | % SINGLE array of dimension H x W x D x N where (H,W) are the
5 | % height and width of the map stack, D is the image depth (number
6 | % of feature channels) and N the number of images in the stack.
7 | %
8 | % Y = VL_NNPOOL(X, [POOLY, POOLX]) uses a rectangular filter of
9 | % height POOLY and width POOLX.
10 | %
11 | % DZDX = VL_NNPOOL(X, POOL, DZDY) computes the derivatives of the
12 | % block projected onto DZDY. DZDX and DZDY have the same dimensions
13 | % as X and Y respectively.
14 | %
15 | % VL_NNCONV(..., 'option', value, ...) takes the following options:
16 | %
17 | % `Stride`:: 1
18 | % The output stride (downsampling factor). It can be either a
19 | % scalar for isotropic downsampling or a vector [STRIDEY
20 | % STRIDEX].
21 | %
22 | % `Pad`:: 0
23 | % The amount of input padding. Input images are padded with zeros
24 | % by this number of pixels on all sides before the convolution is
25 | % computed. It can also be a vector [TOP BOTTOM LEFT RIGHT] to
26 | % specify a different amount of padding in each direction. The
27 | % size of the pooling filter has to exceed the padding.
28 | %
29 | % `Method`:: 'max'
30 | % Specify method of pooling. It can be either 'max' (retain max value
31 | % over the pooling region per channel) or 'avg' (compute the average
32 | % value over the pooling region per channel).
33 | %
34 | % The pooling window must not be larger than the padded image, i.e.
35 | %
36 | % 1 <= POOLY <= HEIGHT + (PADTOP + PADBOTTOM),
37 | % 1 <= POOLX <= WIDTH + (PADLEFT + PADRIGHT).
38 | %
39 | % The output Y is a SINGLE array of dimension YH x YW x K x N,
40 | % packing N images with K channels, where:
41 | %
42 | % YH = floor((H + (PADTOP+PADBOTTOM) - POOLY)/STRIDEY) + 1,
43 | % YW = floor((W + (PADLEFT+PADRIGHT) - POOLX)/STRIDEX) + 1.
44 | %
45 | % The derivative DZDY has the same dimension of the output Y and
46 | % the derivative DZDX has the same dimension as the input X.
47 | %
48 | % ## CUDNN SUPPORT
49 | %
50 | % If compiled in, the function will use cuDNN convolution routines
51 | % (with the exception of asymmetric left-right or top-bottom
52 | % padding and average pooling, which triggers a bug in cuDNN). You
53 | % can use the 'NoCuDNN' option to disable cuDNN or 'cuDNN' to
54 | % activate it back again (the choice sticks until MATLAB purges the
55 | % MEX files for any reason).
56 |
57 | % Copyright (C) 2014 Andrea Vedaldi, Karel Lenc, and Max Jaderberg.
58 | % Copyright (C) 2015 Andrea Vedaldi and Karel Lenc.
59 | % All rights reserved.
60 | %
61 | % This file is part of the VLFeat library and is made available under
62 | % the terms of the BSD license (see the COPYING file).
63 |
64 |
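65 | % Example (a size check using the formulas above):
66 | %
67 | %   x = randn(6,8,2,1,'single') ;
68 | %   y = vl_nnpool(x, [2 2], 'stride', 2, 'method', 'max') ;
69 | %   % size(y) is [3 4 2 1]: floor((6-2)/2)+1 = 3, floor((8-2)/2)+1 = 4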
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/+dagnn/ConvTranspose.m:
--------------------------------------------------------------------------------
1 | classdef ConvTranspose < dagnn.Layer
2 | properties
3 | size = [0 0 0 0]
4 | hasBias = true
5 | upsample = [1 1]
6 | crop = [0 0 0 0]
7 | numGroups = 1
8 | opts = {'cuDNN'}
9 | end
10 |
11 | methods
12 | function outputs = forward(obj, inputs, params)
13 | if ~obj.hasBias, params{2} = [] ; end
14 | outputs{1} = vl_nnconvt(...
15 | inputs{1}, params{1}, params{2}, ...
16 | 'upsample', obj.upsample, ...
17 | 'crop', obj.crop, ...
18 | 'numGroups', obj.numGroups, ...
19 | obj.opts{:}) ;
20 | end
21 |
22 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
23 | if ~obj.hasBias, params{2} = [] ; end
24 | [derInputs{1}, derParams{1}, derParams{2}] = vl_nnconvt(...
25 | inputs{1}, params{1}, params{2}, derOutputs{1}, ...
26 | 'upsample', obj.upsample, ...
27 | 'crop', obj.crop, ...
28 | 'numGroups', obj.numGroups, ...
29 | obj.opts{:}) ;
30 | end
31 |
32 | function outputSizes = getOutputSizes(obj, inputSizes)
33 | outputSizes{1} = [...
34 | obj.upsample(1) * (inputSizes{1}(1) - 1) + obj.size(1) - obj.crop(1) - obj.crop(2), ...
35 | obj.upsample(2) * (inputSizes{1}(2) - 1) + obj.size(2) - obj.crop(3) - obj.crop(4), ...
36 | obj.size(4), ...
37 | inputSizes{1}(4)] ;
38 | end
39 |
40 | function rfs = getReceptiveFields(obj)
41 | rfs.size = (obj.size(1:2) - 1) ./ obj.upsample + 1 ;
42 | rfs.stride = 1 ./ [obj.upsample] ;
43 | rfs.offset = (2*obj.crop([1 3]) - obj.size(1:2) + 1) ...
44 | ./ (2*obj.upsample) + 1 ;
45 | end
46 |
47 | function params = initParams(obj)
48 | % todo: test this initialization method
49 | sc = sqrt(2 / prod(obj.size([1 2 4]))) ;
50 | params{1} = randn(obj.size,'single') * sc ;
51 | if obj.hasBias
52 | params{2} = zeros(obj.size(3),1,'single') * sc ;
53 | end
54 | end
55 |
56 | function set.size(obj, ksize)
57 | % make sure that ksize has 4 dimensions
58 | ksize = [ksize(:)' 1 1 1 1] ;
59 | obj.size = ksize(1:4) ;
60 | end
61 |
62 | function set.crop(obj, crop)
63 | if numel(crop) == 1
64 | obj.crop = [crop crop crop crop] ;
65 | elseif numel(crop) == 2
66 | obj.crop = crop([1 1 2 2]) ;
67 | else
68 | obj.crop = crop ;
69 | end
70 | end
71 |
72 | function set.upsample(obj, upsample)
73 | if numel(upsample) == 1
74 | obj.upsample = [upsample upsample] ;
75 | else
76 | obj.upsample = upsample ;
77 | end
78 | end
79 |
80 | function obj = ConvTranspose(varargin)
81 | obj.load(varargin) ;
82 | % normalize field by implicitly calling setters defined in
83 | % dagnn.Filter and here
84 | obj.size = obj.size ;
85 | obj.upsample = obj.upsample ;
86 | obj.crop = obj.crop ;
87 | end
88 | end
89 | end
90 |
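91 | % Worked size example for getOutputSizes above (illustrative): with
92 | % size = [4 4 C 1], upsample = [2 2] and crop = [1 1 1 1], a 5x5 input
93 | % maps to 2*(5-1) + 4 - 1 - 1 = 10 rows, and likewise 10 columns.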
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/config/mex_CUDA_glnxa64.xml:
--------------------------------------------------------------------------------
1 | <!-- XML markup lost during text extraction; this file configures the MEX compiler for CUDA builds on glnxa64. -->
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matlab/src/bits/impl/subsample_cpu.cpp:
--------------------------------------------------------------------------------
1 | // @file subsample_cpu.cpp
2 | // @brief Subsampling block implementation (CPU)
3 | // @author Andrea Vedaldi
4 | // @author Karel Lenc
5 |
6 | /*
7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
8 | All rights reserved.
9 |
10 | This file is part of the VLFeat library and is made available under
11 | the terms of the BSD license (see the COPYING file).
12 | */
13 |
14 | #include "subsample.hpp"
15 | #include <cstring>  // memset (include name reconstructed; lost in extraction)
16 | #include <cstddef>  // size_t (include name reconstructed)
17 |
18 |
19 | namespace vl { namespace impl {
20 |
21 | template <typename type>
22 | struct subsample
23 | {
24 |
25 | static vl::Error
26 | forward(vl::Context& context,
27 | type* output,
28 | type const* data,
29 | size_t height, size_t width, size_t depth,
30 | size_t strideY, size_t strideX,
31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
32 | {
33 | int outputWidth = (width + (padLeft + padRight) - 1)/strideX + 1 ;
34 | int outputHeight = (height + (padTop + padBottom) - 1)/strideY + 1 ;
35 | for (int z = 0; z < depth; ++z) {
36 | for (int x = 0; x < outputWidth; ++x) {
37 | for (int y = 0; y < outputHeight; ++y) {
38 | int x1 = x * (signed)strideX - (signed)padLeft ;
39 | int y1 = y * (signed)strideY - (signed)padTop ;
40 | type value = 0 ;
41 | if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) {
42 | value = data[x1 * height + y1] ;
43 | }
44 | output[x * outputHeight + y] = value ;
45 | }
46 | }
47 | data += width*height ;
48 | output += outputWidth*outputHeight ;
49 | }
50 | return vlSuccess ;
51 | }
52 |
53 | static vl::Error
54 | backward(vl::Context& context,
55 | type* derData,
56 | type const* derOutput,
57 | size_t height, size_t width, size_t depth,
58 | size_t strideY, size_t strideX,
59 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight)
60 | {
61 | int outputWidth = (width + (padLeft + padRight) - 1)/strideX + 1 ;
62 | int outputHeight = (height + (padTop + padBottom) - 1)/strideY + 1 ;
63 |
64 | memset(derData, 0, sizeof(type) * width * height * depth) ;
65 |
66 | for (int z = 0; z < depth; ++z) {
67 | for (int px = 0; px < outputWidth; ++px) {
68 | for (int py = 0; py < outputHeight; ++py) {
69 | int x1 = px * (int)strideX - (int)padLeft ;
70 | int y1 = py * (int)strideY - (int)padTop ;
71 | if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) {
72 | derData[x1 * height + y1] = derOutput[px * outputHeight + py] ;
73 | }
74 | }
75 | }
76 | derData += width*height ;
77 | derOutput += outputWidth*outputHeight ;
78 | }
79 | return vlSuccess ;
80 | }
81 | } ;
82 |
83 | } }
84 |
85 | // Instantiations
86 | template struct vl::impl::subsample<float> ;
87 |
88 | #ifdef ENABLE_DOUBLE
89 | template struct vl::impl::subsample<double> ;
90 | #endif
91 |
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv CPU.xcscheme:
--------------------------------------------------------------------------------
1 | <!-- XML markup lost during text extraction; Xcode scheme for the "matconv CPU" target. -->
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv GPU.xcscheme:
--------------------------------------------------------------------------------
1 | <!-- XML markup lost during text extraction; Xcode scheme for the "matconv GPU" target. -->
--------------------------------------------------------------------------------
/matconvnet-1.0-beta20/matconvnet.xcodeproj/xcshareddata/xcschemes/matconv cuDNN.xcscheme:
--------------------------------------------------------------------------------
1 | <!-- XML markup lost during text extraction; Xcode scheme for the "matconv cuDNN" target. -->
--------------------------------------------------------------------------------