├── .gitattributes ├── README.md └── matconvnet-1.0-beta24 ├── .gitattributes ├── .gitignore ├── .gitmodules ├── CONTRIBUTING.md ├── COPYING ├── Makefile ├── README.md ├── doc ├── Makefile ├── blocks.tex ├── figures │ ├── imnet.pdf │ ├── pepper.pdf │ └── svg │ │ ├── conv.svg │ │ ├── convt.svg │ │ ├── matconvnet-blue.svg │ │ └── matconvnet-white.svg ├── fundamentals.tex ├── geometry.tex ├── impl.tex ├── intro.tex ├── matconvnet-manual.tex ├── matdoc.py ├── matdocparser.py ├── references.bib ├── site │ ├── docs │ │ ├── about.md │ │ ├── css │ │ │ └── fixes.css │ │ ├── developers.md │ │ ├── faq.md │ │ ├── figures │ │ │ ├── stn-perf.png │ │ │ └── stn-samples.png │ │ ├── functions.md │ │ ├── gpu.md │ │ ├── index.md │ │ ├── install-alt.md │ │ ├── install.md │ │ ├── js │ │ │ ├── mathjaxhelper.js │ │ │ └── toggle.js │ │ ├── pretrained.md │ │ ├── quick.md │ │ ├── spatial-transformer.md │ │ ├── training.md │ │ └── wrappers.md │ ├── mkdocs.yml │ └── theme │ │ ├── base.html │ │ ├── content.html │ │ ├── css │ │ └── base.css │ │ ├── js │ │ └── base.js │ │ ├── matconvnet-blue.svg │ │ ├── nav.html │ │ └── toc.html └── wrappers.tex ├── examples ├── +solver │ ├── adadelta.m │ ├── adagrad.m │ ├── adam.m │ └── rmsprop.m ├── cifar │ ├── cnn_cifar.m │ ├── cnn_cifar_init.m │ └── cnn_cifar_init_nin.m ├── cnn_train.m ├── cnn_train_dag.m ├── custom_imdb │ ├── cnn_toy_data.m │ └── cnn_toy_data_generator.m ├── fast_rcnn │ ├── +dagnn │ │ └── LossSmoothL1.m │ ├── 000004.jpg │ ├── 000004_boxes.mat │ ├── README.md │ ├── bbox_functions │ │ ├── bbox_clip.m │ │ ├── bbox_draw.m │ │ ├── bbox_nms.m │ │ ├── bbox_overlap.m │ │ ├── bbox_remove_duplicates.m │ │ ├── bbox_scale.m │ │ ├── bbox_transform.m │ │ └── bbox_transform_inv.m │ ├── datasets │ │ ├── add_bboxreg_targets.m │ │ ├── attach_proposals.m │ │ ├── cnn_setup_data_voc07.m │ │ └── cnn_setup_data_voc07_ssw.m │ ├── fast_rcnn_demo.m │ ├── fast_rcnn_eval_get_batch.m │ ├── fast_rcnn_evaluate.m │ ├── fast_rcnn_init.m │ ├── fast_rcnn_train.m │ └── fast_rcnn_train_get_batch.m ├── imagenet │ ├── cnn_imagenet.m │ ├── cnn_imagenet_camdemo.m │ ├── cnn_imagenet_deploy.m │ ├── cnn_imagenet_evaluate.m │ ├── cnn_imagenet_googlenet.m │ ├── cnn_imagenet_init.m │ ├── cnn_imagenet_init_inception.m │ ├── cnn_imagenet_init_resnet.m │ ├── cnn_imagenet_minimal.m │ ├── cnn_imagenet_setup_data.m │ ├── cnn_imagenet_sync_labels.m │ ├── getImageBatch.m │ └── getImageStats.m ├── k-space-deep-learning │ ├── add_block_multi_img.m │ ├── cnn_cartesian.m │ ├── cnn_residual_k_space_deep_learning_w_weight_init.m │ ├── cnn_train_dag_cartesian.m │ ├── data │ │ ├── imdb_cartesian_1coil.mat │ │ ├── imdb_cartesian_8coil.mat │ │ ├── imdb_fig7.mat │ │ ├── imdb_fig9.mat │ │ └── smp_mask.mat │ ├── demo_cost_fig6.m │ ├── demo_test_fig7_fig9.m │ ├── demo_training.m │ ├── findLastCheckpoint.m │ ├── getBatchPatchVal.m │ ├── getReconPatchVal.m │ ├── install.m │ ├── loadState.m │ ├── make_box.m │ ├── matlab │ │ ├── +dagnn │ │ │ ├── EuclideanLoss.m │ │ │ ├── FFT.m │ │ │ ├── IFFT.m │ │ │ ├── SubPixel.m │ │ │ ├── SubPixelT.m │ │ │ ├── UnPooling.m │ │ │ ├── UnWeighting.m │ │ │ └── Weighting.m │ │ ├── comp2ri.m │ │ ├── k2wgt.m │ │ ├── ri2comp.m │ │ ├── vl_euclideanloss.m │ │ ├── vl_nnfft.m │ │ ├── vl_nnifft.m │ │ └── wgt2k.m │ ├── nmse.m │ ├── recon_cnn4img.m │ ├── ssos.m │ ├── support_mask.m │ └── weight_mask.m ├── mnist │ ├── cnn_mnist.m │ ├── cnn_mnist_experiments.m │ └── cnn_mnist_init.m ├── spatial_transformer │ ├── cnn_stn_cluttered_mnist.m │ ├── cnn_stn_cluttered_mnist_init.m │ └── readme.txt └── vggfaces │ └── 
cnn_vgg_faces.m ├── matconvnet.sln ├── matconvnet.vcxproj ├── matconvnet.vcxproj.filters ├── matconvnet.xcodeproj ├── project.pbxproj ├── project.xcworkspace │ └── contents.xcworkspacedata └── xcshareddata │ └── xcschemes │ ├── matconv CPU.xcscheme │ ├── matconv GPU.xcscheme │ └── matconv cuDNN.xcscheme ├── matlab ├── +dagnn │ ├── @DagNN │ │ ├── DagNN.m │ │ ├── addLayer.m │ │ ├── eval.m │ │ ├── fromSimpleNN.m │ │ ├── getVarReceptiveFields.m │ │ ├── getVarSizes.m │ │ ├── initParams.m │ │ ├── loadobj.m │ │ ├── move.m │ │ ├── print.m │ │ ├── rebuild.m │ │ ├── removeLayer.m │ │ ├── renameLayer.m │ │ ├── renameParam.m │ │ ├── renameVar.m │ │ ├── reset.m │ │ ├── saveobj.m │ │ ├── setLayerInputs.m │ │ ├── setLayerOutputs.m │ │ └── setLayerParams.m │ ├── AffineGridGenerator.m │ ├── BatchNorm.m │ ├── BilinearSampler.m │ ├── Concat.m │ ├── Conv.m │ ├── ConvTranspose.m │ ├── Crop.m │ ├── DropOut.m │ ├── ElementWise.m │ ├── Error.m │ ├── EuclideanLoss.m │ ├── FFT.m │ ├── Filter.m │ ├── IFFT.m │ ├── LRN.m │ ├── Layer.m │ ├── Loss.m │ ├── NormOffset.m │ ├── PDist.m │ ├── Pooling.m │ ├── ROIPooling.m │ ├── ReLU.m │ ├── Scale.m │ ├── Sigmoid.m │ ├── SoftMax.m │ ├── SpatialNorm.m │ ├── SubPixel.m │ ├── SubPixelT.m │ ├── Sum.m │ ├── UnPooling.m │ ├── UnWeighting.m │ ├── UniformScalingGridGenerator.m │ └── Weighting.m ├── ParameterServer.m ├── comp2ri.m ├── compatibility │ └── parallel │ │ ├── gather.m │ │ ├── labindex.m │ │ └── numlabs.m ├── k2wgt.m ├── ri2comp.m ├── simplenn │ ├── vl_simplenn.m │ ├── vl_simplenn_diagnose.m │ ├── vl_simplenn_display.m │ ├── vl_simplenn_move.m │ ├── vl_simplenn_start_parserv.m │ └── vl_simplenn_tidy.m ├── src │ ├── bits │ │ ├── data.cpp │ │ ├── data.cu │ │ ├── data.hpp │ │ ├── datacu.cu │ │ ├── datacu.hpp │ │ ├── datamex.cpp │ │ ├── datamex.cu │ │ ├── datamex.hpp │ │ ├── impl │ │ │ ├── bilinearsampler.hpp │ │ │ ├── bilinearsampler_cpu.cpp │ │ │ ├── bilinearsampler_gpu.cu │ │ │ ├── blashelper.hpp │ │ │ ├── bnorm.hpp │ │ │ ├── bnorm_cpu.cpp │ │ │ ├── bnorm_gpu.cu │ │ │ ├── compat.h │ │ │ ├── copy.hpp │ │ │ ├── copy_cpu.cpp │ │ │ ├── copy_gpu.cu │ │ │ ├── cudnnhelper.hpp │ │ │ ├── fast_mutex.h │ │ │ ├── im2row.hpp │ │ │ ├── im2row_cpu.cpp │ │ │ ├── im2row_gpu.cu │ │ │ ├── imread_gdiplus.cpp │ │ │ ├── imread_helpers.hpp │ │ │ ├── imread_libjpeg.cpp │ │ │ ├── imread_quartz.cpp │ │ │ ├── nnbias_blas.hpp │ │ │ ├── nnbias_cudnn.cu │ │ │ ├── nnbias_cudnn.hpp │ │ │ ├── nnbilinearsampler_cudnn.cu │ │ │ ├── nnbilinearsampler_cudnn.hpp │ │ │ ├── nnbnorm_cudnn.cu │ │ │ ├── nnbnorm_cudnn.hpp │ │ │ ├── nnconv_blas.hpp │ │ │ ├── nnconv_cudnn.cu │ │ │ ├── nnconv_cudnn.hpp │ │ │ ├── nnpooling_cudnn.cu │ │ │ ├── nnpooling_cudnn.hpp │ │ │ ├── normalize.hpp │ │ │ ├── normalize_cpu.cpp │ │ │ ├── normalize_gpu.cu │ │ │ ├── pooling.hpp │ │ │ ├── pooling_cpu.cpp │ │ │ ├── pooling_gpu.cu │ │ │ ├── roipooling.hpp │ │ │ ├── roipooling_cpu.cpp │ │ │ ├── roipooling_gpu.cu │ │ │ ├── sharedmem.cuh │ │ │ ├── subsample.hpp │ │ │ ├── subsample_cpu.cpp │ │ │ ├── subsample_gpu.cu │ │ │ ├── tinythread.cpp │ │ │ └── tinythread.h │ │ ├── imread.cpp │ │ ├── imread.hpp │ │ ├── mexutils.h │ │ ├── nnbias.cpp │ │ ├── nnbias.cu │ │ ├── nnbias.hpp │ │ ├── nnbilinearsampler.cpp │ │ ├── nnbilinearsampler.cu │ │ ├── nnbilinearsampler.hpp │ │ ├── nnbnorm.cpp │ │ ├── nnbnorm.cu │ │ ├── nnbnorm.hpp │ │ ├── nnconv.cpp │ │ ├── nnconv.cu │ │ ├── nnconv.hpp │ │ ├── nnfullyconnected.cpp │ │ ├── nnfullyconnected.cu │ │ ├── nnfullyconnected.hpp │ │ ├── nnnormalize.cpp │ │ ├── nnnormalize.cu │ │ ├── nnnormalize.hpp │ │ ├── 
nnpooling.cpp │ │ ├── nnpooling.cu │ │ ├── nnpooling.hpp │ │ ├── nnroipooling.cpp │ │ ├── nnroipooling.cu │ │ ├── nnroipooling.hpp │ │ ├── nnsubsample.cpp │ │ ├── nnsubsample.cu │ │ └── nnsubsample.hpp │ ├── config │ │ ├── mex_CUDA_glnxa64.sh │ │ ├── mex_CUDA_glnxa64.xml │ │ ├── mex_CUDA_maci64.sh │ │ └── mex_CUDA_maci64.xml │ ├── vl_cudatool.cpp │ ├── vl_cudatool.cu │ ├── vl_imreadjpeg.cpp │ ├── vl_imreadjpeg.cu │ ├── vl_imreadjpeg_old.cpp │ ├── vl_imreadjpeg_old.cu │ ├── vl_nnbilinearsampler.cpp │ ├── vl_nnbilinearsampler.cu │ ├── vl_nnbnorm.cpp │ ├── vl_nnbnorm.cu │ ├── vl_nnconv.cpp │ ├── vl_nnconv.cu │ ├── vl_nnconvt.cpp │ ├── vl_nnconvt.cu │ ├── vl_nnnormalize.cpp │ ├── vl_nnnormalize.cu │ ├── vl_nnpool.cpp │ ├── vl_nnpool.cu │ ├── vl_nnroipool.cpp │ ├── vl_nnroipool.cu │ ├── vl_taccummex.cpp │ ├── vl_taccummex.cu │ ├── vl_tmove.cpp │ └── vl_tmove.cu ├── vl_argparse.m ├── vl_compilenn.m ├── vl_euclideanloss.m ├── vl_imreadjpeg.m ├── vl_nnbilinearsampler.m ├── vl_nnbnorm.m ├── vl_nnconcat.m ├── vl_nnconv.m ├── vl_nnconvt.m ├── vl_nncrop.m ├── vl_nndropout.m ├── vl_nnfft.m ├── vl_nnifft.m ├── vl_nnloss.m ├── vl_nnnoffset.m ├── vl_nnnormalize.m ├── vl_nnnormalizelp.m ├── vl_nnpdist.m ├── vl_nnpool.m ├── vl_nnrelu.m ├── vl_nnroipool.m ├── vl_nnsigmoid.m ├── vl_nnsoftmax.m ├── vl_nnsoftmaxloss.m ├── vl_nnspnorm.m ├── vl_rootnn.m ├── vl_setupnn.m ├── vl_taccum.m ├── vl_tmove.m ├── vl_tshow.m ├── wgt2k.m └── xtest │ ├── cmyk.jpg │ ├── suite │ ├── Scale.m │ ├── nnbilinearsampler.m │ ├── nnbnorm.m │ ├── nnconcat.m │ ├── nnconv.m │ ├── nnconvt.m │ ├── nndagnn.m │ ├── nndropout.m │ ├── nnloss.m │ ├── nnmnist.m │ ├── nnnormalize.m │ ├── nnnormalizelp.m │ ├── nnoffset.m │ ├── nnpdist.m │ ├── nnpool.m │ ├── nnrelu.m │ ├── nnroipool.m │ ├── nnsigmoid.m │ ├── nnsimplenn.m │ ├── nnsoftmax.m │ ├── nnsoftmaxloss.m │ ├── nnsolvers.m │ ├── nnspnorm.m │ ├── nntest.m │ └── tmovemex.m │ ├── vl_bench_bnorm.m │ ├── vl_bench_imreadjpeg.m │ ├── vl_nnbnorm_old.m │ ├── vl_test_bnorm.m │ ├── vl_test_economic_relu.m │ ├── vl_test_gpureset.m │ ├── vl_test_imreadjpeg.m │ ├── vl_test_print.m │ └── vl_testnn.m └── utils ├── evaluate_ref_models.m ├── get-file.sh ├── import-caffe.py ├── import-fast-rcnn.sh ├── import-fcn.sh ├── import-googlenet.sh ├── import-ref-models.sh ├── import-resnet.sh ├── layers.py ├── model2dot.m ├── preprocess-imagenet.sh ├── proto ├── __init__.py ├── caffe.proto ├── caffe_0115.proto ├── caffe_0115_pb2.py ├── caffe_6e3916.proto ├── caffe_6e3916_pb2.py ├── caffe_b590f1d.proto ├── caffe_b590f1d_pb2.py ├── caffe_fastrcnn.proto ├── caffe_fastrcnn_pb2.py ├── caffe_old.proto ├── caffe_old_pb2.py ├── caffe_pb2.py ├── get-protos.sh ├── googlenet_prototxt_patch.diff ├── vgg_caffe.proto ├── vgg_caffe_pb2.py └── vgg_synset_words.txt ├── simplenn_caffe_compare.m ├── simplenn_caffe_deploy.m ├── simplenn_caffe_testdeploy.m ├── test_examples.m └── tidy_ref_models.m /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Paper 2 | =============== 3 | * k-Space Deep Learning for Accelerated MRI 4 | * Accepted by IEEE Transactions on Medical Imaging: [https://arxiv.org/abs/1805.03779] 5 | 6 | Implementation 7 | =============== 8 | * MatConvNet (matconvnet-1.0-beta24) 9 | * Please run the 
matconvnet-1.0-beta24/matlab/vl_compilenn.m file to compile MatConvNet. 10 | * Instructions are available at "http://www.vlfeat.org/matconvnet/mfiles/vl_compilenn/" 11 | * k-Space Deep Learning (matconvnet-1.0-beta24/examples/k-space-deep-learning) 12 | * Please run the matconvnet-1.0-beta24/examples/k-space-deep-learning/install.m 13 | * Installs the customized library 14 | * Downloads the trained networks, such as image-domain learning and k-space deep learning 15 | 16 | Trained network 17 | =============== 18 | * Trained network for 'image-domain learning for 1 coil and 8 coils on Cartesian trajectory' is uploaded. 19 | * Trained network for 'k-space deep learning for 1 coil and 8 coils on Cartesian trajectory' is uploaded. 20 | 21 | Test data 22 | =============== 23 | * Illustrates Figs. 6, 7, and 9 of k-Space Deep Learning for Accelerated MRI 24 | * MR images from 'http://mridata.org/' are uploaded for training and testing. 25 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | *.vcxproj text merge=union eol=crlf 3 | *.vcxproj.filters merge=union eol=crlf 4 | *.sln text merge=union eol=crlf 5 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitignore: -------------------------------------------------------------------------------- 1 | *.xcodeproj/*xcuserdata* 2 | *.xcodeproj/project.xcworkspace/*xcuserdata* 3 | *.xcodeproj/project.xcworkspace/xcshareddata/ 4 | mex/* 5 | mex 6 | *.o 7 | *.pyc 8 | *~ 9 | index.html 10 | matconvnet-*.tar.gz 11 | local 12 | 13 | # Documentation 14 | doc/figures/svg/*.pdf 15 | doc/figures/*.idraw 16 | doc/.texpadtmp/* 17 | doc/*.pdf 18 | doc/.build 19 | 20 | # Website 21 | doc/site/docs/mfiles 22 | doc/site/site 23 | doc/site/.build 24 | doc/site/theme/css/bootstrap.min.css 25 | doc/site/theme/css/bootstrap.min.css.map 26 | doc/site/theme/css/font-awesome.min.css 27 | doc/site/theme/fonts/fontawesome-webfont.eot 28 | doc/site/theme/fonts/fontawesome-webfont.svg 29 | doc/site/theme/fonts/fontawesome-webfont.ttf 30 | doc/site/theme/fonts/fontawesome-webfont.woff 31 | doc/site/theme/fonts/fontawesome-webfont.woff2 32 | doc/site/theme/js/bootstrap.min.js 33 | doc/site/theme/js/jquery.min.js 34 | doc/site/theme/js/jquery.min.map 35 | doc/site/theme/js/npm.js 36 | 37 | # Visual C 38 | *.suo 39 | *.user 40 | *.sdf 41 | *.opensdf 42 | doc/figures/svg/*.idraw 43 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanyoseob/k-space-deep-learning/e3d71b84ff3f2caaec4f4aabd223db800ad21104/matconvnet-1.0-beta24/.gitmodules -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | ## How to contribute to MatConvNet 4 | 5 | For a description of how the library is structured, take a look at the 6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on 7 | the MatConvNet website. 8 | 9 | ### Issues 10 | 11 | We are grateful for any reported issues which help to remove bugs and 12 | improve the overall quality of the library.
In particular, you can use 13 | the issue tracker to: 14 | 15 | * report bugs and unexpected crashes 16 | * discuss library design decisions 17 | * request new features 18 | 19 | When reporting bugs, it really helps if you can provide the following: 20 | 21 | * Which steps are needed to reproduce the issue 22 | * MATLAB, compiler and CUDA version (where appropriate) 23 | 24 | Before opening an issue to report a bug, please make sure that the bug 25 | is reproducible on the latest version of the master branch. 26 | 27 | The most difficult bugs to remove are those which cause crashes of the 28 | core functions (e.g. CUDA errors etc.). In those cases, it is really 29 | useful to create a *minimal example* which is able to reproduce the 30 | issue. We know that this may mean a bit of work, but it helps us to 31 | remove the bug more quickly. 32 | 33 | ### Pull requests 34 | 35 | Please make any Pull Requests against the `devel` branch rather than 36 | the `master` branch which is maintained as the latest stable release 37 | of the library. 38 | 39 | As a general rule, it is much easier to accept small Pull Requests 40 | that make a single improvement to the library than complex code 41 | changes that affect multiple parts of the library. When submitting 42 | substantial changes, it is useful if unit tests are provided with the 43 | code. 44 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014-16 The MatConvNet Team. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms are permitted 5 | provided that the above copyright notice and this paragraph are 6 | duplicated in all such forms and that any documentation, advertising 7 | materials, and other materials related to such distribution and use 8 | acknowledge that the software was developed by the MatConvNet 9 | Team. The name of the MatConvNet Team may not be used to endorse or 10 | promote products derived from this software without specific prior 11 | written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT 12 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE 13 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 14 | PURPOSE. -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/README.md: -------------------------------------------------------------------------------- 1 | # MatConvNet: CNNs for MATLAB 2 | 3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural 4 | Networks* (CNNs) for computer vision applications. It is simple, 5 | efficient, and can run and learn state-of-the-art CNNs. Several 6 | example CNNs are included to classify and encode images. Please visit 7 | the [homepage](http://www.vlfeat.org/matconvnet) to know more. 8 | 9 | In case of compilation issues, please read first the 10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and 11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) section before creating an GitHub 12 | issue. For general inquiries regarding network design and training 13 | related questions, please use the 14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet). 
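As a quick post-compilation sanity check, the snippet below sketches how a single image can be pushed through a pretrained SimpleNN model. This is only a sketch: it assumes the MEX files have been compiled and that a pretrained model file (the name `imagenet-vgg-f.mat` is used purely as an example) has already been downloaded separately from the pretrained-models page.

```matlab
% Minimal sketch: classify one image with a pretrained SimpleNN model.
% Assumes the toolbox is compiled; the model file name below is an example
% and is not shipped with this repository.
run matlab/vl_setupnn ;                     % add MatConvNet to the MATLAB path
net = load('imagenet-vgg-f.mat') ;          % hypothetical pretrained model file
net = vl_simplenn_tidy(net) ;               % fill in missing/default fields
im  = single(imread('peppers.png')) ;       % any RGB test image
im  = imresize(im, net.meta.normalization.imageSize(1:2)) ;
im  = bsxfun(@minus, im, net.meta.normalization.averageImage) ;
res = vl_simplenn(net, im) ;                % forward pass
scores = squeeze(gather(res(end).x)) ;      % class scores
[bestScore, best] = max(scores) ;
fprintf('%s (score %.3f)\n', net.meta.classes.description{best}, bestScore) ;
```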
15 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/figures/imnet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanyoseob/k-space-deep-learning/e3d71b84ff3f2caaec4f4aabd223db800ad21104/matconvnet-1.0-beta24/doc/figures/imnet.pdf -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/figures/pepper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanyoseob/k-space-deep-learning/e3d71b84ff3f2caaec4f4aabd223db800ad21104/matconvnet-1.0-beta24/doc/figures/pepper.pdf -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanyoseob/k-space-deep-learning/e3d71b84ff3f2caaec4f4aabd223db800ad21104/matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hanyoseob/k-space-deep-learning/e3d71b84ff3f2caaec4f4aabd223db800ad21104/matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/gpu.md: -------------------------------------------------------------------------------- 1 | # Using GPU acceleration 2 | 3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the 4 | [Parallel Computing Toolbox](http://www.mathworks.com/products/parallel-computing/). This 5 | toolbox requires CUDA-compatible cards, and you will need a copy of 6 | the corresponding 7 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to 8 | compile GPU support in MatConvNet (see 9 | [compiling](install#compiling)). 10 | 11 | All the core computational functions (e.g. `vl_nnconv`) in the toolbox 12 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore, 13 | switching to use the GPU is as simple as converting the input CPU 14 | arrays into GPU arrays. 15 | 16 | In order to make the very best of powerful GPUs, it is important to 17 | balance the load between CPU and GPU in order to avoid starving the 18 | latter. In training on a problem like ImageNet, the CPU(s) in your 19 | system will be busy loading data from disk and streaming it to the GPU 20 | to evaluate the CNN and its derivative. MatConvNet includes the 21 | utility `vl_imreadjpeg` to accelerate and parallelize loading images 22 | into memory (this function is currently a bottleneck and will be made more 23 | powerful in future releases).
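To illustrate the point above, the following sketch runs the same convolution once on the CPU and once on the GPU; apart from the `gpuArray`/`gather` conversions (and, for a complete SimpleNN model, a call to `vl_simplenn_move`), the calling code is identical. It assumes a CUDA-capable GPU and a MatConvNet build compiled with GPU support (e.g. `vl_compilenn('enableGpu', true)`).

```matlab
% Same vl_nnconv call on CPU arrays and on GPU arrays.
x = randn(64, 64, 3, 'single') ;     % input, H x W x C
f = randn(5, 5, 3, 16, 'single') ;   % filter bank, FH x FW x C x K
b = zeros(1, 16, 'single') ;         % biases, one per filter

yCpu = vl_nnconv(x, f, b) ;          % CPU path

xg = gpuArray(x) ; fg = gpuArray(f) ; bg = gpuArray(b) ;
yGpu = vl_nnconv(xg, fg, bg) ;       % identical call, GPU path
yGpu = gather(yGpu) ;                % bring the result back to the CPU

% For a whole SimpleNN model, move all weights in one call:
% net = vl_simplenn_move(net, 'gpu') ;
```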
24 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/js/mathjaxhelper.js: -------------------------------------------------------------------------------- 1 | /* 2 | #if false 3 | Prevent Unity to try compiling this js 4 | */ 5 | MathJax.Hub.Config({ 6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] } 7 | }); 8 | /* 9 | #endif 10 | */ -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/js/toggle.js: -------------------------------------------------------------------------------- 1 | function toggle_visibility(id) { 2 | var e = document.getElementById(id); 3 | if(e.style.display == 'block') 4 | e.style.display = 'none'; 5 | else 6 | e.style.display = 'block'; 7 | } 8 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/spatial-transformer.md: -------------------------------------------------------------------------------- 1 | # Spatial Transformer Networks 2 | 3 | This example demonstrates the use of a Spatial Transformer Network 4 | for classifying distorted MNIST digits in clutter. 5 | The source files used in this examples can be found in the 6 | `examples/spatial_transformer` directory. 7 | 8 | The spatial transformer network is defined in the `cnn_stn_cluttered_mnist.m` 9 | file. It has three components: (1) a localization network which 10 | predicts six affine transformation parameters for an input image, 11 | (2) a bilinear sampler which applies the above transformation 12 | to the input image, and (3) a classification network which classifies the 13 | output of the bilinear sampler. 14 | 15 | The picture below shows input images and their transformed versions as determined 16 | by the STN. Note how the STN has learned to rectify the input image. 17 | 18 | ![Transformations inferred by the Spatial Transformer Network for images from a cluttered MNIST dataset.](figures/stn-samples.png) 19 | 20 | The following graph compares the training and test errors of two CNNs: 21 | a STN and, a plain classification CNN (with the same configuration as the 22 | classification component of the STN). We note that the STN performs significantly 23 | better (STN test-error = 5.7%, CNN test-error = 14.2%). 24 | 25 | ![Classification error comparison between a STN and a CNN.](figures/stn-perf.png) 26 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/training.md: -------------------------------------------------------------------------------- 1 | ## Using MatConvNet to train convnets 2 | 3 | MatConvNet can be used to train models, typically by using a form of 4 | stochastic gradient descent (SGD) and back-propagation. 5 | 6 | The following learning demonstrators are provided in the MatConvNet 7 | package: 8 | 9 | - **MNIST**. See `examples/mnist/cnn_mnist.m`. 10 | - **CIFAR**. See `examples/cifar/cnn_cifar.m`. 11 | - **ImageNet**. See `examples/imagenet/cnn_imagenet.m`. 12 | 13 | These demos are self-contained; MNIST and CIFAR, in particular, 14 | automatically download and unpack the required data, so that they 15 | should work out-of-the-box. 16 | 17 | While MNIST and CIFAR are small datasets (by today's standard) and 18 | training is feasible on a CPU, ImageNet requires a powerful GPU to 19 | complete in a reasonable time (a few days!). 
It also requires the 20 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to 21 | accelerate reading large batches of JPEG images and avoid starving the 22 | GPU. 23 | 24 | All these demos use the `example/cnn_train.m` and 25 | `example/cnn_train_dag.m` SGD drivers, which are simple 26 | implementations of the standard SGD with momentum, done directly in 27 | MATLAB code. However, it should be easy to implement your own 28 | specialized or improved solver. 29 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/content.html: -------------------------------------------------------------------------------- 1 | {% if meta.source %} 2 | 7 | {% endif %} 8 | 9 | {{ content }} 10 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/js/base.js: -------------------------------------------------------------------------------- 1 | 2 | /* Highlight */ 3 | $( document ).ready(function() { 4 | hljs.initHighlightingOnLoad(); 5 | $('table').addClass('table table-striped table-hover'); 6 | }); 7 | 8 | 9 | $('body').scrollspy({ 10 | target: '.bs-sidebar', 11 | }); 12 | 13 | 14 | /* Prevent disabled links from causing a page reload */ 15 | $("li.disabled a").click(function() { 16 | event.preventDefault(); 17 | }); 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/toc.html: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/adadelta.m: -------------------------------------------------------------------------------- 1 | function [w, state] = adadelta(w, state, grad, opts, ~) 2 | %ADADELTA 3 | % Example AdaDelta solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % AdaDelta sets its own learning rate, so any learning rate set in the 6 | % options of CNN_TRAIN and CNN_TRAIN_DAG will be ignored. 7 | % 8 | % If called without any input argument, returns the default options 9 | % structure. 10 | % 11 | % Solver options: (opts.train.solverOpts) 12 | % 13 | % `epsilon`:: 1e-6 14 | % Small additive constant to regularize variance estimate. 15 | % 16 | % `rho`:: 0.9 17 | % Moving average window for variance update, between 0 and 1 (larger 18 | % values result in slower/more stable updating). 19 | 20 | % Copyright (C) 2016 Joao F. Henriques. 21 | % All rights reserved. 22 | % 23 | % This file is part of the VLFeat library and is made available under 24 | % the terms of the BSD license (see the COPYING file). 25 | 26 | if nargin == 0 % Return the default solver options 27 | w = struct('epsilon', 1e-6, 'rho', 0.9) ; 28 | return ; 29 | end 30 | 31 | if isequal(state, 0) % First iteration, initialize state struct 32 | state = struct('g_sqr', 0, 'delta_sqr', 0) ; 33 | end 34 | 35 | rho = opts.rho ; 36 | 37 | state.g_sqr = state.g_sqr * rho + grad.^2 * (1 - rho) ; 38 | new_delta = -sqrt((state.delta_sqr + opts.epsilon) ./ ... 
39 | (state.g_sqr + opts.epsilon)) .* grad ; 40 | state.delta_sqr = state.delta_sqr * rho + new_delta.^2 * (1 - rho) ; 41 | 42 | w = w + new_delta ; 43 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/adagrad.m: -------------------------------------------------------------------------------- 1 | function [w, g_sqr] = adagrad(w, g_sqr, grad, opts, lr) 2 | %ADAGRAD 3 | % Example AdaGrad solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % Set the initial learning rate for AdaGrad in the options for 6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for 7 | % SGD may be inappropriate for AdaGrad; the default is 0.001. 8 | % 9 | % If called without any input argument, returns the default options 10 | % structure. 11 | % 12 | % Solver options: (opts.train.solverOpts) 13 | % 14 | % `epsilon`:: 1e-10 15 | % Small additive constant to regularize variance estimate. 16 | % 17 | % `rho`:: 1 18 | % Moving average window for variance update, between 0 and 1 (larger 19 | % values result in slower/more stable updating). This is similar to 20 | % RHO in AdaDelta and RMSProp. Standard AdaGrad is obtained with a RHO 21 | % value of 1 (use total average instead of a moving average). 22 | % 23 | % A possibly undesirable effect of standard AdaGrad is that the update 24 | % will monotonically decrease to 0, until training eventually stops. This 25 | % is because the AdaGrad update is inversely proportional to the total 26 | % variance of the gradients seen so far. 27 | % With RHO smaller than 1, a moving average is used instead. This 28 | % prevents the final update from monotonically decreasing to 0. 29 | 30 | % Copyright (C) 2016 Joao F. Henriques. 31 | % All rights reserved. 32 | % 33 | % This file is part of the VLFeat library and is made available under 34 | % the terms of the BSD license (see the COPYING file). 35 | 36 | if nargin == 0 % Return the default solver options 37 | w = struct('epsilon', 1e-10, 'rho', 1) ; 38 | return ; 39 | end 40 | 41 | g_sqr = g_sqr * opts.rho + grad.^2 ; 42 | 43 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ; 44 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/rmsprop.m: -------------------------------------------------------------------------------- 1 | function [w, g_sqr] = rmsprop(w, g_sqr, grad, opts, lr) 2 | %RMSPROP 3 | % Example RMSProp solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % Set the initial learning rate for RMSProp in the options for 6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for 7 | % SGD may be inappropriate for RMSProp; the default is 0.001. 8 | % 9 | % If called without any input argument, returns the default options 10 | % structure. 11 | % 12 | % Solver options: (opts.train.solverOpts) 13 | % 14 | % `epsilon`:: 1e-8 15 | % Small additive constant to regularize variance estimate. 16 | % 17 | % `rho`:: 0.99 18 | % Moving average window for variance update, between 0 and 1 (larger 19 | % values result in slower/more stable updating). 20 | 21 | % Copyright (C) 2016 Joao F. Henriques. 22 | % All rights reserved. 23 | % 24 | % This file is part of the VLFeat library and is made available under 25 | % the terms of the BSD license (see the COPYING file). 
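% The update implemented below keeps a leaky moving average of the squared
% gradients, g_sqr <- rho * g_sqr + (1 - rho) * grad.^2, and scales the raw
% gradient element-wise by 1 ./ (sqrt(g_sqr) + epsilon) before applying the
% learning rate: w <- w - lr * grad ./ (sqrt(g_sqr) + epsilon). EPSILON guards
% against division by zero; RHO controls how quickly older gradient
% magnitudes are forgotten.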
26 | 27 | if nargin == 0 % Return the default solver options 28 | w = struct('epsilon', 1e-8, 'rho', 0.99) ; 29 | return ; 30 | end 31 | 32 | g_sqr = g_sqr * opts.rho + grad.^2 * (1 - opts.rho) ; 33 | 34 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ; 35 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/custom_imdb/cnn_toy_data_generator.m: -------------------------------------------------------------------------------- 1 | function cnn_toy_data_generator(dataDir) 2 | %CNN_TOY_DATA_GENERATOR 3 | % Generates toy data in the given path: random image of triangles, 4 | % squares and circles. 5 | % 6 | % The directory format is: '//