├── README.md ├── cnn_CT_denoising.m ├── cnn_CT_denoising_aapm_challenge.m ├── cnn_CT_denoising_contourlet_init.m ├── cnn_CT_denoising_forward_process.m ├── cnn_train_modified.m ├── demo_test_aapm_challenge.m ├── demo_test_wavresnet.m ├── demo_test_wavresnet_rnn.m ├── lib_contourlet ├── cnn_wavelet_decon.m ├── cnn_wavelet_recon.m └── nsct_toolbox │ ├── README.txt │ ├── atrousc.c │ ├── atrousc.dll │ ├── atrousc.mexmac │ ├── atrousc.mexw64 │ ├── atrousdec.m │ ├── atrousfilters.m │ ├── atrousrec.m │ ├── decdemo.m │ ├── dfbdecdemo.m │ ├── dfilters.m │ ├── dmaxflat.m │ ├── efilter2.m │ ├── extend2.m │ ├── house.png │ ├── ld2quin.m │ ├── ldfilter.m │ ├── mctrans.m │ ├── modulate2.m │ ├── nsctdec.m │ ├── nsctrec.m │ ├── nsdfbdec.m │ ├── nsdfbrec.m │ ├── nsfbdec.m │ ├── nsfbrec.m │ ├── nssfbdec.m │ ├── nssfbrec.m │ ├── parafilters.m │ ├── qupz.m │ ├── resampz.m │ ├── shownsct.m │ ├── symext.m │ ├── upsample2df.m │ ├── wfilters.m │ ├── zconv2.c │ ├── zconv2.dll │ ├── zconv2.mexmac │ ├── zconv2.mexw64 │ ├── zconv2S.c │ ├── zconv2S.dll │ ├── zconv2S.mexmac │ ├── zconv2S.mexw64 │ └── zoneplate.png ├── matconvnet-1.0-beta24 ├── .gitattributes ├── .gitignore ├── .gitmodules ├── CONTRIBUTING.md ├── COPYING ├── Makefile ├── README.md ├── doc │ ├── Makefile │ ├── blocks.tex │ ├── figures │ │ ├── imnet.pdf │ │ ├── pepper.pdf │ │ └── svg │ │ │ ├── conv.svg │ │ │ ├── convt.svg │ │ │ ├── matconvnet-blue.svg │ │ │ └── matconvnet-white.svg │ ├── fundamentals.tex │ ├── geometry.tex │ ├── impl.tex │ ├── intro.tex │ ├── matconvnet-manual.tex │ ├── matdoc.py │ ├── matdocparser.py │ ├── references.bib │ ├── site │ │ ├── docs │ │ │ ├── about.md │ │ │ ├── css │ │ │ │ └── fixes.css │ │ │ ├── developers.md │ │ │ ├── faq.md │ │ │ ├── figures │ │ │ │ ├── stn-perf.png │ │ │ │ └── stn-samples.png │ │ │ ├── functions.md │ │ │ ├── gpu.md │ │ │ ├── index.md │ │ │ ├── install-alt.md │ │ │ ├── install.md │ │ │ ├── js │ │ │ │ ├── mathjaxhelper.js │ │ │ │ └── toggle.js │ │ │ ├── pretrained.md │ │ │ ├── quick.md │ │ │ ├── spatial-transformer.md │ │ │ ├── training.md │ │ │ └── wrappers.md │ │ ├── mkdocs.yml │ │ └── theme │ │ │ ├── base.html │ │ │ ├── content.html │ │ │ ├── css │ │ │ └── base.css │ │ │ ├── js │ │ │ └── base.js │ │ │ ├── matconvnet-blue.svg │ │ │ ├── nav.html │ │ │ └── toc.html │ └── wrappers.tex ├── examples │ ├── +solver │ │ ├── adadelta.m │ │ ├── adagrad.m │ │ ├── adam.m │ │ └── rmsprop.m │ ├── cifar │ │ ├── cnn_cifar.m │ │ ├── cnn_cifar_init.m │ │ └── cnn_cifar_init_nin.m │ ├── cnn_train.m │ ├── cnn_train_dag.m │ ├── custom_imdb │ │ ├── cnn_toy_data.m │ │ └── cnn_toy_data_generator.m │ ├── fast_rcnn │ │ ├── +dagnn │ │ │ └── LossSmoothL1.m │ │ ├── 000004.jpg │ │ ├── 000004_boxes.mat │ │ ├── README.md │ │ ├── bbox_functions │ │ │ ├── bbox_clip.m │ │ │ ├── bbox_draw.m │ │ │ ├── bbox_nms.m │ │ │ ├── bbox_overlap.m │ │ │ ├── bbox_remove_duplicates.m │ │ │ ├── bbox_scale.m │ │ │ ├── bbox_transform.m │ │ │ └── bbox_transform_inv.m │ │ ├── datasets │ │ │ ├── add_bboxreg_targets.m │ │ │ ├── attach_proposals.m │ │ │ ├── cnn_setup_data_voc07.m │ │ │ └── cnn_setup_data_voc07_ssw.m │ │ ├── fast_rcnn_demo.m │ │ ├── fast_rcnn_eval_get_batch.m │ │ ├── fast_rcnn_evaluate.m │ │ ├── fast_rcnn_init.m │ │ ├── fast_rcnn_train.m │ │ └── fast_rcnn_train_get_batch.m │ ├── imagenet │ │ ├── cnn_imagenet.m │ │ ├── cnn_imagenet_camdemo.m │ │ ├── cnn_imagenet_deploy.m │ │ ├── cnn_imagenet_evaluate.m │ │ ├── cnn_imagenet_googlenet.m │ │ ├── cnn_imagenet_init.m │ │ ├── cnn_imagenet_init_inception.m │ │ ├── cnn_imagenet_init_resnet.m │ │ ├── 
cnn_imagenet_minimal.m │ │ ├── cnn_imagenet_setup_data.m │ │ ├── cnn_imagenet_sync_labels.m │ │ ├── getImageBatch.m │ │ └── getImageStats.m │ ├── mnist │ │ ├── cnn_mnist.m │ │ ├── cnn_mnist_experiments.m │ │ └── cnn_mnist_init.m │ ├── spatial_transformer │ │ ├── cnn_stn_cluttered_mnist.m │ │ ├── cnn_stn_cluttered_mnist_init.m │ │ └── readme.txt │ └── vggfaces │ │ └── cnn_vgg_faces.m ├── matconvnet.sln ├── matconvnet.vcxproj ├── matconvnet.vcxproj.filters ├── matconvnet.xcodeproj │ ├── project.pbxproj │ ├── project.xcworkspace │ │ └── contents.xcworkspacedata │ └── xcshareddata │ │ └── xcschemes │ │ ├── matconv CPU.xcscheme │ │ ├── matconv GPU.xcscheme │ │ └── matconv cuDNN.xcscheme ├── matlab │ ├── +dagnn │ │ ├── @DagNN │ │ │ ├── DagNN.m │ │ │ ├── addLayer.m │ │ │ ├── eval.m │ │ │ ├── fromSimpleNN.m │ │ │ ├── getVarReceptiveFields.m │ │ │ ├── getVarSizes.m │ │ │ ├── initParams.m │ │ │ ├── loadobj.m │ │ │ ├── move.m │ │ │ ├── print.m │ │ │ ├── rebuild.m │ │ │ ├── removeLayer.m │ │ │ ├── renameLayer.m │ │ │ ├── renameParam.m │ │ │ ├── renameVar.m │ │ │ ├── reset.m │ │ │ ├── saveobj.m │ │ │ ├── setLayerInputs.m │ │ │ ├── setLayerOutputs.m │ │ │ └── setLayerParams.m │ │ ├── AffineGridGenerator.m │ │ ├── BatchNorm.m │ │ ├── BilinearSampler.m │ │ ├── Concat.m │ │ ├── Conv.m │ │ ├── ConvTranspose.m │ │ ├── Crop.m │ │ ├── DropOut.m │ │ ├── ElementWise.m │ │ ├── Filter.m │ │ ├── LRN.m │ │ ├── Layer.m │ │ ├── Loss.m │ │ ├── NormOffset.m │ │ ├── PDist.m │ │ ├── Pooling.m │ │ ├── ROIPooling.m │ │ ├── ReLU.m │ │ ├── Scale.m │ │ ├── Sigmoid.m │ │ ├── SoftMax.m │ │ ├── SpatialNorm.m │ │ ├── Sum.m │ │ └── UniformScalingGridGenerator.m │ ├── ParameterServer.m │ ├── compatibility │ │ └── parallel │ │ │ ├── gather.m │ │ │ ├── labindex.m │ │ │ └── numlabs.m │ ├── mex │ │ ├── vl_cudatool.mexw64 │ │ ├── vl_imreadjpeg.mexw64 │ │ ├── vl_imreadjpeg_old.mexw64 │ │ ├── vl_nnbilinearsampler.mexw64 │ │ ├── vl_nnbnorm.mexw64 │ │ ├── vl_nnconv.mexw64 │ │ ├── vl_nnconvt.mexw64 │ │ ├── vl_nnnormalize.mexw64 │ │ ├── vl_nnpool.mexw64 │ │ ├── vl_nnroipool.mexw64 │ │ └── vl_taccummex.mexw64 │ ├── simplenn │ │ ├── vl_simplenn.m │ │ ├── vl_simplenn_diagnose.m │ │ ├── vl_simplenn_display.m │ │ ├── vl_simplenn_move.m │ │ ├── vl_simplenn_start_parserv.m │ │ └── vl_simplenn_tidy.m │ ├── src │ │ ├── bits │ │ │ ├── data.cpp │ │ │ ├── data.cu │ │ │ ├── data.hpp │ │ │ ├── datacu.cu │ │ │ ├── datacu.hpp │ │ │ ├── datamex.cpp │ │ │ ├── datamex.cu │ │ │ ├── datamex.hpp │ │ │ ├── impl │ │ │ │ ├── bilinearsampler.hpp │ │ │ │ ├── bilinearsampler_cpu.cpp │ │ │ │ ├── bilinearsampler_gpu.cu │ │ │ │ ├── blashelper.hpp │ │ │ │ ├── bnorm.hpp │ │ │ │ ├── bnorm_cpu.cpp │ │ │ │ ├── bnorm_gpu.cu │ │ │ │ ├── compat.h │ │ │ │ ├── copy.hpp │ │ │ │ ├── copy_cpu.cpp │ │ │ │ ├── copy_gpu.cu │ │ │ │ ├── cudnnhelper.hpp │ │ │ │ ├── fast_mutex.h │ │ │ │ ├── im2row.hpp │ │ │ │ ├── im2row_cpu.cpp │ │ │ │ ├── im2row_gpu.cu │ │ │ │ ├── imread_gdiplus.cpp │ │ │ │ ├── imread_helpers.hpp │ │ │ │ ├── imread_libjpeg.cpp │ │ │ │ ├── imread_quartz.cpp │ │ │ │ ├── nnbias_blas.hpp │ │ │ │ ├── nnbias_cudnn.cu │ │ │ │ ├── nnbias_cudnn.hpp │ │ │ │ ├── nnbilinearsampler_cudnn.cu │ │ │ │ ├── nnbilinearsampler_cudnn.hpp │ │ │ │ ├── nnbnorm_cudnn.cu │ │ │ │ ├── nnbnorm_cudnn.hpp │ │ │ │ ├── nnconv_blas.hpp │ │ │ │ ├── nnconv_cudnn.cu │ │ │ │ ├── nnconv_cudnn.hpp │ │ │ │ ├── nnpooling_cudnn.cu │ │ │ │ ├── nnpooling_cudnn.hpp │ │ │ │ ├── normalize.hpp │ │ │ │ ├── normalize_cpu.cpp │ │ │ │ ├── normalize_gpu.cu │ │ │ │ ├── pooling.hpp │ │ │ │ ├── pooling_cpu.cpp │ │ │ │ ├── 
pooling_gpu.cu │ │ │ │ ├── roipooling.hpp │ │ │ │ ├── roipooling_cpu.cpp │ │ │ │ ├── roipooling_gpu.cu │ │ │ │ ├── sharedmem.cuh │ │ │ │ ├── subsample.hpp │ │ │ │ ├── subsample_cpu.cpp │ │ │ │ ├── subsample_gpu.cu │ │ │ │ ├── tinythread.cpp │ │ │ │ └── tinythread.h │ │ │ ├── imread.cpp │ │ │ ├── imread.hpp │ │ │ ├── mexutils.h │ │ │ ├── nnbias.cpp │ │ │ ├── nnbias.cu │ │ │ ├── nnbias.hpp │ │ │ ├── nnbilinearsampler.cpp │ │ │ ├── nnbilinearsampler.cu │ │ │ ├── nnbilinearsampler.hpp │ │ │ ├── nnbnorm.cpp │ │ │ ├── nnbnorm.cu │ │ │ ├── nnbnorm.hpp │ │ │ ├── nnconv.cpp │ │ │ ├── nnconv.cu │ │ │ ├── nnconv.hpp │ │ │ ├── nnfullyconnected.cpp │ │ │ ├── nnfullyconnected.cu │ │ │ ├── nnfullyconnected.hpp │ │ │ ├── nnnormalize.cpp │ │ │ ├── nnnormalize.cu │ │ │ ├── nnnormalize.hpp │ │ │ ├── nnpooling.cpp │ │ │ ├── nnpooling.cu │ │ │ ├── nnpooling.hpp │ │ │ ├── nnroipooling.cpp │ │ │ ├── nnroipooling.cu │ │ │ ├── nnroipooling.hpp │ │ │ ├── nnsubsample.cpp │ │ │ ├── nnsubsample.cu │ │ │ └── nnsubsample.hpp │ │ ├── config │ │ │ ├── mex_CUDA_glnxa64.sh │ │ │ ├── mex_CUDA_glnxa64.xml │ │ │ ├── mex_CUDA_maci64.sh │ │ │ └── mex_CUDA_maci64.xml │ │ ├── vl_cudatool.cpp │ │ ├── vl_cudatool.cu │ │ ├── vl_imreadjpeg.cpp │ │ ├── vl_imreadjpeg.cu │ │ ├── vl_imreadjpeg_old.cpp │ │ ├── vl_imreadjpeg_old.cu │ │ ├── vl_nnbilinearsampler.cpp │ │ ├── vl_nnbilinearsampler.cu │ │ ├── vl_nnbnorm.cpp │ │ ├── vl_nnbnorm.cu │ │ ├── vl_nnconv.cpp │ │ ├── vl_nnconv.cu │ │ ├── vl_nnconvt.cpp │ │ ├── vl_nnconvt.cu │ │ ├── vl_nnnormalize.cpp │ │ ├── vl_nnnormalize.cu │ │ ├── vl_nnpool.cpp │ │ ├── vl_nnpool.cu │ │ ├── vl_nnroipool.cpp │ │ ├── vl_nnroipool.cu │ │ ├── vl_taccummex.cpp │ │ ├── vl_taccummex.cu │ │ ├── vl_tmove.cpp │ │ └── vl_tmove.cu │ ├── vl_argparse.m │ ├── vl_compilenn.m │ ├── vl_imreadjpeg.m │ ├── vl_nnbilinearsampler.m │ ├── vl_nnbnorm.m │ ├── vl_nnconcat.m │ ├── vl_nnconv.m │ ├── vl_nnconvt.m │ ├── vl_nncrop.m │ ├── vl_nndropout.m │ ├── vl_nnloss.m │ ├── vl_nnnoffset.m │ ├── vl_nnnormalize.m │ ├── vl_nnnormalizelp.m │ ├── vl_nnpdist.m │ ├── vl_nnpool.m │ ├── vl_nnrelu.m │ ├── vl_nnroipool.m │ ├── vl_nnsigmoid.m │ ├── vl_nnsoftmax.m │ ├── vl_nnsoftmaxloss.m │ ├── vl_nnspnorm.m │ ├── vl_rootnn.m │ ├── vl_setupnn.m │ ├── vl_taccum.m │ ├── vl_tmove.m │ ├── vl_tshow.m │ └── xtest │ │ ├── cmyk.jpg │ │ ├── suite │ │ ├── Scale.m │ │ ├── nnbilinearsampler.m │ │ ├── nnbnorm.m │ │ ├── nnconcat.m │ │ ├── nnconv.m │ │ ├── nnconvt.m │ │ ├── nndagnn.m │ │ ├── nndropout.m │ │ ├── nnloss.m │ │ ├── nnmnist.m │ │ ├── nnnormalize.m │ │ ├── nnnormalizelp.m │ │ ├── nnoffset.m │ │ ├── nnpdist.m │ │ ├── nnpool.m │ │ ├── nnrelu.m │ │ ├── nnroipool.m │ │ ├── nnsigmoid.m │ │ ├── nnsimplenn.m │ │ ├── nnsoftmax.m │ │ ├── nnsoftmaxloss.m │ │ ├── nnsolvers.m │ │ ├── nnspnorm.m │ │ ├── nntest.m │ │ └── tmovemex.m │ │ ├── vl_bench_bnorm.m │ │ ├── vl_bench_imreadjpeg.m │ │ ├── vl_nnbnorm_old.m │ │ ├── vl_test_bnorm.m │ │ ├── vl_test_economic_relu.m │ │ ├── vl_test_gpureset.m │ │ ├── vl_test_imreadjpeg.m │ │ ├── vl_test_print.m │ │ └── vl_testnn.m └── utils │ ├── evaluate_ref_models.m │ ├── get-file.sh │ ├── import-caffe.py │ ├── import-fast-rcnn.sh │ ├── import-fcn.sh │ ├── import-googlenet.sh │ ├── import-ref-models.sh │ ├── import-resnet.sh │ ├── layers.py │ ├── model2dot.m │ ├── preprocess-imagenet.sh │ ├── proto │ ├── __init__.py │ ├── caffe.proto │ ├── caffe_0115.proto │ ├── caffe_0115_pb2.py │ ├── caffe_6e3916.proto │ ├── caffe_6e3916_pb2.py │ ├── caffe_b590f1d.proto │ ├── caffe_b590f1d_pb2.py │ ├── caffe_fastrcnn.proto │ ├── 
caffe_fastrcnn_pb2.py │ ├── caffe_old.proto │ ├── caffe_old_pb2.py │ ├── caffe_pb2.py │ ├── get-protos.sh │ ├── googlenet_prototxt_patch.diff │ ├── vgg_caffe.proto │ ├── vgg_caffe_pb2.py │ └── vgg_synset_words.txt │ ├── simplenn_caffe_compare.m │ ├── simplenn_caffe_deploy.m │ ├── simplenn_caffe_testdeploy.m │ ├── test_examples.m │ └── tidy_ref_models.m ├── test_data ├── test_case1.mat ├── test_case2.mat └── test_case3.mat ├── train_wavresnet.m ├── trained_networks ├── net-aapm-challenge.mat ├── net-forward-process.mat └── net-rnn.mat ├── vl_euclideanloss.m └── vl_simplenn_modified.m /README.md: -------------------------------------------------------------------------------- 1 | Paper 2 | =============== 3 | * A deep convolutional neural network using directional wavelets for low-dose X-ray CT reconstruction 4 | * published in Medical Physics (2017): [http://onlinelibrary.wiley.com/doi/10.1002/mp.12344/full] 5 | * *2nd place winner* of the '**2016 Low-Dose CT Grand Challenge**' 6 | * Wavelet Domain Residual Network (WavResNet) for Low-Dose X-ray CT Reconstruction 7 | * accepted at Fully3D 2017: [https://arxiv.org/abs/1703.01383] 8 | * Deep Convolutional Framelet Denoising for Low-Dose CT via Wavelet Residual Network 9 | * published in IEEE Transactions on Medical Imaging (2018): [https://ieeexplore.ieee.org/abstract/document/8332971] 10 | 11 | Implementation 12 | =============== 13 | * MatConvNet (matconvnet-1.0-beta24) 14 | * Please run matconvnet-1.0-beta24/matlab/vl_compilenn.m to compile MatConvNet. 15 | * Instructions are available at http://www.vlfeat.org/matconvnet/mfiles/vl_compilenn/ 16 | 17 | Learned networks 18 | =============== 19 | * The learned network for the '**2016 Low-Dose CT Grand Challenge**' is included. 20 | * The learned network for WavResNet is included. 21 | * The learned network for Deep Convolutional Framelet Denoising is included. 22 | 23 | Test data 24 | =============== 25 | * Three CT images from the '2016 Low-Dose CT Grand Challenge' are included for testing. 26 | * Thanks to Dr. Cynthia McCollough, the Mayo Clinic, the American Association of Physicists in Medicine (AAPM), and grants EB017095 and EB017185 from the National Institute of Biomedical Imaging and Bioengineering for providing the Low-Dose CT Grand Challenge dataset.
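Quick start
===============
* A minimal MATLAB session to compile the toolbox and run one of the bundled demos might look like the sketch below; the CPU-only compile and the choice of demo are illustrative assumptions (see the compile instructions above for platform-specific options):

```matlab
run('matconvnet-1.0-beta24/matlab/vl_setupnn.m') ;  % add MatConvNet to the MATLAB path
vl_compilenn() ;                                    % compile the MEX files (CPU by default)
demo_test_wavresnet ;                               % denoise the bundled test images
```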
27 | 28 | -------------------------------------------------------------------------------- /lib_contourlet/cnn_wavelet_decon.m: -------------------------------------------------------------------------------- 1 | function waveletCoeffs = cnn_wavelet_decon(images,level,name) 2 | 3 | waveletCoeffs = zeros(size(images,1),size(images,2),sum(2.^level)+1,size(images,3),'double'); 4 | 5 | for tt= 1:size(images,3) 6 | coeffs = nsctdec(images(:,:,tt),level,name,'pyr'); 7 | cc = 1; 8 | waveletCoeffs(:,:,cc,tt) = coeffs{1}; 9 | for l = 1:length(level) 10 | for ll = 1: 2^(level(l)) 11 | cc = cc+1; 12 | if level(l) == 0; 13 | waveletCoeffs(:,:,cc,tt) = coeffs{l+1}; 14 | else 15 | waveletCoeffs(:,:,cc,tt) = coeffs{l+1}{ll}; 16 | end 17 | end 18 | end 19 | end 20 | end -------------------------------------------------------------------------------- /lib_contourlet/cnn_wavelet_recon.m: -------------------------------------------------------------------------------- 1 | function images = cnn_wavelet_recon(waveletCoeffs,level,name) 2 | 3 | waveletCoeffs = double(gather(waveletCoeffs)); 4 | images = (zeros(size(waveletCoeffs,1),size(waveletCoeffs,2),size(waveletCoeffs,4))); 5 | coeffs = cell(1,length(level)+1); 6 | for tt= 1:size(waveletCoeffs,4) 7 | cc = 1; 8 | coeffs{1} = waveletCoeffs(:,:,cc,tt); 9 | for l = 1:length(level) 10 | for ll = 1: 2^(level(l)) 11 | cc = cc+1; 12 | if (level(l)) == 0; 13 | coeffs{l+1} = waveletCoeffs(:,:,cc,tt); 14 | else 15 | coeffs{l+1}{ll} = waveletCoeffs(:,:,cc,tt); 16 | end 17 | end 18 | end 19 | images(:,:,tt) = nsctrec(coeffs,name,'pyr'); 20 | end 21 | end -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/README.txt -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/atrousc.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/atrousc.dll -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/atrousc.mexmac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/atrousc.mexmac -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/atrousc.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/atrousc.mexw64 -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/atrousdec.m: -------------------------------------------------------------------------------- 1 | function y = atrousdec(x,fname,Nlevels); 2 | 3 | % ATROUSDEC - computes the 2-D atrous decomposition using symmetric extension. 4 | % y = atrousdec(x,fname,L) 5 | % INPUT: x image 6 | % fname - can be any filter available in the function atrousfilters 7 | % N levels - number of decomposition levels 8 | % OUTPUT: y vector cell. 
the first entry is the lowpass images, the next entries 9 | % are the highpass images from coarser to finer scales 10 | % EXAMPLE: y = atrousdec(x,'9-7',4) 11 | % 12 | % History 13 | % Created on May, 2004 by Arthur Cunha 14 | % Modified on Aug 2004 by A. C. 15 | % Modified on Oct 2004 by A. C. 16 | % SEE ALSO: ATROUSREC, ATROUSFILTERS 17 | 18 | 19 | [h0,h1,g0,g1] = atrousfilters(fname); % Obtain pyramid filters (the filters must be zero-phase!) 20 | y = cell(1,Nlevels+1); 21 | 22 | % First level 23 | 24 | shift = [1, 1]; % delay compensation 25 | y0 = conv2(symext(x,h0,shift),h0,'valid'); 26 | y1 = conv2(symext(x,h1,shift),h1,'valid'); 27 | 28 | 29 | % Remaining levels 30 | 31 | y{Nlevels+1} = y1; 32 | x = y0; 33 | I2 = eye(2); 34 | for i=1:Nlevels-1 35 | shift = -2^(i-1)*[1,1] + 2; L=2^i; 36 | y0 = atrousc(symext(x,upsample2df(h0,i),shift),h0,I2 * L); 37 | y1 = atrousc(symext(x,upsample2df(h1,i),shift),h1,I2 * L); 38 | y{Nlevels-i+1} = y1; 39 | x=y0; 40 | end 41 | y{1}=x; 42 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/atrousrec.m: -------------------------------------------------------------------------------- 1 | function x=atrousrec(y,fname); 2 | 3 | % ATROUSREC - computes the inverse of the 2-D atrous decomposition computed with ATROUSDEC 4 | % x = atrousrec(y,fname) 5 | % INPUT: y - coefficient cell vector produced by ATROUSDEC 6 | % fname - can be any filter available in the function atrousfilters 7 | % 8 | % OUTPUT: reconstructed image 9 | % 10 | % EXAMPLE: xr = atrousrec(y,'9-7'); 11 | % 12 | % History 13 | % Created on May, 2004 by Arthur Cunha 14 | % Modified on Aug 2004 by A. C. 15 | % Modified on Oct 2004 by A. C. 16 | % SEE ALSO: ATROUSDEC, ATROUSFILTERS 17 | 18 | Nlevels=length(y)-1; 19 | [h0,h1,g0,g1] = atrousfilters(fname); 20 | 21 | 22 | % First Nlevels - 1 levels 23 | 24 | x = y{1}; 25 | I2 = eye(2); 26 | for i=Nlevels-1:-1:1 27 | y1=y{Nlevels-i+1}; 28 | shift = -2^(i-1)*[1,1] + 2; % delay correction 29 | L=2^i; 30 | x = atrousc(symext(x,upsample2df(g0,i),shift),g0,L*I2)+ atrousc(symext(y1,upsample2df(g1,i),shift),g1,L*I2); 31 | end 32 | 33 | % Reconstruct first level 34 | 35 | shift=[1,1]; 36 | x = conv2(symext(x,g0,shift),g0,'valid')+ conv2(symext(y{Nlevels+1},g1,shift),g1,'valid'); 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/dfbdecdemo.m: -------------------------------------------------------------------------------- 1 | 2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 3 | % Image decomposition by the nonsubsampled directional filter bank (NSDFB). 4 | % This is the iterated filter bank that computes the nonsubsampled 5 | % directional decomposition. 6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 7 | 8 | % Parameters: 9 | nlevels = 4 ; % Decomposition level 10 | %pfilter = 'pkva' ; % Pyramidal filter 11 | dfilter = 'dmaxflat7'; %'cd' ; % Directional filter 12 | 13 | % Load a test image (bundled with the toolbox) and compute the nonsubsampled DFB decomposition 14 | im = imread('zoneplate.png'); coeffs = nsdfbdec( double(im), dfilter, nlevels ); 15 | disp( nlevels); disp(dfilter); 16 | 17 | 18 | % Display the coefficients 19 | %disp('Displaying the contourlet coefficients...') ; 20 | %shownsct( coeffs ) ; 21 | 22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 23 | % Nonsubsampled DFB reconstruction. 24 | % This is the inverse of nsdfbdec, i.e.
25 | % imrec = nsdfbrec(coeffs, dfilter); 26 | % would reconstruct imrec = im 27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 28 | 29 | % Reconstruct image 30 | imrec = nsdfbrec( coeffs, dfilter ) ; 31 | 32 | 33 | % Show the reconstructed image and the original image 34 | if 0 35 | figure; 36 | subplot(1,2,1), imagesc( im, [0, 255] ); 37 | title('Original image' ) ; 38 | colormap(gray); 39 | axis image off; 40 | subplot(1,2,2), imagesc( imrec, [0, 255] ); 41 | title('Reconstructed image' ) ; 42 | colormap(gray); 43 | axis image off; 44 | end 45 | 46 | mse = sum( sum( (imrec - double(im)).^2 ) ); 47 | mse = mse / numel(im); 48 | 49 | fprintf('The mean square error is: %f\n', mse); 50 | fprintf('\n'); -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/efilter2.m: -------------------------------------------------------------------------------- 1 | function y = efilter2(x, f, extmod, shift) 2 | % EFILTER2 2D Filtering with edge handling (via extension) 3 | % 4 | % y = efilter2(x, f, [extmod], [shift]) 5 | % 6 | % Input: 7 | % x: input image 8 | % f: 2D filter 9 | % extmod: [optional] extension mode (default is 'per') 10 | % shift: [optional] specify the window over which the 11 | % convolution occurs. By default shift = [0; 0]. 12 | % 13 | % Output: 14 | % y: filtered image that has: 15 | % Y(z1,z2) = X(z1,z2)*F(z1,z2)*z1^shift(1)*z2^shift(2) 16 | % 17 | % Note: 18 | % The origin of filter f is assumed to be floor(size(f)/2) + 1. 19 | % Amount of shift should be no more than floor((size(f)-1)/2). 20 | % The output image has the same size as the input image. 21 | % 22 | % See also: EXTEND2, SEFILTER2 23 | 24 | if ~exist('extmod', 'var') 25 | extmod = 'per'; 26 | end 27 | 28 | if ~exist('shift', 'var') 29 | shift = [0; 0]; 30 | end 31 | 32 | % Periodized extension 33 | sf = (size(f) - 1) / 2; 34 | 35 | xext = extend2(x, floor(sf(1)) + shift(1), ceil(sf(1)) - shift(1), ... 36 | floor(sf(2)) + shift(2), ceil(sf(2)) - shift(2), extmod); 37 | 38 | % Convolve and keep the central part, which has the same size as the input 39 | y = conv2(xext, f, 'valid'); -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/extend2.m: -------------------------------------------------------------------------------- 1 | function y = extend2(x, ru, rd, cl, cr, extmod) 2 | % EXTEND2 2D extension 3 | % 4 | % y = extend2(x, ru, rd, cl, cr, extmod) 5 | % 6 | % Input: 7 | % x: input image 8 | % ru, rd: amount of extension, up and down, for rows 9 | % cl, cr: amount of extension, left and right, for columns 10 | % extmod: extension mode. The valid modes are: 11 | % 'per': periodized extension (both directions) 12 | % 'qper_row': quincunx periodized extension in row 13 | % 'qper_col': quincunx periodized extension in column 14 | % 15 | % Output: 16 | % y: extended image 17 | % 18 | % Note: 19 | % Extension modes 'qper_row' and 'qper_col' are used in multilevel 20 | % quincunx filter banks, assuming the original image is periodic in 21 | % both directions.
For example: 22 | % [y0, y1] = fbdec(x, h0, h1, 'q', '1r', 'per'); 23 | % [y00, y01] = fbdec(y0, h0, h1, 'q', '2c', 'qper_col'); 24 | % [y10, y11] = fbdec(y1, h0, h1, 'q', '2c', 'qper_col'); 25 | % 26 | % See also: FBDEC 27 | 28 | [rx, cx] = size(x); 29 | 30 | switch extmod 31 | case 'per' 32 | I = getPerIndices(rx, ru, rd); 33 | y = x(I, :); 34 | 35 | I = getPerIndices(cx, cl, cr); 36 | y = y(:, I); 37 | 38 | case 'qper_row' 39 | rx2 = round(rx / 2); 40 | 41 | y = [[x(rx2+1:rx, cx-cl+1:cx); x(1:rx2, cx-cl+1:cx)], x, ... 42 | [x(rx2+1:rx, 1:cr); x(1:rx2, 1:cr)]]; 43 | 44 | I = getPerIndices(rx, ru, rd); 45 | y = y(I, :); 46 | 47 | case 'qper_col' 48 | cx2 = round(cx / 2); 49 | 50 | y = [x(rx-ru+1:rx, cx2+1:cx), x(rx-ru+1:rx, 1:cx2); x; ... 51 | x(1:rd, cx2+1:cx), x(1:rd, 1:cx2)]; 52 | 53 | I = getPerIndices(cx, cl, cr); 54 | y = y(:, I); 55 | 56 | otherwise 57 | error('Invalid input for EXTMOD') 58 | end 59 | 60 | %----------------------------------------------------------------------------% 61 | % Internal Function(s) 62 | %----------------------------------------------------------------------------% 63 | function I = getPerIndices(lx, lb, le) 64 | 65 | I = [lx-lb+1:lx , 1:lx , 1:le]; 66 | 67 | if (lx < lb) || (lx < le) 68 | I = mod(I, lx); 69 | I(I==0) = lx; 70 | end -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/house.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/house.png -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/ld2quin.m: -------------------------------------------------------------------------------- 1 | function [h0, h1] = ld2quin(beta) 2 | % LD2QUIN Quincunx filters from the ladder network structure 3 | % 4 | % Construct the quincunx filters from an allpass filter (beta) using the 5 | % ladder network structure 6 | % 7 | % Ref: Phoong et al., IEEE Trans.
on SP, March 1995 8 | 9 | if all(size(beta) ~= 1) 10 | error('The input must be a 1-D filter'); 11 | end 12 | 13 | % Make sure beta is a row vector 14 | beta = beta(:)'; 15 | 16 | lf = length(beta); 17 | n = lf / 2; 18 | 19 | if n ~= floor(n) 20 | error('The input allpass filter must be of even length'); 21 | end 22 | 23 | % beta(z1) * beta(z2) 24 | sp = beta' * beta; 25 | 26 | % beta(z1*z2^{-1}) * beta(z1*z2) 27 | % Obtained by quincunx upsampling type 1 (with zero padding) 28 | h = qupz(sp, 1); 29 | 30 | % Lowpass quincunx filter 31 | h0 = h; 32 | h0(2*n, 2*n) = h0(2*n, 2*n) + 1; 33 | h0 = h0 / 2; 34 | 35 | % Highpass quincunx filter 36 | h1 = -conv2(h, h0); 37 | h1(4*n-1, 4*n-1) = h1(4*n-1, 4*n-1) + 1; 38 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/ldfilter.m: -------------------------------------------------------------------------------- 1 | function f = ldfilter(fname) 2 | % LDFILTER Generate filter for the ladder structure network 3 | % 4 | % f = ldfilter(fname) 5 | % 6 | % Input: 7 | % fname: Available 'fname' are: 8 | % 'pkvaN': length N filter from Phoong, Kim, Vaidyanathan and Ansari 9 | 10 | switch fname 11 | case {'pkva12', 'pkva'} 12 | v = [0.6300 -0.1930 0.0972 -0.0526 0.0272 -0.0144]; 13 | 14 | case {'pkva8'} 15 | v = [0.6302 -0.1924 0.0930 -0.0403]; 16 | 17 | case {'pkva6'} 18 | v = [0.6261 -0.1794 0.0688]; 19 | 20 | otherwise 21 | error('Unrecognized ladder structure filter name'); 22 | end 23 | 24 | % Symmetric impulse response 25 | f = [v(end:-1:1), v]; -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/mctrans.m: -------------------------------------------------------------------------------- 1 | function h = mctrans(b,t) 2 | % MCTRANS McClellan transformation 3 | % H = mctrans(B,T) produces the 2-D FIR filter H that 4 | % corresponds to the 1-D FIR filter B using the transform T. 5 | 6 | % Convert the 1-D filter b to SUM_n a(n) cos(wn) form 7 | n = (length(b)-1)/2; 8 | b = rot90(fftshift(rot90(b,2)),2); % Inverse fftshift 9 | a = [b(1) 2*b(2:n+1)]; 10 | 11 | inset = floor((size(t)-1)/2); 12 | 13 | % Use Chebyshev polynomials to compute h 14 | P0 = 1; P1 = t; 15 | h = a(2)*P1; 16 | rows = inset(1)+1; cols = inset(2)+1; 17 | h(rows,cols) = h(rows,cols)+a(1)*P0; 18 | for i=3:n+1, 19 | P2 = 2*conv2(t,P1); 20 | rows = rows + inset(1); cols = cols + inset(2); 21 | P2(rows,cols) = P2(rows,cols) - P0; 22 | rows = inset(1) + [1:size(P1,1)]; 23 | cols = inset(2) + [1:size(P1,2)]; 24 | hh = h; 25 | h = a(i)*P2; h(rows,cols) = h(rows,cols) + hh; 26 | P0 = P1; 27 | P1 = P2; 28 | end 29 | h = rot90(h,2); % Rotate for use with filter2 -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/modulate2.m: -------------------------------------------------------------------------------- 1 | function y = modulate2(x, type, center) 2 | % MODULATE2 2D modulation 3 | % 4 | % y = modulate2(x, type, [center]) 5 | % 6 | % With TYPE = {'r', 'c' or 'b'} to modulate along the rows, the columns, 7 | % or both directions.
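% EXAMPLE (illustrative): y = modulate2(x, 'b') modulates along both
% rows and columns.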
8 | 9 | % CENTER specifies the origin of modulation as floor(size(x)/2)+1+center 10 | % (default is [0, 0]) 11 | 12 | if ~exist('center', 'var') 13 | center = [0, 0]; 14 | end 15 | 16 | % Size and origin 17 | s = size(x); 18 | o = floor(s / 2) + 1 + center; 19 | 20 | n1 = [1:s(1)] - o(1); 21 | n2 = [1:s(2)] - o(2); 22 | 23 | switch lower(type(1)) 24 | case 'r' 25 | m1 = (-1) .^ n1; 26 | y = x .* repmat(m1', [1, s(2)]); 27 | 28 | case 'c' 29 | m2 = (-1) .^ n2; 30 | y = x .* repmat(m2, [s(1), 1]); 31 | 32 | case 'b' 33 | m1 = (-1) .^ n1; 34 | m2 = (-1) .^ n2; 35 | m = m1' * m2; 36 | y = x .* m; 37 | 38 | otherwise 39 | error('Invalid input type'); 40 | end 41 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/nsfbdec.m: -------------------------------------------------------------------------------- 1 | function [y0, y1] = nsfbdec( x, h0, h1, lev ) 2 | 3 | % nsfbdec - computes the nonsubsampled pyramid decomposition 4 | % at level lev with filters h0, h1 5 | % [y0, y1] = nsfbdec(x,h0,h1,lev) 6 | % INPUT: x image at finer scale 7 | % h0, h1 atrous filters obtained from 'atrousfilters' 8 | % OUTPUT: y0 - lowpass image at coarser scale, y1 - highpass output 9 | % 10 | % History 11 | % Adapted from atrousdec by Arthur Cunha 12 | % Modified on Aug 2004 by A. C. 13 | % Modified on Oct 2004 by A. C. 14 | % SEE ALSO: NSFBREC, ATROUSFILTERS 15 | 16 | 17 | 18 | if lev ~= 0 19 | I2 = eye(2); % delay compensation 20 | shift = -2^(lev-1)*[1,1] + 2; L=2^lev; 21 | y0 = atrousc(symext(x,upsample2df(h0,lev),shift),h0,I2 * L); 22 | y1 = atrousc(symext(x,upsample2df(h1,lev),shift),h1,I2 * L); 23 | else 24 | % first Level 25 | shift = [1, 1]; % delay compensation 26 | y0 = conv2(symext(x,h0,shift),h0,'valid'); 27 | y1 = conv2(symext(x,h1,shift),h1,'valid'); 28 | end 29 | 30 | 31 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/nsfbrec.m: -------------------------------------------------------------------------------- 1 | function x = nsfbrec( y0, y1, g0, g1, lev ) 2 | % nsfbrec - computes the inverse of the 2-D atrous decomposition at level lev 3 | % x = nsfbrec(y0,y1,g0,g1,lev) 4 | % INPUT: lowpass image y0 and highpass image y1 5 | % OUTPUT: reconstructed image at finer scale 6 | % 7 | % EXAMPLE: xr = nsfbrec(y0,y1,g0,g1,lev); 8 | % 9 | % History 10 | % Created on May, 2004 by Arthur Cunha 11 | % Modified on Aug 2004 by A. C. 12 | % Modified on Oct 2004 by A. C. 13 | % SEE ALSO: NSFBDEC, ATROUSFILTERS 14 | 15 | I2 = eye(2); 16 | if lev ~= 0 17 | shift = -2^(lev-1)*[1,1] + 2; % delay correction 18 | L=2^lev; 19 | x = atrousc(symext(y0,upsample2df(g0,lev),shift),g0,L*I2) + ... 20 | atrousc(symext(y1,upsample2df(g1,lev),shift),g1,L*I2); 21 | else 22 | shift=[1,1]; 23 | x = conv2(symext(y0,g0,shift),g0,'valid')+ conv2(symext(y1,g1,shift),g1,'valid'); 24 | end 25 | 26 | 27 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/parafilters.m: -------------------------------------------------------------------------------- 1 | function [y1, y2] = parafilters( f1, f2 ) 2 | % PARAFILTERS Generate four groups of parallelogram filters. 3 | % PARAFILTERS generates four groups of parallelogram filters from a pair of 4 | % diamond filters F1 and F2 by modulation and rotation operations. 5 | % 6 | % parafilters( f1, f2 ) 7 | % 8 | % INPUT: 9 | % f1: 10 | % a matrix, the filter for the first branch. 11 | % f2: 12 | % a matrix, the filter for the second branch.
13 | % 14 | % OUTPUT: 15 | % y1: 16 | % a cell vector of matrices, four outputs for the first branch. 17 | % y2: 18 | % a cell vector of matrices, four outputs for the second branch. 19 | % 20 | % NOTE: 21 | % Refer to pp. 51, Minh N. Do's thesis. 22 | % 23 | % History: 24 | % 08/06/2004 Created by Jianping Zhou. 25 | % 26 | % See also: MODULATE2, RESAMPZ. 27 | 28 | % Initialize output 29 | y1 = cell(1, 4); 30 | y2 = cell(1, 4); 31 | 32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 33 | % Obtain fan filters from the diamond filters 34 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 35 | % Modulation operation 36 | y1{1} = modulate2(f1, 'r'); 37 | y2{1} = modulate2(f2, 'r'); 38 | y1{2} = modulate2(f1, 'c'); 39 | y2{2} = modulate2(f2, 'c'); 40 | 41 | % Transpose operation 42 | y1{3} = y1{1}'; 43 | y2{3} = y2{1}'; 44 | y1{4} = y1{2}'; 45 | y2{4} = y2{2}'; 46 | 47 | 48 | 49 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 50 | % Obtain parallelogram filters from the fan filters 51 | % Use the rotation sampling matrices. 52 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 53 | for i = 1:4 54 | % Resample the filters by corresponding rotation matrices 55 | y1{i} = resampz( y1{i}, i ) ; 56 | y2{i} = resampz( y2{i}, i ) ; 57 | end -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/qupz.m: -------------------------------------------------------------------------------- 1 | function y = qupz(x, type) 2 | % QUPZ Quincunx Upsampling (with zero-pad and matrix extending) 3 | % 4 | % y = qupz(x, [type]) 5 | % 6 | % Input: 7 | % x: input image 8 | % type: [optional] 1 or 2 for selecting the quincunx matrices: 9 | % Q1 = [1, -1; 1, 1] or Q2 = [1, 1; -1, 1] 10 | % Output: 11 | % y: quincunx upsampled image 12 | % 13 | % This resampling operation does NOT involve periodicity, thus it 14 | % zero-pads and extends the matrix 15 | 16 | if ~exist('type', 'var') 17 | type = 1; % numeric, to match the switch cases below 18 | end 19 | 20 | % Quincunx upsampling using the Smith decomposition: 21 | % Q1 = R2 * [2, 0; 0, 1] * R3 22 | % and, 23 | % Q2 = R1 * [2, 0; 0, 1] * R4 24 | % 25 | % See RESAMP for the definition of those resampling matrices 26 | % 27 | % Note that R1 * R2 = R3 * R4 = I so for example, 28 | % upsampling by R1 is the same as downsampling by R2. 29 | % Also the order of the upsampling operations is the reverse of the 30 | % order of the matrix multiplication. 31 | 32 | switch type 33 | case 1 34 | x1 = resampz(x, 4); 35 | [m, n] = size(x1); 36 | x2 = zeros(2*m-1, n); 37 | x2(1:2:end, :) = x1; 38 | y = resampz(x2, 1); 39 | 40 | case 2 41 | x1 = resampz(x, 3); 42 | [m, n] = size(x1); 43 | x2 = zeros(2*m-1, n); 44 | x2(1:2:end, :) = x1; 45 | y = resampz(x2, 2); 46 | 47 | otherwise 48 | error('Invalid argument type'); 49 | end 50 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/shownsct.m: -------------------------------------------------------------------------------- 1 | function shownsct( y ) 2 | % SHOWNSCT Show nonsubsampled Contourlet transform coefficients. 3 | % 4 | % shownsct(y) 5 | % Input: 6 | % y: a cell vector of length n+1, one for each layer of 7 | % subband images from NSCT, y{1} is the lowpass image 8 | % 9 | % NOTE: 10 | % It needs further improvement later. 11 | 12 | % History: 13 | % 08/08/2003 Created by Jianping Zhou. 14 | 15 | % Level of decomposition. 16 | clevels = length( y ) ; 17 | 18 | % Show the subband images.
19 | for i=1:clevels 20 | figure; 21 | if iscell( y{i} ) 22 | % The number of directional subbands. 23 | csubband = length( y{i} ) ; 24 | if csubband > 7 25 | col = 4 ; 26 | else 27 | col = 2 ; 28 | end 29 | row = csubband / col ; 30 | for j = 1:csubband 31 | subplot( row, col, j ) ; 32 | imshow( uint8(y{i}{j}) ); 33 | title( sprintf('NSCT coefficients: level %d', i) ); 34 | end 35 | else 36 | imshow ( uint8(y{i}) ) ; 37 | title( sprintf('Nonsubsampled Contourlet coefficients level %d', i) ); 38 | end 39 | end -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/symext.m: -------------------------------------------------------------------------------- 1 | function yT=symext(x,h,shift); 2 | 3 | % FUNCTION Y = SYMEXT 4 | % INPUT: x, mxn image 5 | % h, 2-D filter coefficients 6 | % shift, optional shift 7 | % OUTPUT: yT image symmetrically extended (H/V symmetry) 8 | % 9 | % Performs symmetric extension for image x, filter h. 10 | % The filter h is assumed to have odd dimensions. 11 | % If the filter has horizontal and vertical symmetry, then 12 | % the nonsymmetric part of conv2(h,x) has the same size as x. 13 | % 14 | % Created by A. Cunha, Fall 2003; 15 | % Modified 12/2005 by A. Cunha. Fixed a bug on wrongly 16 | % swapped indices (m and n). 17 | 18 | [m,n] = size(x); 19 | [p,q] = size(h); 20 | parp = 1-mod(p,2) ; 21 | parq = 1-mod(q,2); 22 | 23 | p2=floor(p/2);q2=floor(q/2); 24 | s1=shift(1);s2=shift(2); 25 | 26 | ss = p2 - s1 + 1; 27 | rr = q2 - s2 + 1; 28 | 29 | yT = [fliplr(x(:,1:ss)) x x(:,n :-1: n-p-s1+1)]; 30 | yT = [flipud(yT(1:rr,:)); yT ; yT(m :-1: m-q-s2+1,:)]; 31 | yT = yT(1:m+p-1 ,1:n+q-1); 32 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/upsample2df.m: -------------------------------------------------------------------------------- 1 | function ho=upsample2df(h, power); 2 | % upsample filter by 2^power; 3 | 4 | [m,n]=size(h); 5 | ho = zeros(2^power * m,2^power * n); 6 | ho(1:2^power:end,1:2^power:end)=h; 7 | -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zconv2.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2.dll -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zconv2.mexmac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2.mexmac -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zconv2.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2.mexw64 -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zconv2S.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2S.dll --------------------------------------------------------------------------------
/lib_contourlet/nsct_toolbox/zconv2S.mexmac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2S.mexmac -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zconv2S.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zconv2S.mexw64 -------------------------------------------------------------------------------- /lib_contourlet/nsct_toolbox/zoneplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/lib_contourlet/nsct_toolbox/zoneplate.png -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | *.vcxproj text merge=union eol=crlf 3 | *.vcxproj.filters merge=union eol=crlf 4 | *.sln text merge=union eol=crlf 5 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitignore: -------------------------------------------------------------------------------- 1 | *.xcodeproj/*xcuserdata* 2 | *.xcodeproj/project.xcworkspace/*xcuserdata* 3 | *.xcodeproj/project.xcworkspace/xcshareddata/ 4 | mex/* 5 | mex 6 | data 7 | *.o 8 | *.pyc 9 | *~ 10 | index.html 11 | matconvnet-*.tar.gz 12 | local 13 | 14 | # Documentation 15 | doc/figures/svg/*.pdf 16 | doc/figures/*.idraw 17 | doc/.texpadtmp/* 18 | doc/*.pdf 19 | doc/.build 20 | 21 | # Website 22 | doc/site/docs/mfiles 23 | doc/site/site 24 | doc/site/.build 25 | doc/site/theme/css/bootstrap.min.css 26 | doc/site/theme/css/bootstrap.min.css.map 27 | doc/site/theme/css/font-awesome.min.css 28 | doc/site/theme/fonts/fontawesome-webfont.eot 29 | doc/site/theme/fonts/fontawesome-webfont.svg 30 | doc/site/theme/fonts/fontawesome-webfont.ttf 31 | doc/site/theme/fonts/fontawesome-webfont.woff 32 | doc/site/theme/fonts/fontawesome-webfont.woff2 33 | doc/site/theme/js/bootstrap.min.js 34 | doc/site/theme/js/jquery.min.js 35 | doc/site/theme/js/jquery.min.map 36 | doc/site/theme/js/npm.js 37 | 38 | # Visual C 39 | *.suo 40 | *.user 41 | *.sdf 42 | *.opensdf 43 | doc/figures/svg/*.idraw 44 | 45 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/matconvnet-1.0-beta24/.gitmodules -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | ## How to contribute to MatConvNet 4 | 5 | For a description of how the library is structured, take a look at the 6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on 7 | the MatConvNet website. 8 | 9 | ### Issues 10 | 11 | We are grateful for any reported issues which help to remove bugs and 12 | improve the overall quality of the library. 
In particular, you can use 13 | the issue tracker to: 14 | 15 | * report bugs and unexpected crashes 16 | * discuss library design decisions 17 | * request new features 18 | 19 | When reporting bugs, it really helps if you can provide the following: 20 | 21 | * Which steps are needed to reproduce the issue 22 | * MATLAB, compiler and CUDA version (where appropriate) 23 | 24 | Before opening an issue to report a bug, please make sure that the bug 25 | is reproducible on the latest version of the master branch. 26 | 27 | The most difficult bugs to remove are those which cause crashes of the 28 | core functions (e.g. CUDA errors etc.). In those cases, it is really 29 | useful to create a *minimal example* which is able to reproduce the 30 | issue. We know that this may mean a bit of work, but it helps us to 31 | remove the bug more quickly. 32 | 33 | ### Pull requests 34 | 35 | Please make any Pull Requests against the `devel` branch rather than 36 | the `master` branch, which is maintained as the latest stable release 37 | of the library. 38 | 39 | As a general rule, it is much easier to accept small Pull Requests 40 | that make a single improvement to the library than complex code 41 | changes that affect multiple parts of the library. When submitting 42 | substantial changes, it is useful if unit tests are provided with the 43 | code. 44 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014-16 The MatConvNet Team. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms are permitted 5 | provided that the above copyright notice and this paragraph are 6 | duplicated in all such forms and that any documentation, advertising 7 | materials, and other materials related to such distribution and use 8 | acknowledge that the software was developed by the MatConvNet 9 | Team. The name of the MatConvNet Team may not be used to endorse or 10 | promote products derived from this software without specific prior 11 | written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT 12 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE 13 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 14 | PURPOSE. -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/README.md: -------------------------------------------------------------------------------- 1 | # MatConvNet: CNNs for MATLAB 2 | 3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural 4 | Networks* (CNNs) for computer vision applications. It is simple, 5 | efficient, and can run and learn state-of-the-art CNNs. Several 6 | example CNNs are included to classify and encode images. Please visit 7 | the [homepage](http://www.vlfeat.org/matconvnet) to learn more. 8 | 9 | In case of compilation issues, please first read the 10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and 11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) sections before creating a GitHub 12 | issue. For general questions regarding network design and training, 13 | please use the 14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet).
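A minimal classification sketch in the style of MatConvNet's quick-start guide follows; the pretrained model file name is an assumption (any simplenn model from the pretrained-models page works the same way):

```matlab
run matlab/vl_setupnn ;                          % set up MatConvNet
net = load('imagenet-vgg-f.mat') ;               % a pretrained model (assumed downloaded)
net = vl_simplenn_tidy(net) ;                    % upgrade the model to the current format
im_ = single(imread('peppers.png')) ;            % load and preprocess an image
im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
im_ = im_ - net.meta.normalization.averageImage ;
res = vl_simplenn(net, im_) ;                    % run the CNN
scores = squeeze(gather(res(end).x)) ;           % class scores
[bestScore, best] = max(scores) ;
```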
15 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/figures/imnet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/matconvnet-1.0-beta24/doc/figures/imnet.pdf -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/figures/pepper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/matconvnet-1.0-beta24/doc/figures/pepper.pdf -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/matconvnet-1.0-beta24/doc/site/docs/figures/stn-perf.png -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jongcye/deeplearningLDCT/927ba16beb726bf956761b99af52cb79f8e3f544/matconvnet-1.0-beta24/doc/site/docs/figures/stn-samples.png -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/gpu.md: -------------------------------------------------------------------------------- 1 | # Using GPU acceleration 2 | 3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the 4 | [Parallel Computing Toolbox](http://www.mathworks.com/products/parallel-computing/). This 5 | toolbox requires CUDA-compatible cards, and you will need a copy of 6 | the corresponding 7 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to 8 | compile GPU support in MatConvNet (see 9 | [compiling](install#compiling)). 10 | 11 | All the core computational functions (e.g. `vl_nnconv`) in the toolbox 12 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore, 13 | switching to use the GPU is as simple as converting the input CPU 14 | arrays into GPU arrays. 15 | 16 | In order to make the very best of powerful GPUs, it is important to 17 | balance the load between CPU and GPU in order to avoid starving the 18 | latter. When training on a problem like ImageNet, the CPU(s) in your 19 | system will be busy loading data from disk and streaming it to the GPU 20 | to evaluate the CNN and its derivative. MatConvNet includes the 21 | utility `vl_imreadjpeg` to accelerate and parallelize loading images 22 | into memory (this function is currently a bottleneck and will be made 23 | more powerful in future releases).
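A minimal sketch of the CPU-to-GPU switch (assuming `net` and `im` already exist as a simplenn model and a preprocessed single-precision image):

```matlab
net = vl_simplenn_move(net, 'gpu') ;  % move the model parameters to the GPU
im = gpuArray(im) ;                   % move the input to the GPU
res = vl_simplenn(net, im) ;          % evaluation now runs on the GPU
```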
24 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/js/mathjaxhelper.js: -------------------------------------------------------------------------------- 1 | /* 2 | #if false 3 | Prevent Unity from trying to compile this js 4 | */ 5 | MathJax.Hub.Config({ 6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] } 7 | }); 8 | /* 9 | #endif 10 | */ -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/js/toggle.js: -------------------------------------------------------------------------------- 1 | function toggle_visibility(id) { 2 | var e = document.getElementById(id); 3 | if(e.style.display == 'block') 4 | e.style.display = 'none'; 5 | else 6 | e.style.display = 'block'; 7 | } 8 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/spatial-transformer.md: -------------------------------------------------------------------------------- 1 | # Spatial Transformer Networks 2 | 3 | This example demonstrates the use of a Spatial Transformer Network 4 | for classifying distorted MNIST digits in clutter. 5 | The source files used in this example can be found in the 6 | `examples/spatial_transformer` directory. 7 | 8 | The spatial transformer network is defined in the `cnn_stn_cluttered_mnist.m` 9 | file. It has three components: (1) a localization network which 10 | predicts six affine transformation parameters for an input image, 11 | (2) a bilinear sampler which applies the above transformation 12 | to the input image, and (3) a classification network which classifies the 13 | output of the bilinear sampler. 14 | 15 | The picture below shows input images and their transformed versions as determined 16 | by the STN. Note how the STN has learned to rectify the input image. 17 | 18 | ![Transformations inferred by the Spatial Transformer Network for images from a cluttered MNIST dataset.](figures/stn-samples.png) 19 | 20 | The following graph compares the training and test errors of two CNNs: 21 | an STN and a plain classification CNN (with the same configuration as the 22 | classification component of the STN). We note that the STN performs significantly 23 | better (STN test-error = 5.7%, CNN test-error = 14.2%). 24 | 25 | ![Classification error comparison between an STN and a CNN.](figures/stn-perf.png) 26 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/docs/training.md: -------------------------------------------------------------------------------- 1 | ## Using MatConvNet to train convnets 2 | 3 | MatConvNet can be used to train models, typically by using a form of 4 | stochastic gradient descent (SGD) and back-propagation. 5 | 6 | The following learning demonstrators are provided in the MatConvNet 7 | package: 8 | 9 | - **MNIST**. See `examples/mnist/cnn_mnist.m`. 10 | - **CIFAR**. See `examples/cifar/cnn_cifar.m`. 11 | - **ImageNet**. See `examples/imagenet/cnn_imagenet.m`. 12 | 13 | These demos are self-contained; MNIST and CIFAR, in particular, 14 | automatically download and unpack the required data, so that they 15 | should work out-of-the-box. 16 | 17 | While MNIST and CIFAR are small datasets (by today's standards) and 18 | training is feasible on a CPU, ImageNet requires a powerful GPU to 19 | complete in a reasonable time (a few days!). It also requires the
It also requires the 20 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to 21 | accelerate reading large batches of JPEG images and avoid starving the 22 | GPU. 23 | 24 | All these demos use the `example/cnn_train.m` and 25 | `example/cnn_train_dag.m` SGD drivers, which are simple 26 | implementations of the standard SGD with momentum, done directly in 27 | MATLAB code. However, it should be easy to implement your own 28 | specialized or improved solver. 29 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/content.html: -------------------------------------------------------------------------------- 1 | {% if meta.source %} 2 | 7 | {% endif %} 8 | 9 | {{ content }} 10 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/js/base.js: -------------------------------------------------------------------------------- 1 | 2 | /* Highlight */ 3 | $( document ).ready(function() { 4 | hljs.initHighlightingOnLoad(); 5 | $('table').addClass('table table-striped table-hover'); 6 | }); 7 | 8 | 9 | $('body').scrollspy({ 10 | target: '.bs-sidebar', 11 | }); 12 | 13 | 14 | /* Prevent disabled links from causing a page reload */ 15 | $("li.disabled a").click(function() { 16 | event.preventDefault(); 17 | }); 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/doc/site/theme/toc.html: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/adadelta.m: -------------------------------------------------------------------------------- 1 | function [w, state] = adadelta(w, state, grad, opts, ~) 2 | %ADADELTA 3 | % Example AdaDelta solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % AdaDelta sets its own learning rate, so any learning rate set in the 6 | % options of CNN_TRAIN and CNN_TRAIN_DAG will be ignored. 7 | % 8 | % If called without any input argument, returns the default options 9 | % structure. 10 | % 11 | % Solver options: (opts.train.solverOpts) 12 | % 13 | % `epsilon`:: 1e-6 14 | % Small additive constant to regularize variance estimate. 15 | % 16 | % `rho`:: 0.9 17 | % Moving average window for variance update, between 0 and 1 (larger 18 | % values result in slower/more stable updating). 19 | 20 | % Copyright (C) 2016 Joao F. Henriques. 21 | % All rights reserved. 22 | % 23 | % This file is part of the VLFeat library and is made available under 24 | % the terms of the BSD license (see the COPYING file). 25 | 26 | if nargin == 0 % Return the default solver options 27 | w = struct('epsilon', 1e-6, 'rho', 0.9) ; 28 | return ; 29 | end 30 | 31 | if isequal(state, 0) % First iteration, initialize state struct 32 | state = struct('g_sqr', 0, 'delta_sqr', 0) ; 33 | end 34 | 35 | rho = opts.rho ; 36 | 37 | state.g_sqr = state.g_sqr * rho + grad.^2 * (1 - rho) ; 38 | new_delta = -sqrt((state.delta_sqr + opts.epsilon) ./ ... 
39 | (state.g_sqr + opts.epsilon)) .* grad ; 40 | state.delta_sqr = state.delta_sqr * rho + new_delta.^2 * (1 - rho) ; 41 | 42 | w = w + new_delta ; 43 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/adagrad.m: -------------------------------------------------------------------------------- 1 | function [w, g_sqr] = adagrad(w, g_sqr, grad, opts, lr) 2 | %ADAGRAD 3 | % Example AdaGrad solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % Set the initial learning rate for AdaGrad in the options for 6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for 7 | % SGD may be inappropriate for AdaGrad; the default is 0.001. 8 | % 9 | % If called without any input argument, returns the default options 10 | % structure. 11 | % 12 | % Solver options: (opts.train.solverOpts) 13 | % 14 | % `epsilon`:: 1e-10 15 | % Small additive constant to regularize variance estimate. 16 | % 17 | % `rho`:: 1 18 | % Moving average window for variance update, between 0 and 1 (larger 19 | % values result in slower/more stable updating). This is similar to 20 | % RHO in AdaDelta and RMSProp. Standard AdaGrad is obtained with a RHO 21 | % value of 1 (use total average instead of a moving average). 22 | % 23 | % A possibly undesirable effect of standard AdaGrad is that the update 24 | % will monotonically decrease to 0, until training eventually stops. This 25 | % is because the AdaGrad update is inversely proportional to the total 26 | % variance of the gradients seen so far. 27 | % With RHO smaller than 1, a moving average is used instead. This 28 | % prevents the final update from monotonically decreasing to 0. 29 | 30 | % Copyright (C) 2016 Joao F. Henriques. 31 | % All rights reserved. 32 | % 33 | % This file is part of the VLFeat library and is made available under 34 | % the terms of the BSD license (see the COPYING file). 35 | 36 | if nargin == 0 % Return the default solver options 37 | w = struct('epsilon', 1e-10, 'rho', 1) ; 38 | return ; 39 | end 40 | 41 | g_sqr = g_sqr * opts.rho + grad.^2 ; 42 | 43 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ; 44 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/+solver/rmsprop.m: -------------------------------------------------------------------------------- 1 | function [w, g_sqr] = rmsprop(w, g_sqr, grad, opts, lr) 2 | %RMSPROP 3 | % Example RMSProp solver, for use with CNN_TRAIN and CNN_TRAIN_DAG. 4 | % 5 | % Set the initial learning rate for RMSProp in the options for 6 | % CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for 7 | % SGD may be inappropriate for RMSProp; the default is 0.001. 8 | % 9 | % If called without any input argument, returns the default options 10 | % structure. 11 | % 12 | % Solver options: (opts.train.solverOpts) 13 | % 14 | % `epsilon`:: 1e-8 15 | % Small additive constant to regularize variance estimate. 16 | % 17 | % `rho`:: 0.99 18 | % Moving average window for variance update, between 0 and 1 (larger 19 | % values result in slower/more stable updating). 20 | 21 | % Copyright (C) 2016 Joao F. Henriques. 22 | % All rights reserved. 23 | % 24 | % This file is part of the VLFeat library and is made available under 25 | % the terms of the BSD license (see the COPYING file). 
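% The solver below keeps a moving average of the squared gradients and
% scales the learning rate by its square root:
%
%   g_sqr <- rho * g_sqr + (1 - rho) * grad.^2
%   w     <- w - lr * grad ./ (sqrt(g_sqr) + epsilon)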
26 | 27 | if nargin == 0 % Return the default solver options 28 | w = struct('epsilon', 1e-8, 'rho', 0.99) ; 29 | return ; 30 | end 31 | 32 | g_sqr = g_sqr * opts.rho + grad.^2 * (1 - opts.rho) ; 33 | 34 | w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ; 35 | -------------------------------------------------------------------------------- /matconvnet-1.0-beta24/examples/custom_imdb/cnn_toy_data_generator.m: -------------------------------------------------------------------------------- 1 | function cnn_toy_data_generator(dataDir) 2 | %CNN_TOY_DATA_GENERATOR 3 | % Generates toy data in the given path: random image of triangles, 4 | % squares and circles. 5 | % 6 | % The directory format is: '//