├── README.md ├── demo1_tutorial_instance_segmentation ├── README.md ├── figFolder │ ├── main001_instSeg_v1_absEucMM_visualization_epoch-31 │ │ ├── fig00_visualization.jpg │ │ ├── fig01_visualization_looping.jpg │ │ ├── fig02_showcaseSamples.bmp │ │ ├── fig03_showcaseSemanticMask.bmp │ │ ├── fig04_showcaseInstanceMask.bmp │ │ ├── fig05_predInstanceMask-loop0.bmp │ │ ├── fig06_predInstanceMask-loop1.bmp │ │ ├── fig07_predInstanceMask-loop2.bmp │ │ ├── fig08_predInstanceMask-loop3.bmp │ │ ├── fig09_predInstanceMask-loop4.bmp │ │ ├── fig10_predInstanceMask-loop5.bmp │ │ └── results.mat │ ├── main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59 │ │ ├── fig00_visualization.jpg │ │ ├── fig01_visualization_looping1.jpg │ │ ├── fig02_showcaseSamples.bmp │ │ ├── fig03_showcaseSemanticMask.bmp │ │ ├── fig04_showcaseInstanceMask.bmp │ │ ├── fig05_predInstanceMask-loop0.bmp │ │ ├── fig06_predInstanceMask-loop1.bmp │ │ ├── fig07_predInstanceMask-loop2.bmp │ │ ├── fig08_predInstanceMask-loop3.bmp │ │ ├── fig09_predInstanceMask-loop4.bmp │ │ ├── fig10_predInstanceMask-loop5.bmp │ │ └── results.mat │ └── main007_instSeg_v1_absEucMM_epoch-83 │ │ ├── fig00_visualization.jpg │ │ ├── fig01_visualization_looping1.jpg │ │ ├── fig01_visualization_looping2.jpg │ │ ├── fig01_visualization_looping3.jpg │ │ ├── fig02_showcaseSamples.bmp │ │ ├── fig03_showcaseSemanticMask.bmp │ │ ├── fig04_showcaseInstanceMask.bmp │ │ ├── fig05_predInstanceMask-loop0.bmp │ │ ├── fig06_predInstanceMask-loop1.bmp │ │ ├── fig07_predInstanceMask-loop2.bmp │ │ ├── fig08_predInstanceMask-loop3.bmp │ │ ├── fig09_predInstanceMask-loop4.bmp │ │ ├── fig10_predInstanceMask-loop5.bmp │ │ └── results.mat ├── fun4MeanShift │ ├── InstanceSegLogLoss.m │ ├── InstanceSegMMLoss.m │ ├── InstanceSegMMLoss_randSample.m │ ├── InstanceSegRegLoss.m │ ├── InstanceSegRegLoss_randSample.m │ ├── L2normalization.m │ ├── addOneLoop_forMeanShiftGrouping.m │ ├── cnnTrain.m │ ├── cosineSimilarity.m │ ├── cosineSimilarity_randSample.m │ ├── getBatchWrapper4toyDigitV2.m │ ├── getImgBatch4toyDigitV2.m │ ├── init_resnet_toyDigitV2.m │ ├── main000_interpret_MeanShiftLoop1.m │ ├── main000_interpret_ff.m │ ├── main001_addMeanShift4Eval.m │ ├── main001_instSeg_proj3Dsphere.m │ ├── main001_instSeg_test.m │ ├── main001_instSeg_v1_absEucMM.m │ ├── main001_inst_visualize_looping.m │ ├── main002__visualEval.m │ ├── main002_instSeg_v1_ftAbsEucMM_epoch83.m │ ├── meanshift_G_is_Gaussian.m │ ├── meanshift_P_is_G_diag_q.m │ ├── meanshift_S_is_XX.m │ ├── meanshift_Y_is_XP.m │ ├── meanshift_d_is_sumG.m │ ├── meanshift_q_is_inv_d.m │ ├── processEpoch.m │ ├── showStitch.m │ ├── showdict.m │ ├── stitchImage2panel.m │ └── vl_nnloss_modified.m ├── imdb_toydata_v3_from_mnist.mat ├── local_functions_demo1 │ ├── display_network.m │ ├── generate_test_set.m │ ├── generate_train_set_part1.m │ ├── generate_train_set_part2.m │ ├── loadMNISTImages.m │ ├── loadMNISTLabels.m │ ├── showStitch.m │ └── showdict.m ├── step001_prepare_dataset_mnist.m ├── step002_prepare_imdb_v3.m ├── step003_instSeg_v1_absEucMM.m ├── step004_instSeg_v1_multiMShiftLoops_finetuneStep003.m └── step005_inst_visualize_looping.m ├── demo2_boundary_detection ├── BoundaryLossLogistic.m ├── L2normalization.m ├── README.md ├── data │ ├── 104010.jpg │ ├── 104010.mat │ ├── 130066.jpg │ ├── 130066.mat │ ├── 65084.jpg │ └── 65084.mat ├── edgesNmsMex.cpp ├── edgesNmsMex.mexa64 ├── edgesNmsMex.mexmaci64 ├── edgesNmsMex.mexw64 ├── figures │ ├── 104010.jpg │ ├── 130066.jpg │ └── 65084.jpg ├── imdb.mat ├── main001_visualize.m └── rescaleFeaMap.m 
├── demo3_objectness_proposal_detection ├── InstanceSegMMLoss_randSample.m ├── InstanceSegRegLoss_randSample.m ├── L2normalization.m ├── README.md ├── basemodel │ └── README.md ├── cosineSimilarity_randSample.m ├── demo.m ├── images │ ├── 2008_000215.jpg │ ├── 2008_000215.mat │ ├── 2008_000234.jpg │ ├── 2008_000234.mat │ ├── 2008_000359.jpg │ ├── 2008_000359.mat │ ├── 2008_002240.jpg │ ├── 2008_002240.mat │ ├── 2008_006554.jpg │ ├── 2008_006554.mat │ ├── 2010_001646.jpg │ ├── 2010_001646.mat │ ├── 2010_005063.jpg │ └── 2010_005063.mat ├── imdb_complete_on_server.mat ├── rescaleFeaMap.m └── results │ ├── id1_summary.jpg │ ├── id1_summaryLoops.jpg │ ├── id2_summary.jpg │ ├── id2_summaryLoops.jpg │ ├── id3_summary.jpg │ ├── id3_summaryLoops.jpg │ ├── id4_summary.jpg │ ├── id4_summaryLoops.jpg │ ├── id5_summary.jpg │ ├── id5_summaryLoops.jpg │ ├── id6_summary.jpg │ ├── id6_summaryLoops.jpg │ ├── id7_summary.jpg │ └── id7_summaryLoops.jpg ├── demo4_InstSegTraining_VOC2012 ├── InstanceSegMMLoss_randSample.m ├── InstanceSegRegLoss_randSample.m ├── L2normalization.m ├── README.md ├── cnnTrain_augVOC2012_inst_div4.m ├── cosineSimilarity_invProbSample.m ├── cosineSimilarity_randSample.m ├── exp │ └── README.md ├── getBatchWrapper_augVOC2012.m ├── getImgBatch_augVOC2012.m ├── imdb_complete_on_server.mat ├── main000_metaStep0_visual_withMShift.m ├── main000_metaStep1_visual_withoutMShift.m ├── main001_trainScript.m ├── processEpoch_augVOC2012_inst_div4.m ├── rescaleFeaMap.m └── vl_nnloss_modified.m ├── demo5_analysis_MShift_gradient ├── L2RegressionLoss.m ├── README.md ├── fun4MShift_analysis │ ├── addOneLoop_forMeanShiftGrouping.m │ ├── meanshift_G_is_Gaussian.m │ ├── meanshift_P_is_G_diag_q.m │ ├── meanshift_S_is_XX.m │ ├── meanshift_Y_is_XP.m │ ├── meanshift_d_is_sumG.m │ └── meanshift_q_is_inv_d.m ├── linspecer.m ├── simulation07_1D_GBMS_1step_trajectory.m ├── simulation07_GBMS_3Loops.png ├── simulation07_GBMS_3Loops_summary.png ├── simulation07_GBMS_5Loops.png ├── simulation07_GBMS_5Loops_summary.png ├── simulation07_GBMS_7Loops.png ├── simulation07_GBMS_7Loops_summary.png └── vl_nnloss_regression.m ├── figure_to_show ├── demo_boundaryDet.png ├── demo_combo.png ├── demo_combo_v2.png ├── demo_instSeg.png ├── demo_testImg_boundaryDet.jpg ├── fig00_visualization.jpg ├── fig01_visualization_looping.jpg ├── fig02_showcaseSamples.bmp ├── fig05_predInstanceMask-loop0.bmp ├── fig10_predInstanceMask-loop5.bmp └── softmax_net-train.png └── libs ├── exportFig ├── .gitignore ├── ImageSelection.class ├── ImageSelection.java ├── LICENSE ├── README.md ├── append_pdfs.m ├── copyfig.m ├── crop_borders.m ├── eps2pdf.m ├── export_fig.m ├── fix_lines.m ├── ghostscript.m ├── im2gif.m ├── isolate_axes.m ├── pdf2eps.m ├── pdftops.m ├── print2array.m ├── print2eps.m ├── read_write_entire_textfile.m ├── user_string.m └── using_hg2.m ├── fun4MeanShift ├── addOneLoop_forMeanShiftGrouping.m ├── meanshift_G_is_Gaussian.m ├── meanshift_P_is_G_diag_q.m ├── meanshift_S_is_XX.m ├── meanshift_Y_is_XP.m ├── meanshift_d_is_sumG.m └── meanshift_q_is_inv_d.m ├── layerExt ├── AddBilinearUpSampling.m ├── AddDilationErosionObjectives.m ├── AddSubModel.m ├── DotProduct.m ├── LRRAddMasking.m ├── LRRInitializeFromVGG16.m ├── Neg.m ├── SegmentationAccuracy.m ├── SegmentationLoss.m ├── SegmentationLossLogistic.m └── bilinear_u.m ├── matconvnet-1.0-beta23_modifiedDagnn ├── .gitattributes ├── .gitignore ├── .gitmodules ├── CONTRIBUTING.md ├── COPYING ├── Makefile ├── README.md ├── doc │ ├── Makefile │ ├── blocks.tex │ ├── figures │ │ ├── 
imnet.pdf │ │ ├── pepper.pdf │ │ └── svg │ │ │ ├── conv.svg │ │ │ ├── convt.svg │ │ │ ├── matconvnet-blue.svg │ │ │ └── matconvnet-white.svg │ ├── fundamentals.tex │ ├── geometry.tex │ ├── impl.tex │ ├── intro.tex │ ├── matconvnet-manual.tex │ ├── matdoc.py │ ├── matdocparser.py │ ├── references.bib │ ├── site │ │ ├── docs │ │ │ ├── about.md │ │ │ ├── css │ │ │ │ └── fixes.css │ │ │ ├── developers.md │ │ │ ├── faq.md │ │ │ ├── figures │ │ │ │ ├── stn-perf.png │ │ │ │ └── stn-samples.png │ │ │ ├── functions.md │ │ │ ├── gpu.md │ │ │ ├── index.md │ │ │ ├── install-alt.md │ │ │ ├── install.md │ │ │ ├── js │ │ │ │ ├── mathjaxhelper.js │ │ │ │ └── toggle.js │ │ │ ├── pretrained.md │ │ │ ├── quick.md │ │ │ ├── spatial-transformer.md │ │ │ ├── training.md │ │ │ └── wrappers.md │ │ ├── mkdocs.yml │ │ └── theme │ │ │ ├── base.html │ │ │ ├── content.html │ │ │ ├── css │ │ │ └── base.css │ │ │ ├── js │ │ │ └── base.js │ │ │ ├── matconvnet-blue.svg │ │ │ ├── nav.html │ │ │ └── toc.html │ └── wrappers.tex ├── examples │ ├── cifar │ │ ├── cnn_cifar.m │ │ ├── cnn_cifar_init.m │ │ └── cnn_cifar_init_nin.m │ ├── cnn_train.m │ ├── cnn_train_dag.m │ ├── fast_rcnn │ │ ├── +dagnn │ │ │ └── LossSmoothL1.m │ │ ├── 000004.jpg │ │ ├── 000004_boxes.mat │ │ ├── README.md │ │ ├── bbox_functions │ │ │ ├── bbox_clip.m │ │ │ ├── bbox_draw.m │ │ │ ├── bbox_nms.m │ │ │ ├── bbox_overlap.m │ │ │ ├── bbox_remove_duplicates.m │ │ │ ├── bbox_scale.m │ │ │ ├── bbox_transform.m │ │ │ └── bbox_transform_inv.m │ │ ├── datasets │ │ │ ├── add_bboxreg_targets.m │ │ │ ├── attach_proposals.m │ │ │ ├── cnn_setup_data_voc07.m │ │ │ └── cnn_setup_data_voc07_ssw.m │ │ ├── fast_rcnn_demo.m │ │ ├── fast_rcnn_eval_get_batch.m │ │ ├── fast_rcnn_evaluate.m │ │ ├── fast_rcnn_init.m │ │ ├── fast_rcnn_train.m │ │ └── fast_rcnn_train_get_batch.m │ ├── imagenet │ │ ├── cnn_imagenet.m │ │ ├── cnn_imagenet_camdemo.m │ │ ├── cnn_imagenet_deploy.m │ │ ├── cnn_imagenet_evaluate.m │ │ ├── cnn_imagenet_googlenet.m │ │ ├── cnn_imagenet_init.m │ │ ├── cnn_imagenet_init_inception.m │ │ ├── cnn_imagenet_init_resnet.m │ │ ├── cnn_imagenet_minimal.m │ │ ├── cnn_imagenet_setup_data.m │ │ ├── cnn_imagenet_sync_labels.m │ │ ├── getImageBatch.m │ │ └── getImageStats.m │ ├── mnist │ │ ├── cnn_mnist.m │ │ ├── cnn_mnist_experiments.m │ │ └── cnn_mnist_init.m │ ├── spatial_transformer │ │ ├── cnn_stn_cluttered_mnist.m │ │ ├── cnn_stn_cluttered_mnist_init.m │ │ └── readme.txt │ └── vggfaces │ │ └── cnn_vgg_faces.m ├── matconvnet.sln ├── matconvnet.vcxproj ├── matconvnet.vcxproj.filters ├── matconvnet.xcodeproj │ ├── project.pbxproj │ ├── project.xcworkspace │ │ └── contents.xcworkspacedata │ └── xcshareddata │ │ └── xcschemes │ │ ├── matconv CPU.xcscheme │ │ ├── matconv GPU.xcscheme │ │ └── matconv cuDNN.xcscheme ├── matlab │ ├── +dagnn │ │ ├── @DagNN │ │ │ ├── DagNN.m │ │ │ ├── addLayer.m │ │ │ ├── eval.m │ │ │ ├── fromSimpleNN.m │ │ │ ├── getVarReceptiveFields.m │ │ │ ├── getVarSizes.m │ │ │ ├── initParams.m │ │ │ ├── loadobj.m │ │ │ ├── move.m │ │ │ ├── print.m │ │ │ ├── rebuild.m │ │ │ ├── removeLayer.m │ │ │ ├── renameLayer.m │ │ │ ├── renameVar.m │ │ │ ├── reset.m │ │ │ ├── saveobj.m │ │ │ ├── setLayerInputs.m │ │ │ ├── setLayerOutputs.m │ │ │ └── setLayerParams.m │ │ ├── AffineGridGenerator.m │ │ ├── BatchNorm.m │ │ ├── BilinearSampler.m │ │ ├── Concat.m │ │ ├── Conv.m │ │ ├── ConvTranspose.m │ │ ├── Crop.m │ │ ├── DropOut.m │ │ ├── ElementWise.m │ │ ├── Filter.m │ │ ├── LRN.m │ │ ├── Layer.m │ │ ├── Loss.m │ │ ├── MaskGating.m │ │ ├── NormOffset.m │ │ ├── 
Pooling.m │ │ ├── ROIPooling.m │ │ ├── ReLU.m │ │ ├── Scale.m │ │ ├── Sigmoid.m │ │ ├── SoftMax.m │ │ ├── SpatialNorm.m │ │ ├── Sum.m │ │ └── UniformScalingGridGenerator.m │ ├── ParameterServer.m │ ├── compatibility │ │ └── parallel │ │ │ ├── gather.m │ │ │ ├── labindex.m │ │ │ └── numlabs.m │ ├── simplenn │ │ ├── vl_simplenn.m │ │ ├── vl_simplenn_diagnose.m │ │ ├── vl_simplenn_display.m │ │ ├── vl_simplenn_move.m │ │ ├── vl_simplenn_start_parserv.m │ │ └── vl_simplenn_tidy.m │ ├── src │ │ ├── bits │ │ │ ├── data.cpp │ │ │ ├── data.cu │ │ │ ├── data.hpp │ │ │ ├── datacu.cu │ │ │ ├── datacu.hpp │ │ │ ├── datamex.cpp │ │ │ ├── datamex.cu │ │ │ ├── datamex.hpp │ │ │ ├── impl │ │ │ │ ├── bilinearsampler.hpp │ │ │ │ ├── bilinearsampler_cpu.cpp │ │ │ │ ├── bilinearsampler_gpu.cu │ │ │ │ ├── blashelper.hpp │ │ │ │ ├── bnorm.hpp │ │ │ │ ├── bnorm_cpu.cpp │ │ │ │ ├── bnorm_gpu.cu │ │ │ │ ├── copy.hpp │ │ │ │ ├── copy_cpu.cpp │ │ │ │ ├── copy_gpu.cu │ │ │ │ ├── cudnnhelper.hpp │ │ │ │ ├── fast_mutex.h │ │ │ │ ├── im2row.hpp │ │ │ │ ├── im2row_cpu.cpp │ │ │ │ ├── im2row_gpu.cu │ │ │ │ ├── imread_gdiplus.cpp │ │ │ │ ├── imread_helpers.hpp │ │ │ │ ├── imread_libjpeg.cpp │ │ │ │ ├── imread_quartz.cpp │ │ │ │ ├── nnbias_blas.hpp │ │ │ │ ├── nnbias_cudnn.cu │ │ │ │ ├── nnbias_cudnn.hpp │ │ │ │ ├── nnbilinearsampler_cudnn.cu │ │ │ │ ├── nnbilinearsampler_cudnn.hpp │ │ │ │ ├── nnbnorm_cudnn.cu │ │ │ │ ├── nnbnorm_cudnn.hpp │ │ │ │ ├── nnconv_blas.hpp │ │ │ │ ├── nnconv_cudnn.cu │ │ │ │ ├── nnconv_cudnn.hpp │ │ │ │ ├── nnpooling_cudnn.cu │ │ │ │ ├── nnpooling_cudnn.hpp │ │ │ │ ├── normalize.hpp │ │ │ │ ├── normalize_cpu.cpp │ │ │ │ ├── normalize_gpu.cu │ │ │ │ ├── pooling.hpp │ │ │ │ ├── pooling_cpu.cpp │ │ │ │ ├── pooling_gpu.cu │ │ │ │ ├── roipooling.hpp │ │ │ │ ├── roipooling_cpu.cpp │ │ │ │ ├── roipooling_gpu.cu │ │ │ │ ├── sharedmem.cuh │ │ │ │ ├── subsample.hpp │ │ │ │ ├── subsample_cpu.cpp │ │ │ │ ├── subsample_gpu.cu │ │ │ │ ├── tinythread.cpp │ │ │ │ └── tinythread.h │ │ │ ├── imread.cpp │ │ │ ├── imread.hpp │ │ │ ├── mexutils.h │ │ │ ├── nnbias.cpp │ │ │ ├── nnbias.cu │ │ │ ├── nnbias.hpp │ │ │ ├── nnbilinearsampler.cpp │ │ │ ├── nnbilinearsampler.cu │ │ │ ├── nnbilinearsampler.hpp │ │ │ ├── nnbnorm.cpp │ │ │ ├── nnbnorm.cu │ │ │ ├── nnbnorm.hpp │ │ │ ├── nnconv.cpp │ │ │ ├── nnconv.cu │ │ │ ├── nnconv.hpp │ │ │ ├── nnfullyconnected.cpp │ │ │ ├── nnfullyconnected.cu │ │ │ ├── nnfullyconnected.hpp │ │ │ ├── nnnormalize.cpp │ │ │ ├── nnnormalize.cu │ │ │ ├── nnnormalize.hpp │ │ │ ├── nnpooling.cpp │ │ │ ├── nnpooling.cu │ │ │ ├── nnpooling.hpp │ │ │ ├── nnroipooling.cpp │ │ │ ├── nnroipooling.cu │ │ │ ├── nnroipooling.hpp │ │ │ ├── nnsubsample.cpp │ │ │ ├── nnsubsample.cu │ │ │ └── nnsubsample.hpp │ │ ├── config │ │ │ ├── mex_CUDA_glnxa64.sh │ │ │ ├── mex_CUDA_glnxa64.xml │ │ │ ├── mex_CUDA_maci64.sh │ │ │ └── mex_CUDA_maci64.xml │ │ ├── vl_cudatool.cpp │ │ ├── vl_cudatool.cu │ │ ├── vl_imreadjpeg.cpp │ │ ├── vl_imreadjpeg.cu │ │ ├── vl_imreadjpeg_old.cpp │ │ ├── vl_imreadjpeg_old.cu │ │ ├── vl_nnbilinearsampler.cpp │ │ ├── vl_nnbilinearsampler.cu │ │ ├── vl_nnbnorm.cpp │ │ ├── vl_nnbnorm.cu │ │ ├── vl_nnconv.cpp │ │ ├── vl_nnconv.cu │ │ ├── vl_nnconvt.cpp │ │ ├── vl_nnconvt.cu │ │ ├── vl_nnnormalize.cpp │ │ ├── vl_nnnormalize.cu │ │ ├── vl_nnpool.cpp │ │ ├── vl_nnpool.cu │ │ ├── vl_nnroipool.cpp │ │ ├── vl_nnroipool.cu │ │ ├── vl_taccummex.cpp │ │ ├── vl_taccummex.cu │ │ ├── vl_tmove.cpp │ │ └── vl_tmove.cu │ ├── vl_argparse.m │ ├── vl_compilenn.m │ ├── vl_imreadjpeg.m │ ├── vl_nnbilinearsampler.m │ 
├── vl_nnbnorm.m │ ├── vl_nnconcat.m │ ├── vl_nnconv.m │ ├── vl_nnconvt.m │ ├── vl_nncrop.m │ ├── vl_nndropout.m │ ├── vl_nnloss.m │ ├── vl_nnnoffset.m │ ├── vl_nnnormalize.m │ ├── vl_nnnormalizelp.m │ ├── vl_nnpdist.m │ ├── vl_nnpool.m │ ├── vl_nnrelu.m │ ├── vl_nnroipool.m │ ├── vl_nnsigmoid.m │ ├── vl_nnsoftmax.m │ ├── vl_nnsoftmaxloss.m │ ├── vl_nnspnorm.m │ ├── vl_rootnn.m │ ├── vl_setupnn.m │ ├── vl_taccum.m │ ├── vl_tmove.m │ └── xtest │ │ ├── suite │ │ ├── Scale.m │ │ ├── nnbilinearsampler.m │ │ ├── nnbnorm.m │ │ ├── nnconcat.m │ │ ├── nnconv.m │ │ ├── nnconvt.m │ │ ├── nndagnn.m │ │ ├── nndropout.m │ │ ├── nnloss.m │ │ ├── nnmnist.m │ │ ├── nnnormalize.m │ │ ├── nnnormalizelp.m │ │ ├── nnoffset.m │ │ ├── nnpdist.m │ │ ├── nnpool.m │ │ ├── nnrelu.m │ │ ├── nnroipool.m │ │ ├── nnsigmoid.m │ │ ├── nnsimplenn.m │ │ ├── nnsoftmax.m │ │ ├── nnsoftmaxloss.m │ │ ├── nnspnorm.m │ │ ├── nntest.m │ │ └── tmovemex.m │ │ ├── vl_bench_bnorm.m │ │ ├── vl_bench_imreadjpeg.m │ │ ├── vl_nnbnorm_old.m │ │ ├── vl_test_bnorm.m │ │ ├── vl_test_economic_relu.m │ │ ├── vl_test_gpureset.m │ │ ├── vl_test_imreadjpeg.m │ │ ├── vl_test_print.m │ │ └── vl_testnn.m └── utils │ ├── evaluate_ref_models.m │ ├── get-file.sh │ ├── import-caffe.py │ ├── import-fast-rcnn.sh │ ├── import-fcn.sh │ ├── import-googlenet.sh │ ├── import-ref-models.sh │ ├── import-resnet.sh │ ├── layers.py │ ├── model2dot.m │ ├── preprocess-imagenet.sh │ ├── proto │ ├── __init__.py │ ├── caffe.proto │ ├── caffe_0115.proto │ ├── caffe_0115_pb2.py │ ├── caffe_6e3916.proto │ ├── caffe_6e3916_pb2.py │ ├── caffe_b590f1d.proto │ ├── caffe_b590f1d_pb2.py │ ├── caffe_fastrcnn.proto │ ├── caffe_fastrcnn_pb2.py │ ├── caffe_old.proto │ ├── caffe_old_pb2.py │ ├── caffe_pb2.py │ ├── get-protos.sh │ ├── googlenet_prototxt_patch.diff │ ├── vgg_caffe.proto │ ├── vgg_caffe_pb2.py │ └── vgg_synset_words.txt │ ├── simplenn_caffe_compare.m │ ├── simplenn_caffe_deploy.m │ ├── simplenn_caffe_testdeploy.m │ ├── test_examples.m │ └── tidy_ref_models.m └── myFunctions ├── accumulate_gradients.m ├── addCombo_conv_BN_relu.m ├── extractStats.m ├── getRawCounts.m ├── index2RGBlabel.m ├── loadState.m ├── myfindLastCheckpoint.m ├── plotLearningCurves.m ├── prepareGPUs.m ├── saveState.m ├── showDagNetFlow.m └── switchFigure.m /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig00_visualization.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig00_visualization.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig01_visualization_looping.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig01_visualization_looping.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig02_showcaseSamples.bmp: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig02_showcaseSamples.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig03_showcaseSemanticMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig03_showcaseSemanticMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig04_showcaseInstanceMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig04_showcaseInstanceMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig05_predInstanceMask-loop0.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig05_predInstanceMask-loop0.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig06_predInstanceMask-loop1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig06_predInstanceMask-loop1.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig07_predInstanceMask-loop2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig07_predInstanceMask-loop2.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig08_predInstanceMask-loop3.bmp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig08_predInstanceMask-loop3.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig09_predInstanceMask-loop4.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig09_predInstanceMask-loop4.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig10_predInstanceMask-loop5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/fig10_predInstanceMask-loop5.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/results.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main001_instSeg_v1_absEucMM_visualization_epoch-31/results.mat -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig00_visualization.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig00_visualization.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig01_visualization_looping1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig01_visualization_looping1.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig02_showcaseSamples.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig02_showcaseSamples.bmp 
-------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig03_showcaseSemanticMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig03_showcaseSemanticMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig04_showcaseInstanceMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig04_showcaseInstanceMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig05_predInstanceMask-loop0.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig05_predInstanceMask-loop0.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig06_predInstanceMask-loop1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig06_predInstanceMask-loop1.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig07_predInstanceMask-loop2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig07_predInstanceMask-loop2.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig08_predInstanceMask-loop3.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig08_predInstanceMask-loop3.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig09_predInstanceMask-loop4.bmp: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig09_predInstanceMask-loop4.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig10_predInstanceMask-loop5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/fig10_predInstanceMask-loop5.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/results.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main002_instSeg_v1_ftAbsEucMM_epoch83_epoch-59/results.mat -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig00_visualization.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig00_visualization.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping1.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping2.jpg -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig01_visualization_looping3.jpg 
-------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig02_showcaseSamples.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig02_showcaseSamples.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig03_showcaseSemanticMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig03_showcaseSemanticMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig04_showcaseInstanceMask.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig04_showcaseInstanceMask.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig05_predInstanceMask-loop0.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig05_predInstanceMask-loop0.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig06_predInstanceMask-loop1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig06_predInstanceMask-loop1.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig07_predInstanceMask-loop2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig07_predInstanceMask-loop2.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig08_predInstanceMask-loop3.bmp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig08_predInstanceMask-loop3.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig09_predInstanceMask-loop4.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig09_predInstanceMask-loop4.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig10_predInstanceMask-loop5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/fig10_predInstanceMask-loop5.bmp -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/results.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/figFolder/main007_instSeg_v1_absEucMM_epoch-83/results.mat -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/fun4MeanShift/getBatchWrapper4toyDigitV2.m: -------------------------------------------------------------------------------- 1 | % return a get batch function 2 | % ------------------------------------------------------------------------- 3 | function fn = getBatchWrapper4toyDigitV2(opts) 4 | % ------------------------------------------------------------------------- 5 | fn = @(images, mode) getBatch_dict4toyDigitV2(images, mode, opts) ; 6 | end 7 | 8 | % ------------------------------------------------------------------------- 9 | function [imBatch, semanticMaskBatch, instanceMaskBatch, weightBatch] = getBatch_dict4toyDigitV2(images, mode, opts) 10 | % ------------------------------------------------------------------------- 11 | %images = strcat([imdb.path_to_dataset filesep], imdb.(mode).(batch) ) ; 12 | [imBatch, semanticMaskBatch, instanceMaskBatch, weightBatch] = getImgBatch4toyDigitV2(images, mode, opts, 'prefetch', nargout == 0) ; 13 | end 14 | -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/fun4MeanShift/main001_instSeg_proj3Dsphere.m: -------------------------------------------------------------------------------- 1 | clc; 2 | close all 3 | %% which one to show? 
4 | idx = 24; 5 | coordIndices = [1,2,3]; 6 | 7 | imgFig1 = figure(1); 8 | set(imgFig1, 'Position', [100 100 1400 900]) % [1 1 width height] 9 | 10 | subplot(2,2,1); 11 | imagesc(imgMat(:,:,:,idx)); axis off image; 12 | subplot(2,2,2); 13 | imagesc(instanceMaskMat(:,:,:,idx)); axis off image; 14 | subplot(2,2,3); 15 | A = (predInstanceMaskMat0(:,:,coordIndices,idx) + 1) / 2; 16 | imagesc(A); axis off image; 17 | %% 3D surface 18 | subplot(2,2,4); 19 | 20 | r = 1; 21 | [x,y,z] = sphere(50); 22 | x0 = 0; y0 = 0; z0 = 0; 23 | x = x*r + x0; 24 | y = y*r + y0; 25 | z = z*r + z0; 26 | 27 | % figure 28 | lightGrey = 0.7*[1 1 1]; % It looks better if the lines are lighter 29 | surface(x,y,z, 'FaceColor', 'none', 'EdgeColor',lightGrey) 30 | hold on 31 | %% points 32 | hold on; 33 | points = reshape(predInstanceMaskMat0(:,:,coordIndices,idx), [], 3); 34 | points = points'; 35 | pointsColor = points - (-1);%min(points(:)); % -1 36 | pointsColor = pointsColor ./ 2;%max(pointsColor(:)); 37 | for i = 1:size(points,2) 38 | plot3( points(1,i), points(2,i), points(3,i), 's', 'MarkerSize',3, 'MarkerFaceColor', pointsColor(:,i)', 'MarkerEdgeColor', pointsColor(:,i)'); 39 | end 40 | hold off; 41 | 42 | axis off square 43 | view([1 1 0.75]) % adjust the viewing angle 44 | zoom(1.4) 45 | 46 | %% save result 47 | if flagSaveFig 48 | export_fig( sprintf('%s/%04d_visualization_single.jpg', saveFolder, i) ); 49 | end 50 | 51 | -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/fun4MeanShift/meanshift_G_is_Gaussian.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_G_is_Gaussian < dagnn.ElementWise 2 | properties 3 | delta=0.1 4 | end 5 | properties % (Transient) 6 | numInputs 7 | SIZE_=[] 8 | end 9 | 10 | % [TODO]: current version only supports batchSize=1; need to extend to 11 | % multiple input images 12 | methods 13 | function outputs = forward(obj, inputs, params) 14 | obj.numInputs = numel(inputs); 15 | % obj.SIZE_ = inputs{2}; 16 | outputs{1} = exp((inputs{1}-1)/(obj.delta^2)); 17 | end 18 | 19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 20 | derInputs = cell(1, numel(inputs)); 21 | dzdy = derOutputs{1}; 22 | S = inputs{1}; 23 | derInputs{1} = dzdy.* ((1/obj.delta^2)* exp((S-1)/(obj.delta^2))); 24 | derParams = {} ; 25 | end 26 | 27 | function obj = meanshift_G_is_Gaussian(varargin) 28 | obj.load(varargin) ; 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/fun4MeanShift/meanshift_q_is_inv_d.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_q_is_inv_d < dagnn.ElementWise 2 | properties (Transient) 3 | numInputs 4 | SIZE_ 5 | end 6 | 7 | % [TODO]: current version only supports batchSize=1; need to extend to 8 | % multiple input images 9 | methods 10 | function outputs = forward(obj, inputs, params) 11 | obj.numInputs = numel(inputs); 12 | % obj.SIZE_ = inputs{2}; 13 | outputs{1} = (inputs{1}+0.0000001).^(-1); 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | derInputs = cell(1, numel(inputs)); 18 | dzdy = derOutputs{1}; 19 | derInputs{1} = dzdy .* (-1*(inputs{1}.^(-2))); 20 | derParams = {} ; 21 | end 22 | 23 | function obj = meanshift_q_is_inv_d(varargin) 24 | obj.load(varargin) ; 25 | end 26 | end 27 | end 28 | 
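The two hand-written DagNN layers above (`meanshift_G_is_Gaussian`, which maps a cosine-similarity matrix S to Gaussian affinities G = exp((S-1)/delta^2), and `meanshift_q_is_inv_d`, which inverts the degree vector d) implement their backward passes analytically. Below is a minimal, self-contained sketch for sanity-checking such an analytic gradient against a centered finite difference; the toy variables and the check itself are illustrative and not part of the repo.

```matlab
% Illustrative gradient check (assumption: plain MATLAB, no MatConvNet required)
% for the G = exp((S-1)/delta^2) forward/backward pair of meanshift_G_is_Gaussian.
delta = 0.1;                 % default bandwidth used by the layer above
S     = 2*rand(6) - 1;       % toy cosine-similarity values in [-1, 1]
dzdy  = randn(6);            % toy upstream gradient

fwd = @(S) exp((S - 1) / delta^2);                          % layer forward
bwd = @(S) dzdy .* ((1/delta^2) * exp((S - 1) / delta^2));  % layer backward (analytic)

% centered finite difference of L(S) = sum(dzdy .* fwd(S)) w.r.t. each entry of S
h = 1e-5;
numGrad = zeros(size(S));
for k = 1:numel(S)
    Sp = S; Sp(k) = Sp(k) + h;
    Sm = S; Sm(k) = Sm(k) - h;
    numGrad(k) = (sum(sum(dzdy .* fwd(Sp))) - sum(sum(dzdy .* fwd(Sm)))) / (2*h);
end
fprintf('max |analytic - numerical| = %g\n', max(max(abs(bwd(S) - numGrad))));
```

The same check applies to `meanshift_q_is_inv_d`; note that its backward uses -d.^(-2) while its forward adds a 1e-7 stabilizer, so a small mismatch is expected only when entries of d approach zero.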
-------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/fun4MeanShift/stitchImage2panel.m: -------------------------------------------------------------------------------- 1 | function showcaseSamples = stitchImage2panel(imgMat, panelSZ, M, N, st, ed) 2 | % num = 25; % the total number of example to show 3 | % M = 5; % the number of rows in the panel, each row shows N images as below 4 | % N = 5; % the number of columns in the panel, each column shows M images as above 5 | % panelSZ = round(64/2)*2; % the size (height/width) of the small square image 6 | 7 | 8 | showcaseSamples_R = imgMat(:,:,1,st:ed); 9 | showcaseSamples_R = reshape(showcaseSamples_R, [numel(showcaseSamples_R(:,:,1,1)), ed]); 10 | showcaseSamples_R = showStitch(showcaseSamples_R, [panelSZ panelSZ], M, N, 'whitelines', 'linewidth', 5); 11 | showcaseSamples_G = imgMat(:,:,2,st:ed); 12 | showcaseSamples_G = reshape(showcaseSamples_G, [numel(showcaseSamples_G(:,:,1,1)), ed]); 13 | showcaseSamples_G = showStitch(showcaseSamples_G, [panelSZ panelSZ], M, N, 'whitelines', 'linewidth', 5); 14 | showcaseSamples_B = imgMat(:,:,3,st:ed); 15 | showcaseSamples_B = reshape(showcaseSamples_B, [numel(showcaseSamples_B(:,:,1,1)), ed]); 16 | showcaseSamples_B = showStitch(showcaseSamples_B, [panelSZ panelSZ], M, N, 'whitelines', 'linewidth', 5); 17 | showcaseSamples = cat(3, showcaseSamples_R, showcaseSamples_G, showcaseSamples_B); -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/imdb_toydata_v3_from_mnist.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo1_tutorial_instance_segmentation/imdb_toydata_v3_from_mnist.mat -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/local_functions_demo1/loadMNISTImages.m: -------------------------------------------------------------------------------- 1 | function images = loadMNISTImages(filename) 2 | %loadMNISTImages returns a 28x28x[number of MNIST images] matrix containing 3 | %the raw MNIST images 4 | 5 | fp = fopen(filename, 'rb'); 6 | assert(fp ~= -1, ['Could not open ', filename, '']); 7 | 8 | magic = fread(fp, 1, 'int32', 0, 'ieee-be'); 9 | assert(magic == 2051, ['Bad magic number in ', filename, '']); 10 | 11 | numImages = fread(fp, 1, 'int32', 0, 'ieee-be'); 12 | numRows = fread(fp, 1, 'int32', 0, 'ieee-be'); 13 | numCols = fread(fp, 1, 'int32', 0, 'ieee-be'); 14 | 15 | images = fread(fp, inf, 'unsigned char'); 16 | images = reshape(images, numCols, numRows, numImages); 17 | images = permute(images,[2 1 3]); 18 | 19 | fclose(fp); 20 | 21 | % Reshape to #pixels x #examples 22 | images = reshape(images, size(images, 1) * size(images, 2), size(images, 3)); 23 | % Convert to double and rescale to [0,1] 24 | images = double(images) / 255; 25 | 26 | end 27 | -------------------------------------------------------------------------------- /demo1_tutorial_instance_segmentation/local_functions_demo1/loadMNISTLabels.m: -------------------------------------------------------------------------------- 1 | function labels = loadMNISTLabels(filename) 2 | %loadMNISTLabels returns a [number of MNIST images]x1 matrix containing 3 | %the labels for the MNIST images 4 | 5 | fp = fopen(filename, 'rb'); 6 | assert(fp ~= -1, ['Could not open ', 
filename, '']); 7 | 8 | magic = fread(fp, 1, 'int32', 0, 'ieee-be'); 9 | assert(magic == 2049, ['Bad magic number in ', filename, '']); 10 | 11 | numLabels = fread(fp, 1, 'int32', 0, 'ieee-be'); 12 | 13 | labels = fread(fp, inf, 'unsigned char'); 14 | 15 | assert(size(labels,1) == numLabels, 'Mismatch in label count'); 16 | 17 | fclose(fp); 18 | 19 | end 20 | -------------------------------------------------------------------------------- /demo2_boundary_detection/BoundaryLossLogistic.m: -------------------------------------------------------------------------------- 1 | classdef BoundaryLossLogistic < dagnn.Loss 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | sz = size(inputs{2}); 5 | mass = sz(1) * sz(2) + 1; 6 | 7 | outputs{1} = vl_nnloss_boundaryDet(inputs{1}, inputs{2}, [], ... 8 | 'loss', obj.loss, ... 9 | 'instanceWeights', 1./mass) ; 10 | n = obj.numAveraged ; 11 | m = n + size(inputs{1},4) ; 12 | obj.average = (n * obj.average + double(gather(outputs{1}))) / m ; 13 | obj.numAveraged = m ; 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | sz = size(inputs{2}); 18 | mass = sz(1) * sz(2) + 1; 19 | derInputs{1} = vl_nnloss_boundaryDet(inputs{1}, inputs{2}, derOutputs{1}, ... 20 | 'loss', obj.loss, ... 21 | 'instanceWeights', 1./mass) ; 22 | derInputs{2} = [] ; 23 | derParams = {} ; 24 | end 25 | 26 | function obj = BoundaryLossLogistic(varargin) 27 | obj.load(varargin) ; 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /demo2_boundary_detection/README.md: -------------------------------------------------------------------------------- 1 | # Learning to Group Pixels into Boundaries, Objectness, Segments, and Instances 2 | 3 | For papers, slides and posters, please refer to our [project page](http://www.ics.uci.edu/~skong2/SMMMSG.html "pixel-grouping"). 4 | 5 | 6 | 7 | 8 | This demo is for boundary detection. When downloading our trained models from the [google drive](https://drive.google.com/drive/u/1/folders/1MfWWToezy9E6Sv6jY7JfxoUo2igX42Wg), please copy the whole folder(s) from the link into the "models" directory. 9 | 10 | Running the script [main001_visualize.m](https://github.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/blob/master/demo2_boundary_detection/main001_visualize.m) shows the embedding visualization as well as the intermediate and final results. Note that you might need to compile a few mex functions used for boundary thinning (NMS), taken from Piotr Dollar's [edge toolbox](https://github.com/pdollar/edges); a compilation sketch follows this paragraph. If compilation fails, commenting out the related lines will still let the script run.
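A minimal compilation sketch for the boundary-thinning mex file, assuming a C++ compiler already configured via `mex -setup`; the OpenMP flags in the comment are an assumption mirroring the edge toolbox's own build instructions, and the plain call is usually sufficient.

```matlab
% Rebuild the NMS mex file only if the prebuilt binaries shipped with the demo
% (edgesNmsMex.mexa64 / .mexmaci64 / .mexw64) do not match your platform.
cd demo2_boundary_detection     % illustrative path; run from the repo root
mex edgesNmsMex.cpp             % basic build
% Optional (assumption, Linux/macOS): enable OpenMP as in Dollar's toolbox
% mex edgesNmsMex.cpp CXXFLAGS="\$CXXFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp"
```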
11 | 12 | 13 | 14 | If you find our model/method/dataset useful, please cite our work ([draft at arxiv](https://arxiv.org/abs/1712.08273)): 15 | 16 | @inproceedings{kong2018grouppixels, 17 | title={Recurrent Pixel Embedding for Instance Grouping}, 18 | author={Kong, Shu and Fowlkes, Charless}, 19 | booktitle={2018 Conference on Computer Vision and Pattern Recognition (CVPR)}, 20 | year={2018} 21 | } 22 | 23 | 24 | 25 | 26 | 27 | last update: 03/08/2018 28 | 29 | Shu Kong 30 | 31 | aimerykong At g-m-a-i-l dot com 32 | 33 | -------------------------------------------------------------------------------- /demo2_boundary_detection/data/104010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/104010.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/data/104010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/104010.mat -------------------------------------------------------------------------------- /demo2_boundary_detection/data/130066.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/130066.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/data/130066.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/130066.mat -------------------------------------------------------------------------------- /demo2_boundary_detection/data/65084.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/65084.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/data/65084.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/data/65084.mat -------------------------------------------------------------------------------- /demo2_boundary_detection/edgesNmsMex.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/edgesNmsMex.mexa64 -------------------------------------------------------------------------------- /demo2_boundary_detection/edgesNmsMex.mexmaci64: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/edgesNmsMex.mexmaci64 -------------------------------------------------------------------------------- /demo2_boundary_detection/edgesNmsMex.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/edgesNmsMex.mexw64 -------------------------------------------------------------------------------- /demo2_boundary_detection/figures/104010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/figures/104010.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/figures/130066.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/figures/130066.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/figures/65084.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/figures/65084.jpg -------------------------------------------------------------------------------- /demo2_boundary_detection/imdb.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo2_boundary_detection/imdb.mat -------------------------------------------------------------------------------- /demo2_boundary_detection/rescaleFeaMap.m: -------------------------------------------------------------------------------- 1 | function feaMap = rescaleFeaMap(feaMap) 2 | feaMap = feaMap - min(feaMap(:)); 3 | feaMap = feaMap ./ max(feaMap(:)); 4 | -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/basemodel/README.md: -------------------------------------------------------------------------------- 1 | # Download model 2 | 3 | Please download the basemodel and put it here from [google drive](https://drive.google.com/drive/u/1/folders/1Ii1RPiwB-SvQchnmRvSVEcGSCCrxvpHc) 4 | 5 | -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000215.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000215.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000215.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000215.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000234.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000234.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000234.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000234.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000359.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000359.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_000359.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_000359.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_002240.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_002240.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_002240.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_002240.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_006554.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_006554.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2008_006554.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2008_006554.mat 
-------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2010_001646.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2010_001646.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2010_001646.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2010_001646.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2010_005063.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2010_005063.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/images/2010_005063.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/images/2010_005063.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/imdb_complete_on_server.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/imdb_complete_on_server.mat -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/rescaleFeaMap.m: -------------------------------------------------------------------------------- 1 | function feaMap = rescaleFeaMap(feaMap) 2 | feaMap = feaMap - min(feaMap(:)); 3 | feaMap = feaMap ./ max(feaMap(:)); 4 | -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id1_summary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id1_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id1_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id1_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id2_summary.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id2_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id2_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id2_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id3_summary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id3_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id3_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id3_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id4_summary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id4_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id4_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id4_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id5_summary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id5_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id5_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id5_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id6_summary.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id6_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id6_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id6_summaryLoops.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id7_summary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id7_summary.jpg -------------------------------------------------------------------------------- /demo3_objectness_proposal_detection/results/id7_summaryLoops.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo3_objectness_proposal_detection/results/id7_summaryLoops.jpg -------------------------------------------------------------------------------- /demo4_InstSegTraining_VOC2012/README.md: -------------------------------------------------------------------------------- 1 | # Learning to Group Pixels into Boundaries, Objectness, Segments, and Instances 2 | 3 | For papers, slides and posters, please refer to our [project page](http://www.ics.uci.edu/~skong2/SMMMSG.html "pixel-grouping") 4 | 5 | 6 | ![alt text](https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/master/demo3_objectness_proposal_detection/results/id1_summary.jpg "visualization") 7 | 8 | This is a demo for training instance segmentation using the proposed method. 9 | Please download the initial basemodel from the [google drive](https://drive.google.com/drive/folders/15WHwfsNEV1I2cDxm9YGHSf_r2uHF1sQ-?usp=sharing), and put the whole folder under the directory "exp". 10 | Once the initial model is ready, feel free to run the three scripts with the name prefix "main00....m". 11 | As the names suggest, 12 | 13 | 1. "main000..." visualizes the results using the model specified inside the script; 14 | 2. "main001_trainScript.m" provides a demo to train the model (see the sketch after this list).
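In practice, the whole pipeline can be kicked off from inside this demo folder along the lines of the following minimal sketch; the exact "main000..." script name and the relative paths to the bundled libraries are assumptions here, so adjust them to match your checkout:

    % minimal sketch: assumes MatConvNet in ../libs is already compiled and the
    % downloaded basemodel folder has been placed under ./exp
    addpath(fullfile('..', 'libs', 'matconvnet-1.0-beta23_modifiedDagnn', 'matlab'));
    vl_setupnn;                                            % register MatConvNet paths
    addpath(genpath(fullfile('..', 'libs', 'layerExt')));  % extra DagNN layers used by the demo
    addpath(genpath(fullfile('..', 'libs', 'fun4MeanShift')));  % mean-shift grouping modules
    main000_metaStep0_visual_withMShift;                   % one of the "main000..." visualization scripts
    main001_trainScript;                                   % then launch training

The only hard requirement before running either script is that MatConvNet and the demo's custom layers are on the MATLAB path.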
15 | 16 | 17 | 18 | 19 | 20 | 21 | If you find our model/method/dataset useful, please cite our work ([arxiv manuscript](https://arxiv.org/abs/1712.08273)): 22 | 23 | @inproceedings{kong2018grouppixels, 24 | title={Recurrent Pixel Embedding for Instance Grouping}, 25 | author={Kong, Shu and Fowlkes, Charless}, 26 | booktitle={2018 Conference on Computer Vision and Pattern Recognition (CVPR)}, 27 | year={2018} 28 | } 29 | 30 | 31 | 32 | 33 | last update: 5/22/2018 34 | 35 | Shu Kong 36 | 37 | aimerykong At g-m-a-i-l dot com 38 | 39 | -------------------------------------------------------------------------------- /demo4_InstSegTraining_VOC2012/exp/README.md: -------------------------------------------------------------------------------- 1 | Please download the initial basemodel from the [google drive](https://drive.google.com/drive/folders/15WHwfsNEV1I2cDxm9YGHSf_r2uHF1sQ-?usp=sharing), and put the whole folder in this directory "exp". 2 | 3 | -------------------------------------------------------------------------------- /demo4_InstSegTraining_VOC2012/getBatchWrapper_augVOC2012.m: -------------------------------------------------------------------------------- 1 | % return a get batch function 2 | % ------------------------------------------------------------------------- 3 | function fn = getBatchWrapper_augVOC2012(opts) 4 | fn = @(images, mode) getBatch_dict(images, mode, opts) ; 5 | end 6 | 7 | 8 | function [imBatch, semanticMaskBatch, instanceMaskBatch, weightBatch] = getBatch_dict(images, mode, opts) 9 | [imBatch, semanticMaskBatch, instanceMaskBatch, weightBatch] = getImgBatch_augVOC2012(images, mode, opts, 'prefetch', nargout == 0) ; 10 | end 11 | -------------------------------------------------------------------------------- /demo4_InstSegTraining_VOC2012/imdb_complete_on_server.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo4_InstSegTraining_VOC2012/imdb_complete_on_server.mat -------------------------------------------------------------------------------- /demo4_InstSegTraining_VOC2012/rescaleFeaMap.m: -------------------------------------------------------------------------------- 1 | function feaMap = rescaleFeaMap(feaMap) 2 | feaMap = feaMap - min(feaMap(:)); 3 | feaMap = feaMap ./ max(feaMap(:)); 4 | -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/L2RegressionLoss.m: -------------------------------------------------------------------------------- 1 | classdef L2RegressionLoss < dagnn.Loss 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | sz = size(inputs{2}); 5 | mass = sz(1) * sz(2) + 1; 6 | 7 | outputs{1} = vl_nnloss_regression(inputs{1}, inputs{2}, [], ... 8 | 'loss', obj.loss, ... 9 | 'instanceWeights', 1./mass) ; 10 | n = obj.numAveraged ; 11 | m = n + size(inputs{1},4) ; 12 | obj.average = (n * obj.average + double(gather(outputs{1}))) / m ; 13 | obj.numAveraged = m ; 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | sz = size(inputs{2}); 18 | mass = sz(1) * sz(2) + 1; 19 | derInputs{1} = vl_nnloss_regression(inputs{1}, inputs{2}, derOutputs{1}, ... 20 | 'loss', obj.loss, ... 
21 | 'instanceWeights', 1./mass) ; 22 | derInputs{2} = [] ; 23 | derParams = {} ; 24 | end 25 | 26 | function obj = L2RegressionLoss(varargin) 27 | obj.load(varargin) ; 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/fun4MShift_analysis/addOneLoop_forMeanShiftGrouping.m: -------------------------------------------------------------------------------- 1 | function [net, sName] = addOneLoop_forMeanShiftGrouping(net, sName, loopIdx, GaussianBandwidth) 2 | 3 | if ~exist('GaussianBandwidth', 'var') 4 | GaussianBandwidth = 0.1; 5 | end 6 | %% 7 | pre_input_layer = sName; 8 | 9 | lName = sprintf('loop%d_meanshift_S_is_XX', loopIdx); 10 | net.addLayer(lName, ... 11 | meanshift_S_is_XX(), ... 12 | {sName}, lName); 13 | sName = lName; 14 | 15 | lName = sprintf('loop%d_meanshift_G_is_Gaussian', loopIdx); 16 | net.addLayer(lName, ... 17 | meanshift_G_is_Gaussian('delta', GaussianBandwidth), ... 18 | {sName}, lName); 19 | G_layer = lName; 20 | sName = lName; 21 | 22 | 23 | lName = sprintf('loop%d_meanshift_d_is_sumG', loopIdx); 24 | net.addLayer(lName, ... 25 | meanshift_d_is_sumG(), ... 26 | {sName}, lName); 27 | sName = lName; 28 | 29 | 30 | lName = sprintf('loop%d_meanshift_q_is_inv_d', loopIdx); 31 | net.addLayer(lName, ... 32 | meanshift_q_is_inv_d(), ... 33 | {sName}, lName); 34 | sName = lName; 35 | 36 | 37 | lName = sprintf('loop%d_meanshift_P_is_G_diag_q', loopIdx); 38 | net.addLayer(lName, ... 39 | meanshift_P_is_G_diag_q(), ... 40 | {G_layer, sName}, lName); 41 | sName = lName; 42 | 43 | 44 | lName = sprintf('loop%d_meanshift_Y_is_XP', loopIdx); 45 | net.addLayer(lName, ... 46 | meanshift_Y_is_XP(), ... 47 | {pre_input_layer, sName}, lName); 48 | sName = lName; 49 | 50 | 51 | -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/fun4MShift_analysis/meanshift_G_is_Gaussian.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_G_is_Gaussian < dagnn.ElementWise 2 | properties 3 | delta=0.1 4 | end 5 | properties % (Transient) 6 | numInputs 7 | SIZE_=[] 8 | end 9 | 10 | % [TODO]: current version only supports batchSize=1; need to extend to 11 | % multiple input images 12 | methods 13 | function outputs = forward(obj, inputs, params) 14 | obj.numInputs = numel(inputs); 15 | % obj.SIZE_ = inputs{2}; 16 | %outputs{1} = exp((inputs{1}-1)/(obj.delta^2)); 17 | 18 | outputs{1} = exp( -0.5*(inputs{1}.^2)/(obj.delta^2)); 19 | end 20 | 21 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 22 | derInputs = cell(1, numel(inputs)); 23 | dzdy = derOutputs{1}; 24 | S = inputs{1}; 25 | %derInputs{1} = dzdy.* ((1/obj.delta^2)* exp((S-1)/(obj.delta^2))); 26 | 27 | derInputs{1} = dzdy.* (-1/(obj.delta^2)*inputs{1}).*exp(-0.5*(inputs{1}.^2)/(obj.delta^2)); 28 | 29 | derParams = {} ; 30 | end 31 | 32 | function obj = meanshift_G_is_Gaussian(varargin) 33 | obj.load(varargin) ; 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/fun4MShift_analysis/meanshift_q_is_inv_d.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_q_is_inv_d < dagnn.ElementWise 2 | properties (Transient) 3 | numInputs 4 | SIZE_ 5 | end 6 | 7 | % [TODO]: current version only supports batchSize=1; need to extend to 8 | % multiple input images 9 | methods 10 | function 
outputs = forward(obj, inputs, params) 11 | obj.numInputs = numel(inputs); 12 | % obj.SIZE_ = inputs{2}; 13 | outputs{1} = (inputs{1}+0.0000001).^(-1); 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | derInputs = cell(1, numel(inputs)); 18 | dzdy = derOutputs{1}; 19 | derInputs{1} = dzdy .* (-1*(inputs{1}.^(-2))); 20 | derParams = {} ; 21 | end 22 | 23 | function obj = meanshift_q_is_inv_d(varargin) 24 | obj.load(varargin) ; 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_3Loops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_3Loops.png -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_3Loops_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_3Loops_summary.png -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_5Loops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_5Loops.png -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_5Loops_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_5Loops_summary.png -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_7Loops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_7Loops.png -------------------------------------------------------------------------------- /demo5_analysis_MShift_gradient/simulation07_GBMS_7Loops_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/demo5_analysis_MShift_gradient/simulation07_GBMS_7Loops_summary.png -------------------------------------------------------------------------------- /figure_to_show/demo_boundaryDet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/demo_boundaryDet.png -------------------------------------------------------------------------------- /figure_to_show/demo_combo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/demo_combo.png -------------------------------------------------------------------------------- /figure_to_show/demo_combo_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/demo_combo_v2.png -------------------------------------------------------------------------------- /figure_to_show/demo_instSeg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/demo_instSeg.png -------------------------------------------------------------------------------- /figure_to_show/demo_testImg_boundaryDet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/demo_testImg_boundaryDet.jpg -------------------------------------------------------------------------------- /figure_to_show/fig00_visualization.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/fig00_visualization.jpg -------------------------------------------------------------------------------- /figure_to_show/fig01_visualization_looping.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/fig01_visualization_looping.jpg -------------------------------------------------------------------------------- /figure_to_show/fig02_showcaseSamples.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/fig02_showcaseSamples.bmp -------------------------------------------------------------------------------- /figure_to_show/fig05_predInstanceMask-loop0.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/fig05_predInstanceMask-loop0.bmp -------------------------------------------------------------------------------- /figure_to_show/fig10_predInstanceMask-loop5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/fig10_predInstanceMask-loop5.bmp -------------------------------------------------------------------------------- /figure_to_show/softmax_net-train.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/figure_to_show/softmax_net-train.png -------------------------------------------------------------------------------- /libs/exportFig/.gitignore: -------------------------------------------------------------------------------- 1 | /.ignore 2 | *.txt 3 | *.asv 4 | *~ 5 | *.mex* 6 | -------------------------------------------------------------------------------- /libs/exportFig/ImageSelection.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/exportFig/ImageSelection.class -------------------------------------------------------------------------------- /libs/exportFig/ImageSelection.java: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/exportFig/ImageSelection.java -------------------------------------------------------------------------------- /libs/exportFig/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Oliver J. Woodford, Yair M. Altman 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the {organization} nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /libs/exportFig/export_fig.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/exportFig/export_fig.m -------------------------------------------------------------------------------- /libs/exportFig/pdf2eps.m: -------------------------------------------------------------------------------- 1 | %PDF2EPS Convert a pdf file to eps format using pdftops 2 | % 3 | % Examples: 4 | % pdf2eps source dest 5 | % 6 | % This function converts a pdf file to eps format. 7 | % 8 | % This function requires that you have pdftops, from the Xpdf suite of 9 | % functions, installed on your system. This can be downloaded from: 10 | % http://www.foolabs.com/xpdf 11 | % 12 | %IN: 13 | % source - filename of the source pdf file to convert. The filename is 14 | % assumed to already have the extension ".pdf". 15 | % dest - filename of the destination eps file. The filename is assumed to 16 | % already have the extension ".eps". 17 | 18 | % Copyright (C) Oliver Woodford 2009-2010 19 | 20 | % Thanks to Aldebaro Klautau for reporting a bug when saving to 21 | % non-existant directories. 22 | 23 | function pdf2eps(source, dest) 24 | % Construct the options string for pdftops 25 | options = ['-q -paper match -eps -level2 "' source '" "' dest '"']; 26 | % Convert to eps using pdftops 27 | [status, message] = pdftops(options); 28 | % Check for error 29 | if status 30 | % Report error 31 | if isempty(message) 32 | error('Unable to generate eps. Check destination directory is writable.'); 33 | else 34 | error(message); 35 | end 36 | end 37 | % Fix the DSC error created by pdftops 38 | fid = fopen(dest, 'r+'); 39 | if fid == -1 40 | % Cannot open the file 41 | return 42 | end 43 | fgetl(fid); % Get the first line 44 | str = fgetl(fid); % Get the second line 45 | if strcmp(str(1:min(13, end)), '% Produced by') 46 | fseek(fid, -numel(str)-1, 'cof'); 47 | fwrite(fid, '%'); % Turn ' ' into '%' 48 | end 49 | fclose(fid); 50 | end 51 | 52 | -------------------------------------------------------------------------------- /libs/exportFig/print2eps.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/exportFig/print2eps.m -------------------------------------------------------------------------------- /libs/exportFig/read_write_entire_textfile.m: -------------------------------------------------------------------------------- 1 | %READ_WRITE_ENTIRE_TEXTFILE Read or write a whole text file to/from memory 2 | % 3 | % Read or write an entire text file to/from memory, without leaving the 4 | % file open if an error occurs. 5 | % 6 | % Reading: 7 | % fstrm = read_write_entire_textfile(fname) 8 | % Writing: 9 | % read_write_entire_textfile(fname, fstrm) 10 | % 11 | %IN: 12 | % fname - Pathname of text file to be read in. 13 | % fstrm - String to be written to the file, including carriage returns. 14 | % 15 | %OUT: 16 | % fstrm - String read from the file. If an fstrm input is given the 17 | % output is the same as that input. 
18 | 19 | function fstrm = read_write_entire_textfile(fname, fstrm) 20 | modes = {'rt', 'wt'}; 21 | writing = nargin > 1; 22 | fh = fopen(fname, modes{1+writing}); 23 | if fh == -1 24 | error('Unable to open file %s.', fname); 25 | end 26 | try 27 | if writing 28 | fwrite(fh, fstrm, 'char*1'); 29 | else 30 | fstrm = fread(fh, '*char')'; 31 | end 32 | catch ex 33 | fclose(fh); 34 | rethrow(ex); 35 | end 36 | fclose(fh); 37 | end 38 | -------------------------------------------------------------------------------- /libs/exportFig/using_hg2.m: -------------------------------------------------------------------------------- 1 | %USING_HG2 Determine if the HG2 graphics engine is used 2 | % 3 | % tf = using_hg2(fig) 4 | % 5 | %IN: 6 | % fig - handle to the figure in question. 7 | % 8 | %OUT: 9 | % tf - boolean indicating whether the HG2 graphics engine is being used 10 | % (true) or not (false). 11 | 12 | % 19/06/2015 - Suppress warning in R2015b; cache result for improved performance 13 | 14 | function tf = using_hg2(fig) 15 | persistent tf_cached 16 | if isempty(tf_cached) 17 | try 18 | if nargin < 1, fig = figure('visible','off'); end 19 | oldWarn = warning('off','MATLAB:graphicsversion:GraphicsVersionRemoval'); 20 | try 21 | % This generates a [supressed] warning in R2015b: 22 | tf = ~graphicsversion(fig, 'handlegraphics'); 23 | catch 24 | tf = verLessThan('matlab','8.4'); % =R2014b 25 | end 26 | warning(oldWarn); 27 | catch 28 | tf = false; 29 | end 30 | if nargin < 1, delete(fig); end 31 | tf_cached = tf; 32 | else 33 | tf = tf_cached; 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /libs/fun4MeanShift/meanshift_G_is_Gaussian.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_G_is_Gaussian < dagnn.ElementWise 2 | properties 3 | delta=0.1 4 | end 5 | properties % (Transient) 6 | numInputs 7 | SIZE_=[] 8 | end 9 | 10 | % [TODO]: current version only supports batchSize=1; need to extend to 11 | % multiple input images 12 | methods 13 | function outputs = forward(obj, inputs, params) 14 | obj.numInputs = numel(inputs); 15 | % obj.SIZE_ = inputs{2}; 16 | outputs{1} = exp((inputs{1}-1)/(obj.delta^2)); 17 | end 18 | 19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 20 | derInputs = cell(1, numel(inputs)); 21 | dzdy = derOutputs{1}; 22 | S = inputs{1}; 23 | derInputs{1} = dzdy.* ((1/obj.delta^2)* exp((S-1)/(obj.delta^2))); 24 | derParams = {} ; 25 | end 26 | 27 | function obj = meanshift_G_is_Gaussian(varargin) 28 | obj.load(varargin) ; 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /libs/fun4MeanShift/meanshift_d_is_sumG.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_d_is_sumG < dagnn.ElementWise 2 | properties % (Transient) 3 | numInputs 4 | SIZE_ 5 | end 6 | 7 | % [TODO]: current version only supports batchSize=1; need to extend to 8 | % multiple input images 9 | methods 10 | function outputs = forward(obj, inputs, params) 11 | obj.numInputs = numel(inputs); 12 | % [hw, ~, ~, bs] = size(inputs{1}); 13 | % obj.SIZE_ = inputs{2}; 14 | % gpuMode = isa(inputs{1}, 'gpuArray'); 15 | % if gpuMode 16 | % Y = gpuArray(zeros(hw, 1, 1, bs, 'single')); 17 | % else 18 | % Y = zeros(hw, 1, 1, bs, 'single'); 19 | % end 20 | outputs{1} = sum(inputs{1}, 2)+0.000001; 21 | end 22 | 23 | function [derInputs, derParams] = backward(obj, inputs, params, 
derOutputs) 24 | derInputs = cell(1, numel(inputs)); 25 | dzdy = derOutputs{1}; 26 | gpuMode = isa(inputs{1}, 'gpuArray'); 27 | if gpuMode 28 | dzdx = gpuArray(zeros(size(inputs{1},1), size(inputs{1},2), size(inputs{1},3), size(inputs{1},4), 'single')); 29 | else 30 | dzdx = zeros(size(inputs{1},1), size(inputs{1},2), size(inputs{1},3), size(inputs{1},4), 'single'); 31 | end 32 | for i = 1:size(inputs{1},4) 33 | dzdx(:,:,:,i) = repmat(dzdy(:,:,:,i)', size(dzdx,1), 1); 34 | end 35 | derInputs{1} = dzdx; 36 | derParams = {} ; 37 | end 38 | 39 | function obj = meanshift_d_is_sumG(varargin) 40 | obj.load(varargin) ; 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /libs/fun4MeanShift/meanshift_q_is_inv_d.m: -------------------------------------------------------------------------------- 1 | classdef meanshift_q_is_inv_d < dagnn.ElementWise 2 | properties (Transient) 3 | numInputs 4 | SIZE_ 5 | end 6 | 7 | % [TODO]: current version only supports batchSize=1; need to extend to 8 | % multiple input images 9 | methods 10 | function outputs = forward(obj, inputs, params) 11 | obj.numInputs = numel(inputs); 12 | % obj.SIZE_ = inputs{2}; 13 | outputs{1} = (inputs{1}+0.0000001).^(-1); 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | derInputs = cell(1, numel(inputs)); 18 | dzdy = derOutputs{1}; 19 | derInputs{1} = dzdy .* (-1*(inputs{1}.^(-2))); 20 | derParams = {} ; 21 | end 22 | 23 | function obj = meanshift_q_is_inv_d(varargin) 24 | obj.load(varargin) ; 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /libs/layerExt/AddBilinearUpSampling.m: -------------------------------------------------------------------------------- 1 | function net = AddBilinearUpSampling(net, var_to_up_sample, upsampled_var, upsample_fac, opts) 2 | filters = single(bilinear_u(upsample_fac*2, opts.num_classes, opts.num_classes)) ; 3 | crop = upsample_fac/2; 4 | deconv_name = ['dec_' upsampled_var]; 5 | net.addLayer(deconv_name, ... 6 | dagnn.ConvTranspose('size', size(filters), ... 7 | 'upsample', upsample_fac, ... 8 | 'crop', [crop crop crop crop], ... 9 | 'opts', {'cudnn','nocudnn'}, ... 10 | 'numGroups', opts.num_classes, ... 11 | 'hasBias', false), ... 12 | var_to_up_sample, upsampled_var, [deconv_name 'f']) ; 13 | f = net.getParamIndex([deconv_name 'f']) ; 14 | net.params(f).value = filters ; 15 | net.params(f).learningRate = 0 ; 16 | net.params(f).weightDecay = 1 ; 17 | net.params(f).trainMethod = 'nothing'; 18 | 19 | -------------------------------------------------------------------------------- /libs/layerExt/LRRAddMasking.m: -------------------------------------------------------------------------------- 1 | function net = LLRAddMasking(net, upsample_fac, bilinear_up, upsample_2x_per_layer) 2 | 3 | if upsample_2x_per_layer 4 | post_name = '_bil_x2'; 5 | else 6 | post_name = ''; 7 | end 8 | up_name = [num2str(upsample_fac) 'x']; 9 | pre_up_name = [num2str(2 * upsample_fac) 'x']; 10 | 11 | 12 | pool_size = (upsample_fac * 2) / bilinear_up + 1; 13 | assert(pool_size == 9); 14 | pad_size = floor(pool_size)/2; 15 | % Dilation of class probabilities 16 | net.addLayer(['prob_' pre_up_name], dagnn.SoftMax(), ['prediction_' pre_up_name post_name], ['prob_' pre_up_name], {}); 17 | net.addLayer(['prob_dilate_' pre_up_name], dagnn.Pooling('stride', [1 1], 'poolSize', [pool_size pool_size], ... 
18 | 'pad', [pad_size, pad_size, pad_size, pad_size]), ['prob_' pre_up_name], ['prob_' pre_up_name '_dilate'], {}); 19 | 20 | % Dilation of negative of class probabilities 21 | net.addLayer(['neg_prob_' pre_up_name], Neg(), ['prob_' pre_up_name], ['neg_prob_' pre_up_name], {}); 22 | ['neg_prob_' pre_up_name] 23 | net.addLayer(['neg_prob_dilate_' pre_up_name], dagnn.Pooling('stride', [1 1], 'poolSize', [pool_size pool_size], ... 24 | 'pad', [pad_size, pad_size, pad_size, pad_size]), ['neg_prob_' pre_up_name], ['neg_prob_' pre_up_name '_dilate'], {}); 25 | 26 | % Sum of two dilation 27 | net.addLayer(['bound_mask' pre_up_name], dagnn.Sum(), {['prob_' pre_up_name '_dilate'], ['neg_prob_' pre_up_name '_dilate']}, ['bound_mask' pre_up_name]) ; 28 | net.addLayer(['dot_prod_' up_name], DotProduct(), {['bound_mask' pre_up_name], ['prediction_' up_name '_add']}, ['pred_' up_name '_aft_DP']) ; 29 | net.setLayerInputs(['sum' up_name], {['prediction_' pre_up_name post_name], ['pred_' up_name '_aft_DP']}); 30 | 31 | 32 | -------------------------------------------------------------------------------- /libs/layerExt/Neg.m: -------------------------------------------------------------------------------- 1 | classdef Neg < dagnn.ElementWise 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | outputs{1} = -inputs{1}; 5 | end 6 | 7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 8 | derInputs{1} = -1 * derOutputs{1}; 9 | derParams = {} ; 10 | end 11 | 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /libs/layerExt/SegmentationLoss.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------ 2 | % This file is copied from matconvnet-fcn repository 3 | % (https://github.com/vlfeat/matconvnet-fcn) 4 | % ------------------------------------------------------------------ 5 | classdef SegmentationLoss < dagnn.Loss 6 | 7 | methods 8 | function outputs = forward(obj, inputs, params) 9 | mass = sum(sum(inputs{2} > 0,2),1) + 1 ; 10 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], ... 11 | 'loss', obj.loss, ... 12 | 'instanceWeights', 1./mass) ; 13 | n = obj.numAveraged ; 14 | m = n + size(inputs{1},4) ; 15 | obj.average = (n * obj.average + double(gather(outputs{1}))) / m ; 16 | obj.numAveraged = m ; 17 | end 18 | 19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 20 | mass = sum(sum(inputs{2} > 0,2),1) + 1 ; 21 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, ... 22 | 'loss', obj.loss, ... 23 | 'instanceWeights', 1./mass) ; 24 | derInputs{2} = [] ; 25 | derParams = {} ; 26 | end 27 | 28 | function obj = SegmentationLoss(varargin) 29 | obj.load(varargin) ; 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /libs/layerExt/SegmentationLossLogistic.m: -------------------------------------------------------------------------------- 1 | classdef SegmentationLossLogistic < dagnn.Loss 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | sz = size(inputs{2}); 5 | mass = sz(1) * sz(2) + 1; 6 | 7 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], ... 8 | 'loss', obj.loss, ... 
9 | 'instanceWeights', 1./mass) ; 10 | n = obj.numAveraged ; 11 | m = n + size(inputs{1},4) ; 12 | obj.average = (n * obj.average + double(gather(outputs{1}))) / m ; 13 | obj.numAveraged = m ; 14 | end 15 | 16 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 17 | sz = size(inputs{2}); 18 | mass = sz(1) * sz(2) + 1; 19 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, ... 20 | 'loss', obj.loss, ... 21 | 'instanceWeights', 1./mass) ; 22 | derInputs{2} = [] ; 23 | derParams = {} ; 24 | end 25 | 26 | function obj = SegmentationLossLogistic(varargin) 27 | obj.load(varargin) ; 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /libs/layerExt/bilinear_u.m: -------------------------------------------------------------------------------- 1 | function f = bilinear_u(k, numGroups, numClasses) 2 | %BILINEAR_U Create bilinear interpolation filters 3 | % BILINEAR_U(K, NUMGROUPS, NUMCLASSES) compute a square bilinear filter 4 | % of size k for deconv layer of depth numClasses and number of groups 5 | % numGroups 6 | 7 | factor = floor((k+1)/2) ; 8 | if rem(k,2)==1 9 | center = factor ; 10 | else 11 | center = factor + 0.5 ; 12 | end 13 | C = 1:k ; 14 | if numGroups ~= numClasses 15 | f = zeros(k,k,numGroups,numClasses) ; 16 | else 17 | f = zeros(k,k,1,numClasses) ; 18 | end 19 | 20 | for i =1:numClasses 21 | if numGroups ~= numClasses 22 | index = i ; 23 | else 24 | index = 1 ; 25 | end 26 | f(:,:,index,i) = (ones(1,k) - abs(C-center)./factor)'*(ones(1,k) - abs(C-center)./(factor)); 27 | end 28 | end -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | *.vcxproj text merge=union eol=crlf 3 | *.vcxproj.filters merge=union eol=crlf 4 | *.sln text merge=union eol=crlf 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/.gitignore: -------------------------------------------------------------------------------- 1 | *.xcodeproj/*xcuserdata* 2 | *.xcodeproj/project.xcworkspace/*xcuserdata* 3 | *.xcodeproj/project.xcworkspace/xcshareddata/ 4 | mex/* 5 | mex 6 | data 7 | *.o 8 | *.pyc 9 | *~ 10 | index.html 11 | matconvnet-*.tar.gz 12 | local 13 | 14 | # Documentation 15 | doc/figures/svg/*.pdf 16 | doc/figures/*.idraw 17 | doc/.texpadtmp/* 18 | doc/*.pdf 19 | doc/.build 20 | 21 | # Website 22 | doc/site/docs/mfiles 23 | doc/site/site 24 | doc/site/.build 25 | doc/site/theme/css/bootstrap.min.css 26 | doc/site/theme/css/bootstrap.min.css.map 27 | doc/site/theme/css/font-awesome.min.css 28 | doc/site/theme/fonts/fontawesome-webfont.eot 29 | doc/site/theme/fonts/fontawesome-webfont.svg 30 | doc/site/theme/fonts/fontawesome-webfont.ttf 31 | doc/site/theme/fonts/fontawesome-webfont.woff 32 | doc/site/theme/fonts/fontawesome-webfont.woff2 33 | doc/site/theme/js/bootstrap.min.js 34 | doc/site/theme/js/jquery.min.js 35 | doc/site/theme/js/jquery.min.map 36 | doc/site/theme/js/npm.js 37 | 38 | # Visual C 39 | *.suo 40 | *.user 41 | *.sdf 42 | *.opensdf 43 | doc/figures/svg/*.idraw 44 | 45 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/.gitmodules: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/.gitmodules -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | ## How to contribute to MatConvNet 4 | 5 | For a description of how the library is structured, take a look at the 6 | [Developers notes](http://www.vlfeat.org/matconvnet/developers/) on 7 | the MatConvNet website. 8 | 9 | ### Issues 10 | 11 | We are grateful for any reported issues which help to remove bugs and 12 | improve the overall quality of the library. In particular, you can use 13 | the issue tracker to: 14 | 15 | * report bugs and unexpected crashes 16 | * discuss library design decisions 17 | * request new features 18 | 19 | When reporting bugs, it really helps if you can provide the following: 20 | 21 | * Which steps are needed to reproduce the issue 22 | * MATLAB, compiler and CUDA version (where appropriate) 23 | 24 | Before opening an issue to report a bug, please make sure that the bug 25 | is reproducible on the latest version of the master branch. 26 | 27 | The most difficult bugs to remove are those which cause crashes of the 28 | core functions (e.g. CUDA errors etc.). In those cases, it is really 29 | useful to create a *minimal example* which is able to reproduce the 30 | issue. We know that this may mean a bit of work, but it helps us to 31 | remove the bug more quickly. 32 | 33 | ### Pull requests 34 | 35 | Please make any Pull Requests against the `devel` branch rather than 36 | the `master` branch which is maintained as the latest stable release 37 | of the library. 38 | 39 | As a general rule, it is much easier to accept small Pull Requests 40 | that make a single improvement to the library than complex code 41 | changes that affect multiple parts of the library. When submitting 42 | substantial changes, it is useful if unit tests are provided with the 43 | code. 44 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 The MatConvNet team. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms are permitted 5 | provided that the above copyright notice and this paragraph are 6 | duplicated in all such forms and that any documentation, 7 | advertising materials, and other materials related to such 8 | distribution and use acknowledge that the software was developed 9 | by the . The name of the 10 | may not be used to endorse or promote products derived 11 | from this software without specific prior written permission. 12 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 13 | IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/README.md: -------------------------------------------------------------------------------- 1 | # MatConvNet: CNNs for MATLAB 2 | 3 | **MatConvNet** is a MATLAB toolbox implementing *Convolutional Neural 4 | Networks* (CNNs) for computer vision applications. 
It is simple, 5 | efficient, and can run and learn state-of-the-art CNNs. Several 6 | example CNNs are included to classify and encode images. Please visit 7 | the [homepage](http://www.vlfeat.org/matconvnet) to know more. 8 | 9 | In case of compilation issues, please read first the 10 | [Installation](http://www.vlfeat.org/matconvnet/install/) and 11 | [FAQ](http://www.vlfeat.org/matconvnet/faq/) section before creating an GitHub 12 | issue. For general inquiries regarding network design and training 13 | related questions, please use the 14 | [Discussion forum](https://groups.google.com/d/forum/matconvnet). 15 | 16 | 17 | vl_compilenn('enableGpu', true, 'cudaRoot', '/usr/local/cuda') 18 | 19 | vl_compilenn('enableGpu', true, ... 20 | 'cudaRoot', '/usr/local/cuda', ... 21 | 'cudaMethod', 'nvcc') 22 | 23 | vl_compilenn('enableGpu', true, ... 24 | 'cudaRoot', '/usr/local/cuda', ... 25 | 'cudaMethod', 'nvcc', ... 26 | 'enableCudnn', true, ... 27 | 'cudnnRoot', '/usr/local/cuda/cudnn-v5') ; 28 | 29 | LD_LIBRARY_PATH=/Developer/NVIDIA/CUDA-7.0/lib64 matlab -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/figures/imnet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/doc/figures/imnet.pdf -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/figures/pepper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/doc/figures/pepper.pdf -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/figures/stn-perf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/figures/stn-perf.png -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/figures/stn-samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/figures/stn-samples.png -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/gpu.md: -------------------------------------------------------------------------------- 1 | # Using GPU acceleration 2 | 3 | GPU support in MatConvNet builds on top of MATLAB GPU support in the 4 | [Parallel Computing Toolbox](http://www.mathworks.com/products/parallel-computing/). This 5 | toolbox requires CUDA-compatible cards, and you will need a copy of 6 | the corresponding 7 | [CUDA devkit](https://developer.nvidia.com/cuda-toolkit-archive) to 8 | compile GPU support in MatConvNet (see 9 | [compiling](install#compiling)). 10 | 11 | All the core computational functions (e.g. 
`vl_nnconv`) in the toolbox 12 | can work with either MATLAB arrays or MATLAB GPU arrays. Therefore, 13 | switching to use the GPU is as simple as converting the input CPU 14 | arrays in GPU arrays. 15 | 16 | In order to make the very best of powerful GPUs, it is important to 17 | balance the load between CPU and GPU in order to avoid starving the 18 | latter. In training on a problem like ImageNet, the CPU(s) in your 19 | system will be busy loading data from disk and streaming it to the GPU 20 | to evaluate the CNN and its derivative. MatConvNet includes the 21 | utility `vl_imreadjpeg` to accelerate and parallelize loading images 22 | into memory (this function is currently a bottleneck will be made more 23 | powerful in future releases). 24 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/js/mathjaxhelper.js: -------------------------------------------------------------------------------- 1 | /* 2 | #if false 3 | Prevent Unity to try compiling this js 4 | */ 5 | MathJax.Hub.Config({ 6 | "tex2jax": { inlineMath: [ [ '$', '$' ] ] } 7 | }); 8 | /* 9 | #endif 10 | */ -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/js/toggle.js: -------------------------------------------------------------------------------- 1 | function toggle_visibility(id) { 2 | var e = document.getElementById(id); 3 | if(e.style.display == 'block') 4 | e.style.display = 'none'; 5 | else 6 | e.style.display = 'block'; 7 | } 8 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/spatial-transformer.md: -------------------------------------------------------------------------------- 1 | # Spatial Transformer Networks 2 | 3 | This example demonstrates the use of a Spatial Transformer Network 4 | for classifying distorted MNIST digits in clutter. 5 | The source files used in this examples can be found in the 6 | `examples/spatial_transformer` directory. 7 | 8 | The spatial transformer network is defined in the `cnn_stn_cluttered_mnist.m` 9 | file. It has three components: (1) a localization network which 10 | predicts six affine transformation parameters for an input image, 11 | (2) a bilinear sampler which applies the above transformation 12 | to the input image, and (3) a classification network which classifies the 13 | output of the bilinear sampler. 14 | 15 | The picture below shows input images and their transformed versions as determined 16 | by the STN. Note how the STN has learned to rectify the input image. 17 | 18 | ![Transformations inferred by the Spatial Transformer Network for images from a cluttered MNIST dataset.](figures/stn-samples.png) 19 | 20 | The following graph compares the training and test errors of two CNNs: 21 | a STN and, a plain classification CNN (with the same configuration as the 22 | classification component of the STN). We note that the STN performs significantly 23 | better (STN test-error = 5.7%, CNN test-error = 14.2%). 
24 | 25 | ![Classification error comparison between a STN and a CNN.](figures/stn-perf.png) 26 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/docs/training.md: -------------------------------------------------------------------------------- 1 | ## Using MatConvNet to train convnets 2 | 3 | MatConvNet can be used to train models, typically by using a form of 4 | stochastic gradient descent (SGD) and back-propagation. 5 | 6 | The following learning demonstrators are provided in the MatConvNet 7 | package: 8 | 9 | - **MNIST**. See `examples/mnist/cnn_mnist.m`. 10 | - **CIFAR**. See `examples/cifar/cnn_cifar.m`. 11 | - **ImageNet**. See `examples/imagenet/cnn_imagenet.m`. 12 | 13 | These demos are self-contained; MNIST and CIFAR, in particular, 14 | automatically download and unpack the required data, so that they 15 | should work out-of-the-box. 16 | 17 | While MNIST and CIFAR are small datasets (by today's standard) and 18 | training is feasible on a CPU, ImageNet requires a powerful GPU to 19 | complete in a reasonable time (a few days!). It also requires the 20 | `vl_imreadjpeg()` command in the toolbox to be compiled in order to 21 | accelerate reading large batches of JPEG images and avoid starving the 22 | GPU. 23 | 24 | All these demos use the `example/cnn_train.m` and 25 | `example/cnn_train_dag.m` SGD drivers, which are simple 26 | implementations of the standard SGD with momentum, done directly in 27 | MATLAB code. However, it should be easy to implement your own 28 | specialized or improved solver. 29 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/theme/content.html: -------------------------------------------------------------------------------- 1 | {% if meta.source %} 2 | 7 | {% endif %} 8 | 9 | {{ content }} 10 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/theme/js/base.js: -------------------------------------------------------------------------------- 1 | 2 | /* Highlight */ 3 | $( document ).ready(function() { 4 | hljs.initHighlightingOnLoad(); 5 | $('table').addClass('table table-striped table-hover'); 6 | }); 7 | 8 | 9 | $('body').scrollspy({ 10 | target: '.bs-sidebar', 11 | }); 12 | 13 | 14 | /* Prevent disabled links from causing a page reload */ 15 | $("li.disabled a").click(function() { 16 | event.preventDefault(); 17 | }); 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/doc/site/theme/toc.html: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/000004.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/000004.jpg -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/000004_boxes.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/000004_boxes.mat -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Fast-RCNN demo 2 | 3 | This folder contains an example implementation of Fast-RCNN [1] in 4 | MatConvNet. The example trains and test on the PASCAL VOC 2007 data. 5 | 6 | There are three entry-point scripts: 7 | 8 | * `fast_rcnn_demo.m`: runs the original Caffe model imported in MatConvNet. 9 | * `fast_rcnn_train.m`: trains a new model from scratch, using pre-computed proposals. 10 | * `fast_rcnn_evaluate.m`: evaluates the trained model. 11 | 12 | Note that the code does not ship with a proposal generation method, so 13 | proposals must be precomputed (using e.g. edge boxes or selective 14 | search windows). 15 | 16 | The `fast_rcnn_demo.m` code should run out of the box, downloading the 17 | model as needed. 18 | 19 | To test the training code using the first GPU on your system, use 20 | something like: 21 | 22 | run matlab/vl_setupnn 23 | addpath examples/fast_rcnn 24 | fast_rcnn_train('train',struct('gpus',1)) ; 25 | fast_rcnn_evaluate('gpu',1) ; 26 | 27 | ## References 28 | 29 | 1. *Fast R-CNN*, R. Girshick, International Conference on Computer 30 | Vision (ICCV), 2015. 31 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_clip.m: -------------------------------------------------------------------------------- 1 | function boxes = bbox_clip(boxes, im_size) 2 | % bbox_clip Clip boxes to image boundaries. 3 | % 4 | % Copyright (C) 2016 Hakan Bilen. 5 | % All rights reserved. 6 | % 7 | % This file is part of the VLFeat library and is made available under 8 | % the terms of the BSD license (see the COPYING file). 9 | boxes(:,1) = max(min(boxes(:,1),im_size(2)),1); 10 | boxes(:,2) = max(min(boxes(:,2),im_size(1)),1); 11 | boxes(:,3) = max(min(boxes(:,3),im_size(2)),1); 12 | boxes(:,4) = max(min(boxes(:,4),im_size(1)),1); 13 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_overlap.m: -------------------------------------------------------------------------------- 1 | function overlaps = bbox_overlap(boxes1,boxes2) 2 | % Copyright (C) 2016 Hakan Bilen. 3 | % All rights reserved. 4 | % 5 | % This file is part of the VLFeat library and is made available under 6 | % the terms of the BSD license (see the COPYING file). 
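%
% Editor's note -- worked example: for boxes1 = [1 1 10 10] and
% boxes2 = [6 6 15 15] the intersection is 5*5 = 25 pixels and the
% union is 100 + 100 - 25 = 175, so bbox_overlap returns 25/175 ~ 0.14
% (intersection over union; one row of OVERLAPS per box in boxes1).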
7 | x11 = boxes1(:,1); 8 | y11 = boxes1(:,2); 9 | x12 = boxes1(:,3); 10 | y12 = boxes1(:,4); 11 | 12 | x21 = boxes2(:,1); 13 | y21 = boxes2(:,2); 14 | x22 = boxes2(:,3); 15 | y22 = boxes2(:,4); 16 | 17 | N1 = size(boxes1,1); 18 | N2 = size(boxes2,1); 19 | 20 | area1 = (x12-x11+1) .* (y12-y11+1); 21 | area2 = (x22-x21+1) .* (y22-y21+1); 22 | 23 | overlaps = zeros(N1,N2); 24 | 25 | for i=1:N1 26 | 27 | xx1 = max(x11(i), x21); 28 | yy1 = max(y11(i), y21); 29 | xx2 = min(x12(i), x22); 30 | yy2 = min(y12(i), y22); 31 | 32 | w = max(0.0, xx2-xx1+1); 33 | h = max(0.0, yy2-yy1+1); 34 | 35 | inter = w.*h; 36 | overlaps(i,:) = inter ./ (area1(i) + area2 - inter); 37 | end 38 | 39 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_remove_duplicates.m: -------------------------------------------------------------------------------- 1 | function bboxeso = bbox_remove_duplicates(bboxes, minSize, maxNum) 2 | % Copyright (C) 2016 Hakan Bilen. 3 | % All rights reserved. 4 | % 5 | % This file is part of the VLFeat library and is made available under 6 | % the terms of the BSD license (see the COPYING file). 7 | bboxeso = cell(size(bboxes)); 8 | for i=1:numel(bboxes) 9 | bbox = bboxes{i}; 10 | % remove small bbox 11 | isGood = (bbox(:,3)>=bbox(:,1)-1+minSize) & (bbox(:,4)>=bbox(:,2)-1+minSize); 12 | bbox = bbox(isGood,:); 13 | % remove duplicate ones 14 | [dummy, uniqueIdx] = unique(bbox, 'rows', 'first'); 15 | uniqueIdx = sort(uniqueIdx); 16 | bbox = bbox(uniqueIdx,:); 17 | % limit number for training 18 | nB = min(size(bbox,1),maxNum); 19 | 20 | bboxeso{i} = bbox(1:nB,:); 21 | end 22 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_scale.m: -------------------------------------------------------------------------------- 1 | function boxOut = bbox_scale2(boxIn,scale,szOut) 2 | % Copyright (C) 2016 Hakan Bilen. 3 | % All rights reserved. 4 | % 5 | % This file is part of the VLFeat library and is made available under 6 | % the terms of the BSD license (see the COPYING file). 7 | 8 | if isempty(boxIn), boxOut = []; return; end 9 | 10 | boxOut = scale * (boxIn-1) + 1; 11 | 12 | boxOut = [max(1,round(boxOut(:,1))),... 13 | max(1,round(boxOut(:,2))),... 14 | min(szOut(1),round(boxOut(:,3))),... 15 | min(szOut(2),round(boxOut(:,4)))]; 16 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_transform.m: -------------------------------------------------------------------------------- 1 | function targets = bbox_transform(ex_rois, gt_rois) 2 | % Copyright (C) 2016 Hakan Bilen. 3 | % All rights reserved. 4 | % 5 | % This file is part of the VLFeat library and is made available under 6 | % the terms of the BSD license (see the COPYING file). 
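%
% Editor's note: the code below computes the standard R-CNN regression
% targets -- centre offsets normalised by the example-box size and
% log-encoded size ratios:
%   dx = (gx - ex) / ew ,  dy = (gy - ey) / eh ,
%   dw = log(gw / ew)   ,  dh = log(gh / eh) ,
% which bbox_transform_inv.m (further below) inverts to recover the
% predicted boxes from the network output.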
7 | 8 | ex_widths = ex_rois(:, 3) - ex_rois(:, 1) + 1.0 ; 9 | ex_heights = ex_rois(:, 4) - ex_rois(:, 2) + 1.0 ; 10 | ex_ctr_x = ex_rois(:, 1) + 0.5 * ex_widths ; 11 | ex_ctr_y = ex_rois(:, 2) + 0.5 * ex_heights ; 12 | 13 | gt_widths = gt_rois(:, 3) - gt_rois(:, 1) + 1.0 ; 14 | gt_heights = gt_rois(:, 4) - gt_rois(:, 2) + 1.0 ; 15 | gt_ctr_x = gt_rois(:, 1) + 0.5 * gt_widths ; 16 | gt_ctr_y = gt_rois(:, 2) + 0.5 * gt_heights ; 17 | 18 | targets_dx = (gt_ctr_x - ex_ctr_x) ./ ex_widths ; 19 | targets_dy = (gt_ctr_y - ex_ctr_y) ./ ex_heights ; 20 | targets_dw = log(gt_widths ./ ex_widths) ; 21 | targets_dh = log(gt_heights ./ ex_heights) ; 22 | 23 | targets = [targets_dx, targets_dy, targets_dw, targets_dh] ; -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/fast_rcnn/bbox_functions/bbox_transform_inv.m: -------------------------------------------------------------------------------- 1 | function pred_boxes = bbox_transform_inv(boxes, deltas) 2 | % Copyright (C) 2016 Hakan Bilen. 3 | % All rights reserved. 4 | % 5 | % This file is part of the VLFeat library and is made available under 6 | % the terms of the BSD license (see the COPYING file). 7 | 8 | if isempty(boxes), return; end 9 | 10 | widths = boxes(:,3) - boxes(:,1); 11 | heights = boxes(:,4) - boxes(:,2); 12 | ctr_x = boxes(:,1) + 0.5 * widths; 13 | ctr_y = boxes(:,2) + 0.5 * heights; 14 | 15 | dx = deltas(:,1); 16 | dy = deltas(:,2); 17 | dw = deltas(:,3); 18 | dh = deltas(:,4); 19 | 20 | pred_ctr_x = dx .* widths + ctr_x; 21 | pred_ctr_y = dy .* heights + ctr_y; 22 | pred_w = exp(dw) .* widths; 23 | pred_h = exp(dh) .* heights; 24 | 25 | pred_boxes = zeros(size(deltas), 'like', deltas); 26 | % x1 27 | pred_boxes(:, 1) = pred_ctr_x - 0.5 * pred_w; 28 | % y1 29 | pred_boxes(:, 2) = pred_ctr_y - 0.5 * pred_h; 30 | % x2 31 | pred_boxes(:, 3) = pred_ctr_x + 0.5 * pred_w; 32 | % y2 33 | pred_boxes(:, 4) = pred_ctr_y + 0.5 * pred_h; 34 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/imagenet/cnn_imagenet_googlenet.m: -------------------------------------------------------------------------------- 1 | function cnn_imagenet_googlenet() 2 | %CNN_IMAGENET_GOOGLENET Demonstrates how to use GoogLeNet 3 | 4 | run matlab/vl_setupnn 5 | modelPath = 'data/models/imagenet-googlenet-dag.mat' ; 6 | 7 | if ~exist(modelPath) 8 | mkdir(fileparts(modelPath)) ; 9 | urlwrite(... 10 | 'http://www.vlfeat.org/matconvnet/models/imagenet-googlenet-dag.mat', ... 11 | modelPath) ; 12 | end 13 | 14 | net = dagnn.DagNN.loadobj(load(modelPath)) ; 15 | 16 | im = imread('peppers.png') ; 17 | im_ = single(im) ; % note: 255 range 18 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 19 | im_ = im_ - net.meta.normalization.averageImage ; 20 | net.eval({'data', im_}) ; 21 | 22 | % show the classification result 23 | scores = squeeze(gather(net.vars(end).value)) ; 24 | [bestScore, best] = max(scores) ; 25 | figure(1) ; clf ; imagesc(im) ; 26 | title(sprintf('%s (%d), score %.3f',... 
27 | net.meta.classes.description{best}, best, bestScore)) ; 28 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/imagenet/cnn_imagenet_minimal.m: -------------------------------------------------------------------------------- 1 | function cnn_imagenet_minimal() 2 | % CNN_IMAGENET_MINIMAL Minimalistic demonstration of how to run an ImageNet CNN model 3 | 4 | % Setup MatConvNet. 5 | run(fullfile(fileparts(mfilename('fullpath')), ... 6 | '..', '..', 'matlab', 'vl_setupnn.m')) ; 7 | 8 | % Download a pre-trained CNN from the web. 9 | if ~exist('imagenet-vgg-f.mat', 'file') 10 | fprintf('Downloading the VGG-F model ... this may take a while\n') ; 11 | urlwrite('http://www.vlfeat.org/matconvnet/models/imagenet-vgg-f.mat', ... 12 | 'imagenet-vgg-f.mat') ; 13 | end 14 | 15 | % Load the model and upgrade it to MatConvNet current version. 16 | net = load('imagenet-vgg-f.mat') ; 17 | net = vl_simplenn_tidy(net) ; 18 | 19 | % Obtain and preprocess an image. 20 | im = imread('peppers.png') ; 21 | im_ = single(im) ; % note: 255 range 22 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 23 | im_ = im_ - net.meta.normalization.averageImage ; 24 | 25 | % Run the CNN. 26 | res = vl_simplenn(net, im_) ; 27 | 28 | % Show the classification result. 29 | scores = squeeze(gather(res(end).x)) ; 30 | [bestScore, best] = max(scores) ; 31 | figure(1) ; clf ; imagesc(im) ; 32 | title(sprintf('%s (%d), score %.3f',... 33 | net.meta.classes.description{best}, best, bestScore)) ; 34 | 35 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/imagenet/cnn_imagenet_sync_labels.m: -------------------------------------------------------------------------------- 1 | function imdb = cnn_imagenet_sync_labels(imdb, net) 2 | % CNN_IMAGENET_SYNC_LABELS Match CNN and database labels 3 | % A CNN NET and the image database IMDB may use a different label ordering. 4 | % This function matches classes by name and reorder the labels 5 | % in IMDB to match NET. 6 | 7 | [~,perm] = ismember(imdb.classes.name, net.meta.classes.name); 8 | assert(all(perm ~= 0)); 9 | 10 | imdb.classes.description = imdb.classes.description(perm) ; 11 | imdb.classes.name = imdb.classes.name(perm) ; 12 | ok = imdb.images.label > 0 ; 13 | iperm(perm) = 1:numel(perm) ; 14 | imdb.images.label(ok) = perm(imdb.images.label(ok)) ; 15 | 16 | 17 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/imagenet/getImageStats.m: -------------------------------------------------------------------------------- 1 | function [averageImage, rgbMean, rgbCovariance] = getImageStats(images, varargin) 2 | %GETIMAGESTATS Get image statistics 3 | 4 | opts.gpus = [] ; 5 | opts.batchSize = 256 ; 6 | opts.imageSize = [256 256] ; 7 | opts.numThreads = 6 ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | avg = {} ; 11 | rgbm1 = {} ; 12 | rgbm2 = {} ; 13 | 14 | numGpus = numel(opts.gpus) ; 15 | if numGpus > 0 16 | fprintf('%s: resetting GPU device\n', mfilename) ; 17 | clear mex ; 18 | gpuDevice(opts.gpus(1)) 19 | end 20 | 21 | for t=1:opts.batchSize:numel(images) 22 | time = tic ; 23 | batch = t : min(t+opts.batchSize-1, numel(images)) ; 24 | fprintf('collecting image stats: batch starting with image %d ...', batch(1)) ; 25 | 26 | data = getImageBatch(images(batch), ... 27 | 'numThreads', opts.numThreads, ... 
28 | 'imageSize', opts.imageSize, ... 29 | 'useGpu', numGpus > 0) ; 30 | 31 | z = reshape(shiftdim(data,2),3,[]) ; 32 | rgbm1{end+1} = mean(z,2) ; 33 | rgbm2{end+1} = z*z'/size(z,2) ; 34 | avg{end+1} = mean(data, 4) ; 35 | time = toc(time) ; 36 | fprintf(' %.1f Hz\n', numel(batch) / time) ; 37 | end 38 | 39 | averageImage = gather(mean(cat(4,avg{:}),4)) ; 40 | rgbm1 = gather(mean(cat(2,rgbm1{:}),2)) ; 41 | rgbm2 = gather(mean(cat(3,rgbm2{:}),3)) ; 42 | rgbMean = rgbm1 ; 43 | rgbCovariance = rgbm2 - rgbm1*rgbm1' ; 44 | 45 | if numGpus > 0 46 | fprintf('%s: finished with GPU device, resetting again\n', mfilename) ; 47 | clear mex ; 48 | gpuDevice(opts.gpus(1)) ; 49 | end 50 | fprintf('%s: all done\n', mfilename) ; 51 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/mnist/cnn_mnist_experiments.m: -------------------------------------------------------------------------------- 1 | %% Experiment with the cnn_mnist_fc_bnorm 2 | clear 3 | clc; 4 | close all; 5 | 6 | [net_bn, info_bn] = cnn_mnist(... 7 | 'expDir', 'data/mnist-bnorm', 'batchNormalization', true); 8 | 9 | [net_fc, info_fc] = cnn_mnist(... 10 | 'expDir', 'data/mnist-baseline', 'batchNormalization', false); 11 | 12 | figure(1) ; clf ; 13 | subplot(1,2,1) ; 14 | semilogy(info_fc.val.objective', 'o-') ; hold all ; 15 | semilogy(info_bn.val.objective', '+--') ; 16 | xlabel('Training samples [x 10^3]'); ylabel('energy') ; 17 | grid on ; 18 | h=legend('BSLN', 'BNORM') ; 19 | set(h,'color','none'); 20 | title('objective') ; 21 | subplot(1,2,2) ; 22 | plot(info_fc.val.error', 'o-') ; hold all ; 23 | plot(info_bn.val.error', '+--') ; 24 | h=legend('BSLN-val','BSLN-val-5','BNORM-val','BNORM-val-5') ; 25 | grid on ; 26 | xlabel('Training samples [x 10^3]'); ylabel('error') ; 27 | set(h,'color','none') ; 28 | title('error') ; 29 | drawnow ; -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/spatial_transformer/readme.txt: -------------------------------------------------------------------------------- 1 | Example scripts to train a spatial transformer network [1] 2 | for cluttered MNIST dataset. 3 | 4 | Demonstrates how to initialize and train the network. 5 | 6 | References: 7 | ----------- 8 | 1. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman 9 | Spatial transformer networks 10 | Advances in Neural Information Processing Systems, 2015 11 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/examples/vggfaces/cnn_vgg_faces.m: -------------------------------------------------------------------------------- 1 | function cnn_vgg_faces() 2 | %CNN_VGG_FACES Demonstrates how to use VGG-Face 3 | 4 | % Setup MatConvNet. 5 | run(fullfile(fileparts(mfilename('fullpath')), ... 6 | '..', '..', 'matlab', 'vl_setupnn.m')) ; 7 | 8 | % Load the VGG-Face model. 9 | modelPath = fullfile(vl_rootnn,'data','models','vgg-face.mat') ; 10 | if ~exist(modelPath) 11 | fprintf('Downloading the VGG-Face model ... this may take a while\n') ; 12 | mkdir(fileparts(modelPath)) ; 13 | urlwrite(... 14 | 'http://www.vlfeat.org/matconvnet/models/vgg-face.mat', ... 15 | modelPath) ; 16 | end 17 | 18 | % Load the model and upgrade it to MatConvNet current version. 19 | net = load('data/models/vgg-face.mat') ; 20 | net = vl_simplenn_tidy(net) ; 21 | 22 | % Load a test image from Wikipedia and run the model. 
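% (Editor's note: the crop below keeps only the top 250 rows so the face
% roughly fills the frame; the image is then resized to the network's
% expected input size and the average image is subtracted, mirroring the
% preprocessing used in cnn_imagenet_minimal.m above.)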
23 | im = imread('https://upload.wikimedia.org/wikipedia/commons/4/4a/Aamir_Khan_March_2015.jpg') ; 24 | im = im(1:250,:,:) ; % crop 25 | im_ = single(im) ; % note: 255 range 26 | im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ; 27 | im_ = bsxfun(@minus,im_,net.meta.normalization.averageImage) ; 28 | res = vl_simplenn(net, im_) ; 29 | 30 | % Show the classification result. 31 | scores = squeeze(gather(res(end).x)) ; 32 | [bestScore, best] = max(scores) ; 33 | figure(1) ; clf ; imagesc(im) ; axis equal off ; 34 | title(sprintf('%s (%d), score %.3f',... 35 | net.meta.classes.description{best}, best, bestScore), ... 36 | 'Interpreter', 'none') ; 37 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matconvnet.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 14 4 | VisualStudioVersion = 14.0.24720.0 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "matconvnet", "matconvnet.vcxproj", "{B0BD9132-1D90-4267-A07A-B44DE497A9C7}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|Win32 = Debug|Win32 11 | Debug|x64 = Debug|x64 12 | Release|Win32 = Release|Win32 13 | Release|x64 = Release|x64 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.ActiveCfg = Debug|Win32 17 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|Win32.Build.0 = Debug|Win32 18 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Debug|x64.ActiveCfg = Debug|Win32 19 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.ActiveCfg = Release|Win32 20 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|Win32.Build.0 = Release|Win32 21 | {B0BD9132-1D90-4267-A07A-B44DE497A9C7}.Release|x64.ActiveCfg = Release|Win32 22 | EndGlobalSection 23 | GlobalSection(SolutionProperties) = preSolution 24 | HideSolutionNode = FALSE 25 | EndGlobalSection 26 | EndGlobal 27 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matconvnet.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/addLayer.m: -------------------------------------------------------------------------------- 1 | function addLayer(obj, name, block, inputs, outputs, params, varargin) 2 | %ADDLAYER Adds a layer to a DagNN 3 | % ADDLAYER(NAME, LAYER, INPUTS, OUTPUTS, PARAMS) adds the 4 | % specified layer to the network. NAME is a string with the layer 5 | % name, used as a unique indentifier. BLOCK is the object 6 | % implementing the layer, which should be a subclass of the 7 | % Layer. INPUTS, OUTPUTS are cell arrays of variable names, and 8 | % PARAMS of parameter names. 9 | % 10 | % See Also REMOVELAYER(). 
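%
%   Example (editor's sketch; layer and parameter names are illustrative):
%     net.addLayer('conv1', dagnn.Conv('size', [3 3 1 8]), ...
%                  {'input'}, {'x1'}, {'conv1_f', 'conv1_b'}) ;
%   PARAMS may be omitted for parameter-free blocks such as dagnn.SoftMax.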
11 | opts.skipRebuild = false; 12 | opts = vl_argparse(opts, varargin); 13 | 14 | index = find(strcmp(name, {obj.layers.name})) ; 15 | if ~isempty(index), error('There is already a layer with name ''%s''.', name), end 16 | index = numel(obj.layers) + 1 ; 17 | 18 | if nargin < 6, params = {} ; end 19 | if ischar(inputs), inputs = {inputs} ; end 20 | if ischar(outputs), outputs = {outputs} ; end 21 | if ischar(params), params = {params} ; end 22 | 23 | obj.layers(index) = struct(... 24 | 'name', {name}, ... 25 | 'inputs', {inputs}, ... 26 | 'outputs', {outputs}, ... 27 | 'params', {params}, ... 28 | 'inputIndexes', {[]}, ... 29 | 'outputIndexes', {[]}, ... 30 | 'paramIndexes', {[]}, ... 31 | 'forwardTime', {[]}, ... 32 | 'backwardTime', {[]}, ... 33 | 'block', {block}) ; 34 | obj.layers(index).block.attach(obj, index) ; 35 | if ~opts.skipRebuild, obj.rebuild() ; end ; 36 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/getVarSizes.m: -------------------------------------------------------------------------------- 1 | function sizes = getVarSizes(obj, inputSizes) 2 | %GETVARSIZES Get the size of the variables 3 | % SIZES = GETVARSIZES(OBJ, INPUTSIZES) computes the SIZES of the 4 | % DagNN variables given the size of the inputs. `inputSizes` is 5 | % a cell array of the type `{'inputName', inputSize, ...}` 6 | % Returns a cell array with sizes of all network variables. 7 | % 8 | % Example, compute the storage needed for a batch size of 256 for an 9 | % imagenet-like network: 10 | % ``` 11 | % batch_size = 256; single_num_bytes = 4; 12 | % input_size = [net.meta.normalization.imageSize, batch_size]; 13 | % var_sizes = net.getVarSizes({'data', input_size}); 14 | % fprintf('Network activations will take %.2fMiB in single.\n', ... 15 | % sum(prod(cell2mat(var_sizes, 1))) * single_num_bytes ./ 1024^3); 16 | % ``` 17 | 18 | % Copyright (C) 2015 Andrea Vedaldi, Karel Lenc. 19 | % All rights reserved. 20 | % 21 | % This file is part of the VLFeat library and is made available under 22 | % the terms of the BSD license (see the COPYING file). 23 | 24 | nv = numel(obj.vars) ; 25 | sizes = num2cell(NaN(nv, 4),2)' ; 26 | 27 | for i = 1:2:numel(inputSizes) 28 | v = obj.getVarIndex(inputSizes{i}) ; 29 | if isnan(v) 30 | error('Variable `%s` not found in the network.', inputSizes{i}); 31 | end; 32 | if isempty(inputSizes{i+1}) 33 | sizes{v} = [0 0 0 0] ; 34 | else 35 | sizes{v} = [inputSizes{i+1}(:)' ones(1, 4 - numel(inputSizes{i+1}))] ; 36 | end 37 | end 38 | 39 | for layer = obj.layers(obj.executionOrder) 40 | in = layer.inputIndexes ; 41 | out = layer.outputIndexes ; 42 | sizes(out) = layer.block.getOutputSizes(sizes(in)) ; 43 | end 44 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/initParams.m: -------------------------------------------------------------------------------- 1 | function initParams(obj) 2 | % INITPARAM Initialize the paramers of the DagNN 3 | % OBJ.INITPARAM() uses the INIT() method of each layer to initialize 4 | % the corresponding parameters (usually randomly). 5 | 6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 7 | % All rights reserved. 8 | % 9 | % This file is part of the VLFeat library and is made available under 10 | % the terms of the BSD license (see the COPYING file). 
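%
% (Editor's note: call this once after the network has been assembled and
% before training, e.g. net.initParams() ; each block's initParams()
% method, invoked in the loop below, supplies the -- typically random --
% initial values for its parameters.)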
11 | 12 | for l = 1:numel(obj.layers) 13 | p = obj.getParamIndex(obj.layers(l).params) ; 14 | params = obj.layers(l).block.initParams() ; 15 | switch obj.device 16 | case 'cpu' 17 | params = cellfun(@gather, params, 'UniformOutput', false) ; 18 | case 'gpu' 19 | params = cellfun(@gpuArray, params, 'UniformOutput', false) ; 20 | end 21 | [obj.params(p).value] = deal(params{:}) ; 22 | end 23 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/move.m: -------------------------------------------------------------------------------- 1 | function move(obj, device) 2 | %MOVE Move the DagNN to either CPU or GPU 3 | % MOVE(obj, 'cpu') moves the DagNN obj to the CPU. 4 | % 5 | % MOVE(obj, 'gpu') moves the DagNN obj to the GPU. 6 | 7 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 8 | % All rights reserved. 9 | % 10 | % This file is part of the VLFeat library and is made available under 11 | % the terms of the BSD license (see the COPYING file). 12 | 13 | obj.reset() ; 14 | obj.device = device ; 15 | switch device 16 | case 'gpu' 17 | for i=1:numel(obj.params) 18 | obj.params(i).value = gpuArray(obj.params(i).value) ; 19 | end 20 | case 'cpu' 21 | for i=1:numel(obj.params) 22 | obj.params(i).value = gather(obj.params(i).value) ; 23 | end 24 | otherwise 25 | error('DEVICE must be either ''cpu'' or ''gpu''.') ; 26 | end 27 | for l = 1:numel(obj.layers) 28 | obj.layers(l).block.move(device) ; 29 | end 30 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/removeLayer.m: -------------------------------------------------------------------------------- 1 | function removeLayer(obj, layerName) 2 | %REMOVELAYER Remove a layer from the network 3 | % REMOVELAYER(OBJ, NAME) removes the layer NAME from the DagNN object 4 | % OBJ. NAME can be a string or a cell array of strings. 5 | 6 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 7 | % All rights reserved. 8 | % 9 | % This file is part of the VLFeat library and is made available under 10 | % the terms of the BSD license (see the COPYING file). 11 | 12 | if ischar(layerName), layerName = {layerName}; end; 13 | idxs = obj.getLayerIndex(layerName); 14 | if any(isnan(idxs)) 15 | error('Invalid layer name `%s`', ... 16 | strjoin(layerName(isnan(idxs)), ', ')); 17 | end 18 | obj.layers(idxs) = [] ; 19 | obj.rebuild() ; 20 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/renameLayer.m: -------------------------------------------------------------------------------- 1 | function renameLayer(obj, oldName, newName, varargin) 2 | %RENAMELAYER Rename a layer 3 | % RENAMELAYER(OLDNAME, NEWNAME) changes the name of the layer 4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an 5 | % existing layer. 
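%
%   Example (editor's sketch): net.renameLayer('fc8', 'fc8_new') ;
%   Pass 'quiet', true to suppress the warning that is issued when
%   OLDNAME does not exist.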
6 | 7 | opts.quiet = false ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | % Find the layer to rename 11 | v = obj.getLayerIndex(oldName) ; 12 | if isnan(v) 13 | % There is no such layer, nothing to do 14 | if ~opts.quiet 15 | warning('There is no layer ''%s''.', oldName) ; 16 | end 17 | return ; 18 | end 19 | 20 | % Check if newName is an existing layer 21 | newNameExists = any(strcmp(newName, {obj.layers.name})) ; 22 | if newNameExists 23 | error('There is already a layer ''%s''.', newName) ; 24 | end 25 | 26 | % Replace oldName with newName in all the layers 27 | obj.layers(v).name = newName ; 28 | obj.rebuild() ; 29 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/renameVar.m: -------------------------------------------------------------------------------- 1 | function renameVar(obj, oldName, newName, varargin) 2 | %RENAMEVAR Rename a variable 3 | % RENAMEVAR(OLDNAME, NEWNAME) changes the name of the variable 4 | % OLDNAME into NEWNAME. NEWNAME should not be the name of an 5 | % existing variable. 6 | 7 | opts.quiet = false ; 8 | opts = vl_argparse(opts, varargin) ; 9 | 10 | % Find the variable to rename 11 | v = obj.getVarIndex(oldName) ; 12 | if isnan(v) 13 | % There is no such a variable, nothing to do 14 | if ~opts.quiet 15 | warning('There is no variable ''%s''.', oldName) ; 16 | end 17 | return ; 18 | end 19 | 20 | % Check if newName is an existing variable 21 | newNameExists = any(strcmp(newName, {obj.vars.name})) ; 22 | 23 | % Replace oldName with newName in all the layers 24 | for l = 1:numel(obj.layers) 25 | for f = {'inputs', 'outputs'} 26 | f = char(f) ; 27 | sel = find(strcmp(oldName, obj.layers(l).(f))) ; 28 | [obj.layers(l).(f){sel}] = deal(newName) ; 29 | end 30 | end 31 | 32 | % If newVariable is a variable in the graph, then there is not 33 | % anything else to do. obj.rebuild() will remove the slot 34 | % in obj.vars() for oldName as that variable becomes unused. 35 | % 36 | % If, however, newVariable is not in the graph already, then 37 | % the slot in obj.vars() is preserved and only the variable name 38 | % is changed. 39 | 40 | if ~newNameExists 41 | obj.vars(v).name = newName ; 42 | % update variable name hash otherwise rebuild() won't find this var 43 | % corectly 44 | obj.varNames.(newName) = v ; 45 | end 46 | 47 | obj.rebuild() ; 48 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/reset.m: -------------------------------------------------------------------------------- 1 | function reset(obj) 2 | %RESET Reset the DagNN 3 | % RESET(obj) resets the DagNN obj. The function clears any intermediate value stored in the DagNN 4 | % object, including parameter gradients. It also calls the reset 5 | % function of every layer. 6 | 7 | obj.clearParameterServer() ; 8 | [obj.vars.value] = deal([]) ; 9 | [obj.vars.der] = deal([]) ; 10 | [obj.params.der] = deal([]) ; 11 | for l = 1:numel(obj.layers) 12 | obj.layers(l).block.reset() ; 13 | end 14 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/saveobj.m: -------------------------------------------------------------------------------- 1 | function s = saveobj(obj) 2 | %SAVEOBJ Save a DagNN to a vanilla MATLAB structure 3 | % S = OBJ.SAVEOBJ() saves the DagNN OBJ to a vanilla MATLAB 4 | % structure S. 
This is particularly convenient to preserve future 5 | % compatibility and to ship networks that are pure structures, 6 | % instead of embedding dependencies to code. 7 | % 8 | % The object can be reconstructe by `obj = DagNN.loadobj(s)`. 9 | % 10 | % As a side-effect the network is being reset (all variables are cleared) 11 | % and is transfered to CPU. 12 | % 13 | % See Also: dagnn.DagNN.loadobj, dagnn.DagNN.reset 14 | 15 | % Copyright (C) 2015-2016 Karel Lenc and Andrea Vedaldi. 16 | % All rights reserved. 17 | % 18 | % This file is part of the VLFeat library and is made available under 19 | % the terms of the BSD license (see the COPYING file). 20 | 21 | device = obj.device ; 22 | obj.move('cpu') ; 23 | s.vars = struct(... 24 | 'name', {obj.vars.name}, ... 25 | 'precious', {obj.vars.precious}) ; 26 | s.params = struct(... 27 | 'name', {obj.params.name}, ... 28 | 'value', {obj.params.value}, ... 29 | 'learningRate', {obj.params.learningRate}, ... 30 | 'weightDecay', {obj.params.weightDecay}) ; 31 | s.layers = struct(... 32 | 'name', {obj.layers.name}, ... 33 | 'type', {[]}, ... 34 | 'inputs', {obj.layers.inputs}, ... 35 | 'outputs', {obj.layers.outputs}, ... 36 | 'params', {obj.layers.params}, ... 37 | 'block', {[]}) ; 38 | s.meta = obj.meta ; 39 | 40 | for l = 1:numel(obj.layers) 41 | block = obj.layers(l).block ; 42 | slayer = block.save() ; 43 | s.layers(l).type = class(block) ; 44 | s.layers(l).block = slayer ; 45 | end 46 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/setLayerInputs.m: -------------------------------------------------------------------------------- 1 | function v = setLayerInputs(obj, layer, inputs) 2 | %SETLAYERINPUTS Set or change the inputs to a layer 3 | % Example: NET.SETLAYERINPUTS('layerName', {'input1', 'input2', ...}) 4 | 5 | v = [] ; 6 | l = obj.getLayerIndex(layer) ; 7 | for input = inputs 8 | v(end+1) = obj.addVar(char(input)) ; 9 | end 10 | obj.layers(l).inputs = inputs ; 11 | obj.rebuild() ; 12 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/setLayerOutputs.m: -------------------------------------------------------------------------------- 1 | function v = setLayerOutputs(obj, layer, outputs) 2 | %SETLAYEROUTPUTS Set or change the outputs of a layer 3 | % Example: NET.SETLAYEROUTPUTS('layerName', {'output1', 'output2', ...}) 4 | 5 | v = [] ; 6 | l = obj.getLayerIndex(layer) ; 7 | for output = outputs 8 | v(end+1) = obj.addVar(char(output)) ; 9 | end 10 | obj.layers(l).outputs = outputs ; 11 | obj.rebuild() ; 12 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/@DagNN/setLayerParams.m: -------------------------------------------------------------------------------- 1 | function v = setLayerParams(obj, layer, params) 2 | %SETLAYEPARAMS Set or change the parameters of a layer 3 | % Example: NET.SETLAYERPARAMS('layerName', {'param1', 'param2', ...}) 4 | 5 | v = [] ; 6 | l = obj.getLayerIndex(layer) ; 7 | for param = params 8 | v(end+1) = obj.addParam(char(param)) ; 9 | end 10 | obj.layers(l).params = params ; 11 | obj.rebuild() ; 12 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/BilinearSampler.m: -------------------------------------------------------------------------------- 1 | % 
Wrapper for BilinearSampler block: 2 | % (c) 2016 Ankush Gupta 3 | 4 | classdef BilinearSampler < dagnn.Layer 5 | methods 6 | function outputs = forward(obj, inputs, params) 7 | outputs = vl_nnbilinearsampler(inputs{1}, inputs{2}); 8 | outputs = {outputs}; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | [dX,dG] = vl_nnbilinearsampler(inputs{1}, inputs{2}, derOutputs{1}); 13 | derInputs = {dX,dG}; 14 | derParams = {}; 15 | end 16 | 17 | function outputSizes = getOutputSizes(obj, inputSizes) 18 | xSize = inputSizes{1}; 19 | gSize = inputSizes{2}; 20 | outputSizes = {[gSize(2), gSize(3), xSize(3), xSize(4)]}; 21 | end 22 | 23 | function obj = BilinearSampler(varargin) 24 | obj.load(varargin); 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/DropOut.m: -------------------------------------------------------------------------------- 1 | classdef DropOut < dagnn.ElementWise 2 | properties 3 | rate = 0.5 4 | frozen = false 5 | end 6 | 7 | properties (Transient) 8 | mask 9 | end 10 | 11 | methods 12 | function outputs = forward(obj, inputs, params) 13 | if strcmp(obj.net.mode, 'test') 14 | outputs = inputs ; 15 | return ; 16 | end 17 | if obj.frozen & ~isempty(obj.mask) 18 | outputs{1} = vl_nndropout(inputs{1}, 'mask', obj.mask) ; 19 | else 20 | [outputs{1}, obj.mask] = vl_nndropout(inputs{1}, 'rate', obj.rate) ; 21 | end 22 | end 23 | 24 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 25 | if strcmp(obj.net.mode, 'test') 26 | derInputs = derOutputs ; 27 | derParams = {} ; 28 | return ; 29 | end 30 | derInputs{1} = vl_nndropout(inputs{1}, derOutputs{1}, 'mask', obj.mask) ; 31 | derParams = {} ; 32 | end 33 | 34 | % --------------------------------------------------------------------- 35 | function obj = DropOut(varargin) 36 | obj.load(varargin{:}) ; 37 | end 38 | 39 | function obj = reset(obj) 40 | reset@dagnn.ElementWise(obj) ; 41 | obj.mask = [] ; 42 | obj.frozen = false ; 43 | end 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/ElementWise.m: -------------------------------------------------------------------------------- 1 | classdef ElementWise < dagnn.Layer 2 | %ELEMENTWISE DagNN layers that operate at individual spatial locations 3 | methods 4 | function [outputSizes, transforms] = forwardGeometry(self, inputSizes, paramSizes) 5 | outputSizes = inputSizes ; 6 | transforms = {eye(6)} ; 7 | end 8 | 9 | function rfs = getReceptiveFields(obj) 10 | rfs.size = [1 1] ; 11 | rfs.stride = [1 1] ; 12 | rfs.offset = [1 1] ; 13 | end 14 | 15 | function outputSizes = getOutputSizes(obj, inputSizes) 16 | outputSizes = inputSizes ; 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/LRN.m: -------------------------------------------------------------------------------- 1 | classdef LRN < dagnn.ElementWise 2 | properties 3 | param = [5 1 0.0001/5 0.75] 4 | end 5 | 6 | methods 7 | function outputs = forward(obj, inputs, params) 8 | outputs{1} = vl_nnnormalize(inputs{1}, obj.param) ; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | derInputs{1} = vl_nnnormalize(inputs{1}, obj.param, derOutputs{1}) ; 13 | derParams = {} ; 14 | end 15 | 16 | 
function obj = LRN(varargin) 17 | obj.load(varargin) ; 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/Loss.m: -------------------------------------------------------------------------------- 1 | classdef Loss < dagnn.ElementWise 2 | properties 3 | loss = 'softmaxlog' 4 | opts = {} 5 | end 6 | 7 | properties (Transient) 8 | average = 0 9 | numAveraged = 0 10 | end 11 | 12 | methods 13 | function outputs = forward(obj, inputs, params) 14 | outputs{1} = vl_nnloss(inputs{1}, inputs{2}, [], 'loss', obj.loss, obj.opts{:}) ; 15 | n = obj.numAveraged ; 16 | m = n + size(inputs{1},4) ; 17 | obj.average = (n * obj.average + gather(outputs{1})) / m ; 18 | obj.numAveraged = m ; 19 | end 20 | 21 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 22 | derInputs{1} = vl_nnloss(inputs{1}, inputs{2}, derOutputs{1}, 'loss', obj.loss, obj.opts{:}) ; 23 | derInputs{2} = [] ; 24 | derParams = {} ; 25 | end 26 | 27 | function reset(obj) 28 | obj.average = 0 ; 29 | obj.numAveraged = 0 ; 30 | end 31 | 32 | function outputSizes = getOutputSizes(obj, inputSizes, paramSizes) 33 | outputSizes{1} = [1 1 1 inputSizes{1}(4)] ; 34 | end 35 | 36 | function rfs = getReceptiveFields(obj) 37 | % the receptive field depends on the dimension of the variables 38 | % which is not known until the network is run 39 | rfs(1,1).size = [NaN NaN] ; 40 | rfs(1,1).stride = [NaN NaN] ; 41 | rfs(1,1).offset = [NaN NaN] ; 42 | rfs(2,1) = rfs(1,1) ; 43 | end 44 | 45 | function obj = Loss(varargin) 46 | obj.load(varargin) ; 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/MaskGating.m: -------------------------------------------------------------------------------- 1 | classdef MaskGating < dagnn.ElementWise 2 | properties 3 | size 4 | hasBias = false 5 | end 6 | properties (Transient) 7 | numInputs 8 | end 9 | 10 | methods 11 | 12 | function outputs = forward(obj, inputs, params) 13 | params = inputs{end}; 14 | inputs = inputs(1:end-1); 15 | obj.numInputs = numel(inputs) ; 16 | 17 | outputs{1} = 0; 18 | for k = 1:numel(inputs) 19 | outputs{1} = outputs{1} + bsxfun(@times, inputs{k}, params(:,:,k)) ; 20 | end 21 | end 22 | 23 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 24 | params = inputs{end}; 25 | inputs = inputs(1:end-1); 26 | 27 | for k = 1:obj.numInputs 28 | derInputs{k} = bsxfun(@times, derOutputs{1}, params(:,:,k) ); 29 | end 30 | derParams = {} ; 31 | derInputs{end+1} = []; 32 | end 33 | 34 | function obj = Scale(varargin) 35 | obj.load(varargin) ; 36 | end 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/NormOffset.m: -------------------------------------------------------------------------------- 1 | classdef NormOffset < dagnn.ElementWise 2 | properties 3 | param = [1 0.5] 4 | end 5 | 6 | methods 7 | function outputs = forward(obj, inputs, params) 8 | outputs{1} = vl_nnnoffset(inputs{1}, obj.param) ; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | derInputs{1} = vl_nnnoffset(inputs{1}, obj.param, derOutputs{1}) ; 13 | derParams = {} ; 14 | end 15 | 16 | function obj = NormOffset(varargin) 17 | obj.load(varargin) ; 18 | end 19 | end 20 | end 21 | 
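The wrapper classes in this `+dagnn` package all follow the same pattern: `forward` maps the input variables to the output variables, `backward` maps the output derivatives back onto input (and parameter) derivatives, and the constructor simply calls `obj.load(varargin)`. As an editor's sketch (the class name and the use of `vl_nnrelu` are illustrative, not part of this repository), a minimal custom element-wise layer would look like this:

```matlab
classdef MyReLU < dagnn.ElementWise
  % Minimal custom DagNN layer wrapping vl_nnrelu (sketch only).
  methods
    function outputs = forward(obj, inputs, params)
      outputs{1} = vl_nnrelu(inputs{1}) ;
    end

    function [derInputs, derParams] = backward(obj, inputs, params, derOutputs)
      derInputs{1} = vl_nnrelu(inputs{1}, derOutputs{1}) ;
      derParams = {} ;
    end

    function obj = MyReLU(varargin)
      obj.load(varargin) ;
    end
  end
end
```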
-------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/Pooling.m: -------------------------------------------------------------------------------- 1 | classdef Pooling < dagnn.Filter 2 | properties 3 | method = 'max' 4 | poolSize = [1 1] 5 | opts = {'cuDNN'} 6 | end 7 | 8 | methods 9 | function outputs = forward(self, inputs, params) 10 | outputs{1} = vl_nnpool(inputs{1}, self.poolSize, ... 11 | 'pad', self.pad, ... 12 | 'stride', self.stride, ... 13 | 'method', self.method, ... 14 | self.opts{:}) ; 15 | end 16 | 17 | function [derInputs, derParams] = backward(self, inputs, params, derOutputs) 18 | derInputs{1} = vl_nnpool(inputs{1}, self.poolSize, derOutputs{1}, ... 19 | 'pad', self.pad, ... 20 | 'stride', self.stride, ... 21 | 'method', self.method, ... 22 | self.opts{:}) ; 23 | derParams = {} ; 24 | end 25 | 26 | function kernelSize = getKernelSize(obj) 27 | kernelSize = obj.poolSize ; 28 | end 29 | 30 | function outputSizes = getOutputSizes(obj, inputSizes) 31 | outputSizes = getOutputSizes@dagnn.Filter(obj, inputSizes) ; 32 | outputSizes{1}(3) = inputSizes{1}(3) ; 33 | end 34 | 35 | function obj = Pooling(varargin) 36 | obj.load(varargin) ; 37 | end 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/Scale.m: -------------------------------------------------------------------------------- 1 | classdef Scale < dagnn.ElementWise 2 | properties 3 | size 4 | hasBias = true 5 | end 6 | 7 | methods 8 | 9 | function outputs = forward(obj, inputs, params) 10 | args = horzcat(inputs, params) ; 11 | outputs{1} = bsxfun(@times, args{1}, args{2}) ; 12 | if obj.hasBias 13 | outputs{1} = bsxfun(@plus, outputs{1}, args{3}) ; 14 | end 15 | end 16 | 17 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 18 | args = horzcat(inputs, params) ; 19 | sz = [size(args{2}) 1 1 1 1] ; 20 | sz = sz(1:4) ; 21 | dargs{1} = bsxfun(@times, derOutputs{1}, args{2}) ; 22 | dargs{2} = derOutputs{1} .* args{1} ; 23 | for k = find(sz == 1) 24 | dargs{2} = sum(dargs{2}, k) ; 25 | end 26 | if obj.hasBias 27 | dargs{3} = derOutputs{1} ; 28 | for k = find(sz == 1) 29 | dargs{3} = sum(dargs{3}, k) ; 30 | end 31 | end 32 | derInputs = dargs(1:numel(inputs)) ; 33 | derParams = dargs(numel(inputs)+(1:numel(params))) ; 34 | end 35 | 36 | function obj = Scale(varargin) 37 | obj.load(varargin) ; 38 | end 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/Sigmoid.m: -------------------------------------------------------------------------------- 1 | classdef Sigmoid < dagnn.ElementWise 2 | methods 3 | function outputs = forward(obj, inputs, params) 4 | outputs{1} = vl_nnsigmoid(inputs{1}) ; 5 | end 6 | 7 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 8 | derInputs{1} = vl_nnsigmoid(inputs{1}, derOutputs{1}) ; 9 | derParams = {} ; 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/SoftMax.m: -------------------------------------------------------------------------------- 1 | classdef SoftMax < dagnn.ElementWise 2 | methods 3 | function outputs = forward(self, inputs, params) 4 | outputs{1} = vl_nnsoftmax(inputs{1}) ; 5 | end 6 | 7 | function [derInputs, derParams] = 
backward(self, inputs, params, derOutputs) 8 | derInputs{1} = vl_nnsoftmax(inputs{1}, derOutputs{1}) ; 9 | derParams = {} ; 10 | end 11 | 12 | function obj = SoftMax(varargin) 13 | obj.load(varargin) ; 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/SpatialNorm.m: -------------------------------------------------------------------------------- 1 | classdef SpatialNorm < dagnn.ElementWise 2 | properties 3 | param = [2 2 10 2] 4 | end 5 | 6 | methods 7 | function outputs = forward(obj, inputs, params) 8 | outputs{1} = vl_nnspnorm(inputs{1}, obj.param) ; 9 | end 10 | 11 | function [derInputs, derParams] = backward(obj, inputs, param, derOutputs) 12 | derInputs{1} = vl_nnspnorm(inputs{1}, obj.param, derOutputs{1}) ; 13 | derParams = {} ; 14 | end 15 | 16 | function obj = SpatialNorm(varargin) 17 | obj.load(varargin) ; 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/+dagnn/Sum.m: -------------------------------------------------------------------------------- 1 | classdef Sum < dagnn.ElementWise 2 | %SUM DagNN sum layer 3 | % The SUM layer takes the sum of all its inputs and store the result 4 | % as its only output. 5 | 6 | properties (Transient) 7 | numInputs 8 | end 9 | 10 | methods 11 | function outputs = forward(obj, inputs, params) 12 | obj.numInputs = numel(inputs) ; 13 | outputs{1} = inputs{1} ; 14 | for k = 2:obj.numInputs 15 | outputs{1} = outputs{1} + inputs{k} ; 16 | end 17 | end 18 | 19 | function [derInputs, derParams] = backward(obj, inputs, params, derOutputs) 20 | for k = 1:obj.numInputs 21 | derInputs{k} = derOutputs{1} ; 22 | end 23 | derParams = {} ; 24 | end 25 | 26 | function outputSizes = getOutputSizes(obj, inputSizes) 27 | outputSizes{1} = inputSizes{1} ; 28 | for k = 2:numel(inputSizes) 29 | if all(~isnan(inputSizes{k})) && all(~isnan(outputSizes{1})) 30 | if ~isequal(inputSizes{k}, outputSizes{1}) 31 | warning('Sum layer: the dimensions of the input variables is not the same.') ; 32 | end 33 | end 34 | end 35 | end 36 | 37 | function rfs = getReceptiveFields(obj) 38 | numInputs = numel(obj.net.layers(obj.layerIndex).inputs) ; 39 | rfs.size = [1 1] ; 40 | rfs.stride = [1 1] ; 41 | rfs.offset = [1 1] ; 42 | rfs = repmat(rfs, numInputs, 1) ; 43 | end 44 | 45 | function obj = Sum(varargin) 46 | obj.load(varargin) ; 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/compatibility/parallel/gather.m: -------------------------------------------------------------------------------- 1 | function x=gather(x) 2 | % GATHER Compatibility stub for the GATHER() function 3 | % GATHER() is a function in the Parallel MATLAB toolbox. MATCONVNET 4 | % can work without it. 
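% (Editor's note: this stub simply returns X unchanged, so CPU-only code
% paths such as scores = squeeze(gather(res(end).x)) in the examples above
% keep working when the Parallel Computing Toolbox is not installed.)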
5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/compatibility/parallel/labindex.m: -------------------------------------------------------------------------------- 1 | function i = labindex() 2 | i = 1 ; 3 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/compatibility/parallel/numlabs.m: -------------------------------------------------------------------------------- 1 | function n = numlabs() 2 | n = 1 ; 3 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/simplenn/vl_simplenn_move.m: -------------------------------------------------------------------------------- 1 | function net = vl_simplenn_move(net, destination) 2 | %VL_SIMPLENN_MOVE Move a SimpleNN network between CPU and GPU. 3 | % NET = VL_SIMPLENN_MOVE(NET, 'gpu') moves the network to the 4 | % current GPU device. NET = VL_SIMPLENN_MOVE(NET, 'cpu') moves the 5 | % network to the CPU. 6 | % 7 | % See also: VL_SIMPLENN(). 8 | 9 | % Copyright (C) 2014-15 Andrea Vedaldi. 10 | % All rights reserved. 11 | % 12 | % This file is part of the VLFeat library and is made available under 13 | % the terms of the BSD license (see the COPYING file). 14 | 15 | switch destination 16 | case 'gpu', moveop = @(x) gpuArray(x) ; 17 | case 'cpu', moveop = @(x) gather(x) ; 18 | otherwise, error('Unknown destination ''%s''.', destination) ; 19 | end 20 | for l=1:numel(net.layers) 21 | switch net.layers{l}.type 22 | case {'conv', 'convt', 'bnorm'} 23 | for f = {'filters', 'biases', 'filtersMomentum', 'biasesMomentum'} 24 | f = char(f) ; 25 | if isfield(net.layers{l}, f) 26 | net.layers{l}.(f) = moveop(net.layers{l}.(f)) ; 27 | end 28 | end 29 | for f = {'weights', 'momentum'} 30 | f = char(f) ; 31 | if isfield(net.layers{l}, f) 32 | for j=1:numel(net.layers{l}.(f)) 33 | net.layers{l}.(f){j} = moveop(net.layers{l}.(f){j}) ; 34 | end 35 | end 36 | end 37 | otherwise 38 | % nothing to do ? 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/simplenn/vl_simplenn_start_parserv.m: -------------------------------------------------------------------------------- 1 | function vl_simplenn_start_parserv(net, ps) 2 | %VL_SIMPLENN_START_PARSERV Setup a parameter server for this network 3 | % VL_SIMPLENN_START_PARSERV(NET, PS) registers the network 4 | % parameter derivatives with the specified ParameterServer instance 5 | % PS and then starts the server. 6 | 7 | for i = 1:numel(net.layers) 8 | for j = 1:numel(net.layers{i}.weights) 9 | value = net.layers{i}.weights{j} ; 10 | name = sprintf('l%d_%d',i,j) ; 11 | if strcmp(class(value),'gpuArray') 12 | deviceType = 'gpu' ; 13 | dataType = classUnderlying(value) ; 14 | else 15 | deviceType = 'cpu' ; 16 | dataType = class(value) ; 17 | end 18 | ps.register(... 19 | name, ... 20 | size(value), ... 21 | dataType, ... 
22 | deviceType) ; 23 | end 24 | end 25 | ps.start() ; 26 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/data.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "data.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/datamex.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "datamex.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/bilinearsampler.hpp: -------------------------------------------------------------------------------- 1 | // @file bilinearsampler.hpp 2 | // @brief Bilinear sampler implementation 3 | // @author Ankush Gupta 4 | // @author Andrea Vedaldi 5 | 6 | /* 7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi. 8 | All rights reserved. 9 | 10 | This file is part of the VLFeat library and is made available under 11 | the terms of the BSD license (see the COPYING file). 12 | */ 13 | 14 | #ifndef VL_BILINEARSAMPLER_H 15 | #define VL_BILINEARSAMPLER_H 16 | 17 | #include "../data.hpp" 18 | #include 19 | 20 | // defines the dispatcher for CUDA kernels: 21 | namespace vl { namespace impl { 22 | 23 | template 24 | struct bilinearsampler { 25 | 26 | static vl::ErrorCode 27 | forward(Context& context, 28 | type* output, 29 | type const* data, 30 | type const* grid, 31 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality, 32 | size_t inHeight, size_t inWidth, size_t inCardinality) ; 33 | 34 | 35 | static vl::ErrorCode 36 | backward(Context& context, 37 | type* derData, 38 | type* derGrid, 39 | type const* data, 40 | type const* grid, 41 | type const* derOutput, 42 | size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality, 43 | size_t inHeight, size_t inWidth, size_t inCardinality) ; 44 | } ; 45 | 46 | } } 47 | 48 | #endif /* defined(VL_BILINEARSAMPLER_H) */ 49 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/copy.hpp: -------------------------------------------------------------------------------- 1 | // @file copy.hpp 2 | // @brief Copy and other data operations 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__copy__ 14 | #define __vl__copy__ 15 | 16 | #include "../data.hpp" 17 | 18 | namespace vl { namespace impl { 19 | 20 | template 21 | struct operations 22 | { 23 | typedef type data_type ; 24 | static vl::ErrorCode copy(data_type * dest, data_type const * src, size_t numElements) ; 25 | static vl::ErrorCode fill(data_type * dest, size_t numElements, data_type value) ; 26 | } ; 27 | } } 28 | 29 | #endif /* defined(__vl__copy__) */ 30 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/copy_cpu.cpp: -------------------------------------------------------------------------------- 1 | // @file copy_cpu.cpp 2 | // @brief Copy and other data operations (CPU) 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #include "copy.hpp" 14 | #include 15 | 16 | namespace vl { namespace impl { 17 | 18 | template 19 | struct operations 20 | { 21 | typedef type data_type ; 22 | 23 | static vl::ErrorCode 24 | copy(data_type * dest, 25 | data_type const * src, 26 | size_t numElements) 27 | { 28 | memcpy(dest, src, numElements * sizeof(data_type)) ; 29 | return VLE_Success ; 30 | } 31 | 32 | static vl::ErrorCode 33 | fill(data_type * dest, 34 | size_t numElements, 35 | data_type value) 36 | { 37 | for (size_t k = 0 ; k < numElements ; ++k) { 38 | dest[k] = value ; 39 | } 40 | return VLE_Success ; 41 | } 42 | } ; 43 | 44 | } } 45 | 46 | template struct vl::impl::operations ; 47 | 48 | #ifdef ENABLE_DOUBLE 49 | template struct vl::impl::operations ; 50 | #endif 51 | 52 | 53 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/copy_gpu.cu: -------------------------------------------------------------------------------- 1 | // @file copy_gpu.cu 2 | // @brief Copy and other data operations (GPU) 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #include "copy.hpp" 14 | #include "../datacu.hpp" 15 | #include 16 | 17 | template __global__ void 18 | fill_kernel (type * data, type value, size_t size) 19 | { 20 | int index = threadIdx.x + blockIdx.x * blockDim.x ; 21 | if (index < size) data[index] = value ; 22 | } 23 | 24 | namespace vl { namespace impl { 25 | 26 | template 27 | struct operations 28 | { 29 | typedef type data_type ; 30 | 31 | static vl::ErrorCode 32 | copy(data_type * dest, 33 | data_type const * src, 34 | size_t numElements) 35 | { 36 | cudaMemcpy(dest, src, numElements * sizeof(data_type), cudaMemcpyDeviceToDevice) ; 37 | return VLE_Success ; 38 | } 39 | 40 | static vl::ErrorCode 41 | fill(data_type * dest, 42 | size_t numElements, 43 | data_type value) 44 | { 45 | fill_kernel 46 | <<>> 47 | (dest, numElements, value) ; 48 | 49 | cudaError_t error = cudaGetLastError() ; 50 | if (error != cudaSuccess) { 51 | return VLE_Cuda ; 52 | } 53 | return VLE_Success ; 54 | } 55 | } ; 56 | 57 | } } 58 | 59 | template struct vl::impl::operations ; 60 | 61 | #ifdef ENABLE_DOUBLE 62 | template struct vl::impl::operations ; 63 | #endif -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/im2row.hpp: -------------------------------------------------------------------------------- 1 | // @file im2row.hpp 2 | // @brief Stack image patches as matrix rows 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__im2row__ 14 | #define __vl__im2row__ 15 | 16 | #include "../data.hpp" 17 | #include 18 | 19 | namespace vl { namespace impl { 20 | 21 | template 22 | struct im2row { 23 | 24 | static vl::ErrorCode 25 | forward(vl::Context& context, 26 | type* stacked, 27 | type const* data, 28 | size_t height, size_t width, size_t depth, 29 | size_t windowHeight, size_t windowWidth, 30 | size_t strideY, size_t strideX, 31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight, 32 | int dilateY, int dialteX) ; 33 | 34 | static vl::ErrorCode 35 | backward(vl::Context& context, 36 | type* data, 37 | type const* stacked, 38 | size_t height, size_t width, size_t depth, 39 | size_t windowHeight, size_t windowWidth, 40 | size_t strideY, size_t strideX, 41 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight, 42 | int dilateY, int dilateX) ; 43 | } ; 44 | 45 | } } 46 | 47 | #endif /* defined(__vl__im2row__) */ 48 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/nnbias_cudnn.hpp: -------------------------------------------------------------------------------- 1 | // @file nnbias_blas.hpp 2 | // @brief biasolution block CuDNN-based implementation. 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__nnbias_cudnn__ 14 | #define __vl__nnbias_cudnn__ 15 | 16 | #include "../data.hpp" 17 | #include "cudnn.h" 18 | 19 | namespace vl { namespace impl { 20 | 21 | // todo: data type should be handled internally? 
22 | 23 | template 24 | struct nnbias_cudnn 25 | { 26 | static vl::ErrorCode 27 | forward(vl::Context& context, 28 | vl::Tensor output, double outputMult, 29 | vl::Tensor data, double dataMult, 30 | vl::Tensor biases, double biasesMult) ; 31 | 32 | static vl::ErrorCode 33 | backward(vl::Context& context, 34 | vl::Tensor derData, double derDataMult, 35 | vl::Tensor derBiases, double derBiasesMult, 36 | vl::Tensor derOutput, double derOutputMult) ; 37 | } ; 38 | 39 | } } 40 | 41 | #endif /* defined(__vl__nnbias_cudnn__) */ 42 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/nnbilinearsampler_cudnn.hpp: -------------------------------------------------------------------------------- 1 | // @file nnbilinearsampler_cudnn.hpp 2 | // @brief BilinearSampler CuDNN-based implementation. 3 | // @author Ankush Gupta, Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2016 Ankush Gupta and Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__bilinearsampler_cudnn__ 14 | #define __vl__bilinearsampler_cudnn__ 15 | 16 | #include "../data.hpp" 17 | #include "cudnn.h" 18 | 19 | namespace vl { namespace impl { 20 | 21 | template 22 | struct nnbilinearsampler_cudnn 23 | { 24 | static vl::ErrorCode 25 | forward(Context& context, 26 | Tensor output, 27 | Tensor data, 28 | Tensor grid) ; 29 | 30 | static vl::ErrorCode 31 | backward(Context& context, 32 | Tensor derData, 33 | Tensor derGrid, 34 | Tensor data, 35 | Tensor grid, 36 | Tensor derOutput) ; 37 | } ; 38 | 39 | } } 40 | 41 | #endif /* defined(__vl__nnbilinearsampler_cudnn__) */ 42 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/nnconv_cudnn.hpp: -------------------------------------------------------------------------------- 1 | // @file nnconv_blas.hpp 2 | // @brief Convolution block CuDNN-based implementation. 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__nnconv_cudnn__ 14 | #define __vl__nnconv_cudnn__ 15 | 16 | #include "../data.hpp" 17 | #include "cudnn.h" 18 | 19 | namespace vl { namespace impl { 20 | 21 | template 22 | struct nnconv_cudnn 23 | { 24 | static vl::ErrorCode 25 | forward(Context& context, 26 | Tensor output, double outputMult, 27 | Tensor data, double dataMult, 28 | Tensor filters, 29 | Tensor biases, 30 | int strideX, int strideY, 31 | int padLeft, int padRight, 32 | int padTop, int padBottom, 33 | int dilateX, int dilateY) ; 34 | 35 | static vl::ErrorCode 36 | backward(Context& context, 37 | Tensor derData, 38 | Tensor derFilters, 39 | Tensor derBiases, 40 | Tensor data, 41 | Tensor filters, 42 | Tensor derOutput, 43 | int strideX, int strideY, 44 | int padLeft, int padRight, 45 | int padTop, int padBottom, 46 | int dilateX, int dilateY) ; 47 | } ; 48 | 49 | } } 50 | #endif /* defined(__vl__nnconv_cudnn__) */ 51 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/nnpooling_cudnn.hpp: -------------------------------------------------------------------------------- 1 | // @file nnpooling_blas.hpp 2 | // @brief Pooling block CuDNN-based implementation. 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__nnpooling_cudnn__ 14 | #define __vl__nnpooling_cudnn__ 15 | 16 | #include "../nnpooling.hpp" 17 | #include "../data.hpp" 18 | #include "cudnn.h" 19 | 20 | 21 | namespace vl { namespace impl { 22 | 23 | // todo: data type should be handled internally? 24 | 25 | template 26 | struct nnpooling_cudnn 27 | { 28 | static vl::ErrorCode 29 | forward(Context& context, 30 | Tensor output, 31 | Tensor data, 32 | vl::PoolingMethod method, 33 | int poolHeight, int poolWidth, 34 | int strideY, int strideX, 35 | int padTop, int padBottom, 36 | int padLeft, int padRight) ; 37 | 38 | static vl::ErrorCode 39 | backward(Context& context, 40 | Tensor derData, 41 | Tensor data, 42 | Tensor output, 43 | Tensor derOutput, 44 | vl::PoolingMethod method, 45 | int poolHeight, int poolWidth, 46 | int strideY, int strideX, 47 | int padTop, int padBottom, 48 | int padLeft, int padRight) ; 49 | }; 50 | 51 | } } 52 | 53 | #endif /* defined(__vl__nnpooling_cudnn__) */ 54 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/normalize.hpp: -------------------------------------------------------------------------------- 1 | // @file normalize.hpp 2 | // @brief Normalize block implementation 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__normalize__ 14 | #define __vl__normalize__ 15 | 16 | #include "../data.hpp" 17 | #include 18 | 19 | namespace vl { namespace impl { 20 | 21 | template 22 | struct lrn 23 | { 24 | static vl::ErrorCode 25 | forward(type* output, 26 | type const* data, 27 | size_t height, size_t width, size_t depth, size_t size, 28 | size_t normDetph, 29 | type kappa, type alpha, type beta) ; 30 | 31 | static vl::ErrorCode 32 | backward(type* derData, 33 | type const* data, 34 | type const* derOutput, 35 | size_t height, size_t width, size_t depth, size_t size, 36 | size_t normDetph, 37 | type kappa, type alpha, type beta) ; 38 | } ; 39 | 40 | } } 41 | 42 | #endif /* __vl__normalize__ */ 43 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/impl/subsample.hpp: -------------------------------------------------------------------------------- 1 | // @file subsampling.hpp 2 | // @brief Subsampling block implementation 3 | // @author Andrea Vedaldi 4 | // @author Karel Lenc 5 | 6 | /* 7 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. 8 | All rights reserved. 9 | 10 | This file is part of the VLFeat library and is made available under 11 | the terms of the BSD license (see the COPYING file). 12 | */ 13 | 14 | #ifndef VL_NNSUBSAMPLE_H 15 | #define VL_NNSUBSAMPLE_H 16 | 17 | #include "../data.hpp" 18 | #include 19 | 20 | namespace vl { namespace impl { 21 | 22 | template 23 | struct subsample { 24 | 25 | static vl::ErrorCode 26 | forward(vl::Context& context, 27 | type* output, 28 | type const* data, 29 | size_t height, size_t width, size_t depth, 30 | size_t strideY, size_t strideX, 31 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ; 32 | 33 | static vl::ErrorCode 34 | backward(vl::Context& context, 35 | type* derData, 36 | type const* derOutput, 37 | size_t height, size_t width, size_t depth, 38 | size_t strideY, size_t strideX, 39 | size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) ; 40 | } ; 41 | 42 | } } 43 | 44 | #endif /* defined(VL_NNSUBSAMPLE_H) */ 45 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/imread.hpp: -------------------------------------------------------------------------------- 1 | // @file imread.hpp 2 | // @brief Image reader 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__imread__ 14 | #define __vl__imread__ 15 | 16 | #include "data.hpp" 17 | 18 | namespace vl { 19 | 20 | #define VL_IMAGE_ERROR_MSG_MAX_LENGTH 256 21 | 22 | struct ImageShape 23 | { 24 | size_t height ; 25 | size_t width ; 26 | size_t depth ; 27 | 28 | ImageShape() ; 29 | ImageShape(size_t height, size_t width, size_t depth) ; 30 | ImageShape(ImageShape const & im) ; 31 | ImageShape & operator = (ImageShape const & im) ; 32 | bool operator == (ImageShape const & im) ; 33 | 34 | size_t getNumElements() const ; 35 | void clear() ; 36 | } ; 37 | 38 | class Image 39 | { 40 | public: 41 | Image() ; 42 | Image(Image const & im) ; 43 | Image(ImageShape const & shape, float * memory = NULL) ; 44 | ImageShape const & getShape() const ; 45 | float * getMemory() const ; 46 | void clear() ; 47 | 48 | protected: 49 | ImageShape shape ; 50 | float * memory ; 51 | } ; 52 | 53 | class ImageReader 54 | { 55 | public: 56 | ImageReader() ; 57 | ~ImageReader() ; 58 | vl::ErrorCode readShape(ImageShape & image, char const * fileName) ; 59 | vl::ErrorCode readPixels(float * memory, char const * fileName) ; 60 | char const * getLastErrorMessage() const ; 61 | 62 | private: 63 | class Impl ; 64 | Impl * impl ; 65 | } ; 66 | } 67 | 68 | #endif 69 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnbias.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnsubsample.cu should be compiled instead" 3 | #endif 4 | #include "nnbias.cu" 5 | 6 | /** 7 | @brief nnbias_forward 8 | @param context context. 9 | @param output output tensor $\by$ [output]. 10 | @param outputMult output tensor multiplier $\alpha$. 11 | @param data data tensor $\bx$. 12 | @param dataMult data tensor multiplier $\beta$. 13 | @param biases biases tensor $\bb$. 14 | @param biasesMult biases tensor multiplier $\gamma$. 15 | 16 | The function computes 17 | @f[ 18 | y_{ijkd} \leftarrow 19 | \alpha y_{ijkd} + 20 | \beta x_{ijkd} + 21 | \gamma b_k. 22 | @f] 23 | 24 | @a data can be the null tensor, in which case this tensor 25 | is dropped in the summation. 26 | */ 27 | 28 | /** 29 | @brief nnbias_backward 30 | @param context context. 31 | @param derData data derivative tensor $d\bx$ [output]. 32 | @param derDataMult data derivative tensor multiplier $\eta$. 33 | @param derBiases biases derivative tensor $d\bb$ [output]. 34 | @param derBiasesMult biased derivative tensor multiplier $\tau$. 35 | @param data data tensor $\bx$. 36 | @param dataMult data tensor multiplier $\beta$. 37 | @param biases biases tensor $\bb$. 38 | @param biasesMult biases tensor multiplier $\gamma$. 39 | 40 | If @a derData is the null tensor, this derivative is not comptued and 41 | @param biases can also be null. 42 | 43 | If @a derBiases is the null tensor, this derivative is not computed and 44 | @param data can also be null. 45 | */ 46 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnbias.hpp: -------------------------------------------------------------------------------- 1 | // @file nnbias.hpp 2 | // @brief Bias block 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2015 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__nnbias__ 14 | #define __vl__nnbias__ 15 | 16 | #include "data.hpp" 17 | 18 | namespace vl { 19 | 20 | vl::ErrorCode 21 | nnbias_forward(vl::Context& context, 22 | vl::Tensor output, double outputMult, 23 | vl::Tensor data, double dataMult, 24 | vl::Tensor biases, double biasesMult) ; 25 | 26 | vl::ErrorCode 27 | nnbias_backward(vl::Context& context, 28 | vl::Tensor derData, double derDataMult, 29 | vl::Tensor derBiases, double derBiasesMult, 30 | vl::Tensor derOutput, double derOutputMult) ; 31 | } 32 | 33 | #endif /* defined(__vl__nnbias__) */ 34 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnbilinearsampler.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnbnorm.cu should be compiled instead" 3 | #endif 4 | #include "nnbilinearsampler.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnbilinearsampler.hpp: -------------------------------------------------------------------------------- 1 | // @file nnbilinearsampler.hpp 2 | // @brief Bilinear sampler block 3 | // @author Ankush Gupta 4 | // @author Andrea Vedaldi 5 | 6 | /* 7 | Copyright (C) 2016- Ankush Gupta and Andrea Vedaldi. 8 | All rights reserved. 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__nnbilinearsampler__ 14 | #define __vl__nnbilinearsampler__ 15 | 16 | #include "data.hpp" 17 | #include 18 | 19 | namespace vl { 20 | vl::ErrorCode 21 | nnbilinearsampler_forward(vl::Context& context, 22 | vl::Tensor output, 23 | vl::Tensor data, 24 | vl::Tensor grid) ; 25 | 26 | vl::ErrorCode 27 | nnbilinearsampler_backward(vl::Context& context, 28 | vl::Tensor derData, 29 | vl::Tensor derGrid, 30 | vl::Tensor data, 31 | vl::Tensor grid, 32 | vl::Tensor derOutput) ; 33 | } 34 | 35 | #endif /* defined(__vl__nnbilinearsampler__) */ 36 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnbnorm.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnbnorm.cu should be compiled instead" 3 | #endif 4 | #include "nnbnorm.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnconv.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnconv.cu should be compiled instead" 3 | #endif 4 | #include "nnconv.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnfullyconnected.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnfullyconnected.cu should be compiled instead" 3 | #endif 4 | #include "nnfullyconnected.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnfullyconnected.hpp: -------------------------------------------------------------------------------- 1 | // @file nnfullyconnected.hpp 2 | // @brief Fully-connected block 3 | // @author 
Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | 14 | #ifndef __vl__nnfullyconnected__ 15 | #define __vl__nnfullyconnected__ 16 | 17 | #include "data.hpp" 18 | 19 | namespace vl { 20 | 21 | vl::ErrorCode 22 | nnfullyconnected_forward(vl::Context& context, 23 | vl::Tensor output, 24 | vl::Tensor data, 25 | vl::Tensor filters, 26 | vl::Tensor biases) ; 27 | 28 | vl::ErrorCode 29 | nnfullyconnected_backward(vl::Context& context, 30 | vl::Tensor derData, 31 | vl::Tensor derFilters, 32 | vl::Tensor derBiases, 33 | vl::Tensor data, 34 | vl::Tensor filters, 35 | vl::Tensor derOutput) ; 36 | } 37 | 38 | 39 | #endif /* defined(__vl__nnfullyconnected__) */ 40 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnnormalize.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnnormalize.cu should be compiled instead" 3 | #endif 4 | #include "nnnormalize.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnnormalize.hpp: -------------------------------------------------------------------------------- 1 | // @file nnnormalize.hpp 2 | // @brief Normalization block 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__nnnormalize__ 14 | #define __vl__nnnormalize__ 15 | 16 | #include "data.hpp" 17 | #include 18 | 19 | namespace vl { 20 | 21 | vl::ErrorCode 22 | nnlrn_forward(vl::Context& context, 23 | vl::Tensor output, 24 | vl::Tensor data, 25 | size_t normDetph, 26 | double kappa, double alpha, double beta) ; 27 | 28 | vl::ErrorCode 29 | nnlrn_backward(vl::Context& context, 30 | vl::Tensor derData, 31 | vl::Tensor data, 32 | vl::Tensor derOutput, 33 | size_t normDetph, 34 | double kappa, double alpha, double beta) ; 35 | } 36 | 37 | #endif /* defined(__vl__nnnormalize__) */ 38 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnpooling.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnpooling.cu should be compiled instead" 3 | #endif 4 | #include "nnpooling.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnpooling.hpp: -------------------------------------------------------------------------------- 1 | // @file nnpooling.hpp 2 | // @brief Pooling block 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. 7 | All rights reserved. 8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 
11 | */ 12 | 13 | #ifndef __vl__nnpooling__ 14 | #define __vl__nnpooling__ 15 | 16 | #include "data.hpp" 17 | #include 18 | 19 | namespace vl { 20 | 21 | enum PoolingMethod { vlPoolingMax, vlPoolingAverage } ; 22 | 23 | vl::ErrorCode 24 | nnpooling_forward(vl::Context& context, 25 | vl::Tensor output, 26 | vl::Tensor data, 27 | PoolingMethod method, 28 | int poolHeight, int poolWidth, 29 | int strideY, int strideX, 30 | int padTop, int padBottom, 31 | int padLeft, int padRight) ; 32 | 33 | vl::ErrorCode 34 | nnpooling_backward(vl::Context& context, 35 | vl::Tensor derData, 36 | vl::Tensor data, 37 | vl::Tensor derOutput, 38 | PoolingMethod method, 39 | int poolHeight, int poolWidth, 40 | int strideY, int strideX, 41 | int padTop, int padBottom, 42 | int padLeft, int padRight) ; 43 | } 44 | 45 | #endif /* defined(__vl__nnpooling__) */ 46 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnroipooling.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnroipooling.cu should be compiled instead" 3 | #endif 4 | #include "nnroipooling.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnroipooling.hpp: -------------------------------------------------------------------------------- 1 | // @file nnroipooling.hpp 2 | // @brief Spatial Pyramid block 3 | // @author Hakan Bilen 4 | // @author Abishek Dutta 5 | // @author Andrea Vedaldi 6 | /* 7 | Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi. 8 | All rights reserved. 9 | 10 | This file is part of the VLFeat library and is made available under 11 | the terms of the BSD license (see the COPYING file). 12 | */ 13 | 14 | #ifndef __vl__nnroipooling__ 15 | #define __vl__nnroipooling__ 16 | 17 | #include "data.hpp" 18 | #include 19 | 20 | namespace vl { 21 | enum ROIPoolingMethod { vlROIPoolingMax, vlROIPoolingAverage } ; 22 | 23 | vl::ErrorCode 24 | nnroipooling_forward(vl::Context& context, 25 | vl::Tensor output, 26 | vl::Tensor data, 27 | vl::Tensor rois, 28 | ROIPoolingMethod method, 29 | int const subdivisions[2], 30 | double const transform[6]) ; 31 | 32 | vl::ErrorCode 33 | nnroipooling_backward(vl::Context& context, 34 | vl::Tensor derData, 35 | vl::Tensor data, 36 | vl::Tensor rois, 37 | vl::Tensor derOutput, 38 | ROIPoolingMethod method, 39 | int const subdivisions[2], 40 | double const transform[6]) ; 41 | } 42 | 43 | #endif /* defined(__vl__nnroipooling__) */ 44 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnsubsample.cpp: -------------------------------------------------------------------------------- 1 | #ifdef ENABLE_GPU 2 | #error "The file nnsubsample.cu should be compiled instead" 3 | #endif 4 | #include "nnsubsample.cu" 5 | 6 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/bits/nnsubsample.hpp: -------------------------------------------------------------------------------- 1 | // @file nnsubsample.hpp 2 | // @brief Subsamping block 3 | // @author Andrea Vedaldi 4 | 5 | /* 6 | Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. 7 | All rights reserved. 
8 | 9 | This file is part of the VLFeat library and is made available under 10 | the terms of the BSD license (see the COPYING file). 11 | */ 12 | 13 | #ifndef __vl__nnsubsample__ 14 | #define __vl__nnsubsample__ 15 | 16 | #include "data.hpp" 17 | 18 | namespace vl { 19 | 20 | vl::ErrorCode 21 | nnsubsample_forward(vl::Context& context, 22 | vl::Tensor output, 23 | vl::Tensor data, 24 | vl::Tensor biases, 25 | int strideY, int strideX, 26 | int padTop, int padBottom, 27 | int padLeft, int padRight) ; 28 | 29 | vl::ErrorCode 30 | nnsubsample_backward(vl::Context& context, 31 | vl::Tensor derData, 32 | vl::Tensor derBiases, 33 | vl::Tensor derOutput, 34 | int strideY, int strideX, 35 | int padTop, int padBottom, 36 | int padLeft, int padRight) ; 37 | } 38 | 39 | #endif /* defined(__vl__nnsubsample__) */ 40 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_cudatool.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_cudatool.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_imreadjpeg.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_imreadjpeg.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_imreadjpeg_old.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_imreadjpeg.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnbilinearsampler.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnbilinearsampler.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnbnorm.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnbnorm.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnconv.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnconv.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnconvt.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnconvt.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnnormalize.cpp: 
-------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnnormalize.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnpool.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnpool.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_nnroipool.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_nnroipool.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_taccummex.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_taccummex.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/src/vl_tmove.cpp: -------------------------------------------------------------------------------- 1 | #if ENABLE_GPU 2 | #error This file should not be compiled with GPU support enabled 3 | #endif 4 | #include "vl_tmove.cu" 5 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnconcat.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnconcat(inputs, dim, dzdy, varargin) 2 | %VL_NNCONCAT CNN concatenate multiple inputs. 3 | % Y = VL_NNCONCAT(INPUTS, DIM) concatenates the inputs in the cell 4 | % array INPUTS along dimension DIM generating an output Y. 5 | % 6 | % DZDINPUTS = VL_NNCONCAT(INPUTS, DIM, DZDY) computes the derivatives 7 | % of the block projected onto DZDY. DZDINPUTS has one element for 8 | % each element of INPUTS, each of which is an array that has the same 9 | % dimensions of the corresponding array in INPUTS. 10 | 11 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 12 | % All rights reserved. 13 | % 14 | % This file is part of the VLFeat library and is made available under 15 | % the terms of the BSD license (see the COPYING file). 
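% --- Editor's note: a minimal usage sketch for VL_NNCONCAT as documented
% above. The array sizes below are illustrative assumptions, not values
% taken from this repository.
xa = randn(8, 8, 3, 2, 'single') ;
xb = randn(8, 8, 5, 2, 'single') ;
y  = vl_nnconcat({xa, xb}, 3) ;                 % forward: 8 x 8 x 8 x 2
dzdy = randn(size(y), 'single') ;
dzdinputs = vl_nnconcat({xa, xb}, 3, dzdy) ;    % backward: {dzdxa, dzdxb}
assert(isequal(size(dzdinputs{1}), size(xa))) ;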
16 | 17 | opts.inputSizes = [] ; 18 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 19 | 20 | if nargin < 2, dim = 3; end; 21 | if nargin < 3, dzdy = []; end; 22 | 23 | if isempty(dzdy) 24 | y = cat(dim, inputs{:}); 25 | else 26 | if isempty(opts.inputSizes) 27 | opts.inputSizes = cellfun(@(inp) [size(inp,1),size(inp,2),size(inp,3),size(inp,4)], inputs, 'UniformOutput', false) ; 28 | end 29 | start = 1 ; 30 | y = cell(1, numel(opts.inputSizes)) ; 31 | s.type = '()' ; 32 | s.subs = {':', ':', ':', ':'} ; 33 | for i = 1:numel(opts.inputSizes) 34 | stop = start + opts.inputSizes{i}(dim) ; 35 | s.subs{dim} = start:stop-1 ; 36 | y{i} = subsref(dzdy,s) ; 37 | start = stop ; 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nncrop.m: -------------------------------------------------------------------------------- 1 | function y = vl_nncrop(x, crop, dzdy, inputSize) 2 | %VL_NNCROP CNN crop. 3 | % Y = VL_NNCROP(X, CROP) crops the input X spatially. CROP specifies the 4 | % amount of cropping as [TOP, BOTTOM, LEFT, RIGHT]. 5 | % 6 | % DZDX = VL_NNCROP(X, CROP, DZDY) computes the derivative DZDX of the 7 | % function projected on the output derivative DZDY. DZDX has the same 8 | % dimension as X and DZDY the same dimension as Y. 9 | % 10 | % DZDX = VL_NNCROP([], CROP, DZDY, INPUTSIZE) is an alternative to 11 | % the previous call in which X is omitted and its size is passed as 12 | % INPUTSIZE. 13 | 14 | % Copyright (C) 2015 Sebastien Ehrhardt and Andrea Vedaldi. 15 | % All rights reserved. 16 | % 17 | % This file is part of the VLFeat library and is made available under 18 | % the terms of the BSD license (see the COPYING file). 19 | 20 | if nargin < 4 21 | sz = [size(x,1) size(x,2) size(x,3) size(x,4)] ; 22 | else 23 | sz = inputSize ; 24 | end 25 | 26 | sv = 1 + crop(1) : sz(1) - crop(2) ; 27 | su = 1 + crop(3) : sz(2) - crop(4) ; 28 | 29 | if nargin <= 2 || isempty(dzdy) 30 | y = x(sv, su, :, :) ; 31 | else 32 | if isa(dzdy, 'gpuArray') 33 | y = gpuArray.zeros(sz, classUnderlying(dzdy)) ; 34 | else 35 | y = zeros(sz, class(dzdy)) ; 36 | end 37 | y(sv, su, :, :) = dzdy ; 38 | end 39 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnnoffset.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnnoffset(x, param, dzdy) 2 | %VL_NNNOFFSET CNN norm-dependent offset. 3 | % Y = VL_NNNOFFSET(X, PARAM) subtracts from each element of X the 4 | % weighted norm of the feature channels: 5 | % 6 | % X(i,j,k) = X(i,j,k) - PARAM(1) * L(i,j) ^ PARAM(2) 7 | % 8 | % where 9 | % 10 | % L(i,j) = sum_K X(i,j,k)^2 11 | % 12 | % DZDX = VL_NNNOFFSET(X, PARAM, DZDY) computes the derivative of the 13 | % block projected onto DZDY. DZDX and DZDY have the same dimensions 14 | % as X and Y respectively. 15 | 16 | % Copyright (C) 2014 Andrea Vedaldi. 17 | % All rights reserved. 18 | % 19 | % This file is part of the VLFeat library and is made available under 20 | % the terms of the BSD license (see the COPYING file). 
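% --- Editor's note: a minimal usage sketch for VL_NNCROP as documented
% above; the sizes and crop amounts are illustrative assumptions.
x = randn(10, 12, 4, 2, 'single') ;
crop = [1 2 3 4] ;                        % [TOP, BOTTOM, LEFT, RIGHT]
y = vl_nncrop(x, crop) ;                  % forward: 7 x 5 x 4 x 2
dzdy = randn(size(y), 'single') ;
dzdx = vl_nncrop(x, crop, dzdy) ;         % backward: same size as x
assert(isequal(size(dzdx), size(x))) ;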
21 | 22 | L = sum(x.^2,3) ; 23 | L = max(L, 1e-8) ; 24 | 25 | if nargin <= 2 26 | y = bsxfun(@minus, x, param(1)*L.^param(2)) ; 27 | else 28 | y = dzdy - bsxfun(@times, (2*param(1)*param(2))* x, sum(dzdy,3) .* (L.^(param(2)-1))) ; 29 | end 30 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnnormalize.m: -------------------------------------------------------------------------------- 1 | %VL_NNNORMALIZE CNN Local Response Normalization (LRN) 2 | % Y = VL_NNORMALIZE(X, PARAM) computes the so-called Local Response 3 | % Normalization (LRN) operator. This operator performs a 4 | % channel-wise sliding window normalization of each column of the 5 | % input array X. The normalized output is given by: 6 | % 7 | % Y(i,j,k) = X(i,j,k) / L(i,j,k)^BETA 8 | % 9 | % where the normalization factor is given by 10 | % 11 | % L(i,j,k) = KAPPA + ALPHA * (sum_{q in Q(k)} X(i,j,k)^2, 12 | % 13 | % PARAM = [N KAPPA ALPHA BETA], and N is the size of the window. The 14 | % window Q(k) is defined as: 15 | % 16 | % Q(k) = [max(1, k-FLOOR((N-1)/2)), min(D, k+CEIL((N-1)/2))]. 17 | % 18 | % where D is the number of feature channels in X. Note in particular 19 | % that, by setting N >= 2D, the function can be used to normalize 20 | % all the channels as a single group (useful to achieve L2 21 | % normalization). 22 | % 23 | % DZDX = VL_NNORMALIZE(X, PARAM, DZDY) computes the derivative of 24 | % the block projected onto DZDY. DZDX and DZDY have the same 25 | % dimensions as X and Y respectively. 26 | % 27 | % **Remark:** Some CNN libraries (e.g. Caffe) use a slightly 28 | % different convention for the parameters of the LRN. Caffe in 29 | % particular uses the convention: 30 | % 31 | % PARAM_CAFFE = [N KAPPA N*ALPHA BETA] 32 | % 33 | % i.e. the ALPHA paramter is multiplied by N. 34 | 35 | % Copyright (C) 2014 Andrea Vedaldi. 36 | % All rights reserved. 37 | % 38 | % This file is part of the VLFeat library and is made available under 39 | % the terms of the BSD license (see the COPYING file). 40 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnnormalizelp.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnnormalizelp(x,dzdy,varargin) 2 | %VL_NNNORMALIZELP CNN Lp normalization 3 | % Y = VL_NNNORMALIZELP(X) normalizes in Lp norm each spatial 4 | % location in the array X: 5 | % 6 | % Y(i,j,k) = X(i,j,k) / sum_q (X(i,j,q).^p + epsilon)^(1/p) 7 | % 8 | % DZDX = VL_NNNORMALIZELP(X, DZDY) computes the derivative of the 9 | % function with respect to X projected onto DZDY. 10 | % 11 | % VL_NNNORMALIZE(___, 'opts', val, ...) takes the following options: 12 | % 13 | % `p`:: 2 14 | % The exponent of the Lp norm. Warning: currently only even 15 | % exponents are supported. 16 | % 17 | % `epsilon`:: 0.01 18 | % The constant added to the sum of p-powers before taking the 19 | % 1/p square root (see the formula above). 20 | % 21 | % `spatial`:: `false` 22 | % If `true`, sum along the two spatial dimensions instead of 23 | % along the feature channels. 24 | % 25 | % See also: VL_NNNORMALIZE(). 
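% --- Editor's note: a minimal sketch of VL_NNNORMALIZELP as documented
% above, assuming the default p = 2; with a small EPSILON each spatial
% location ends up with (nearly) unit L2 norm. Sizes are illustrative.
x = randn(4, 4, 16, 2, 'single') ;
y = vl_nnnormalizelp(x, [], 'p', 2, 'epsilon', 1e-2) ;
channelNorm = sqrt(sum(y.^2, 3)) ;        % close to 1 at every location
dzdy = randn(size(y), 'single') ;
dzdx = vl_nnnormalizelp(x, dzdy, 'p', 2) ; % backward pass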
26 | 27 | opts.epsilon = 1e-2 ; 28 | opts.p = 2 ; 29 | opts.spatial = false ; 30 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 31 | 32 | if ~opts.spatial 33 | massp = sum(x.^opts.p,3) + opts.epsilon ; 34 | else 35 | massp = sum(sum(x.^opts.p,1),2) + opts.epsilon ; 36 | end 37 | mass = massp.^(1/opts.p) ; 38 | y = bsxfun(@rdivide, x, mass) ; 39 | 40 | if nargin < 2 || isempty(dzdy) 41 | return ; 42 | else 43 | dzdy = bsxfun(@rdivide, dzdy, mass) ; 44 | if ~opts.spatial 45 | tmp = sum(dzdy .* x, 3) ; 46 | else 47 | tmp = sum(sum(dzdy .* x, 1),2); 48 | end 49 | y = dzdy - bsxfun(@times, tmp, bsxfun(@rdivide, x.^(opts.p-1), massp)) ; 50 | end 51 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnrelu.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnrelu(x,dzdy,varargin) 2 | %VL_NNRELU CNN rectified linear unit. 3 | % Y = VL_NNRELU(X) applies the rectified linear unit to the data 4 | % X. X can have arbitrary size. 5 | % 6 | % DZDX = VL_NNRELU(X, DZDY) computes the derivative of the block 7 | % projected onto DZDY. DZDX and DZDY have the same dimensions as 8 | % X and Y respectively. 9 | % 10 | % VL_NNRELU(...,'OPT',VALUE,...) takes the following options: 11 | % 12 | % `Leak`:: 0 13 | % Set the leak factor, a non-negative number. Y is equal to X if 14 | % X is not smaller than zero; otherwise, Y is equal to X 15 | % multipied by the leak factor. By default, the leak factor is 16 | % zero; for values greater than that one obtains the leaky ReLU 17 | % unit. 18 | % 19 | % ADVANCED USAGE 20 | % 21 | % As a further optimization, in the backward computation it is 22 | % possible to replace X with Y, namely, if Y = VL_NNRELU(X), then 23 | % VL_NNRELU(X,DZDY) gives the same result as VL_NNRELU(Y,DZDY). 24 | % This is useful because it means that the buffer X does not need to 25 | % be remembered in the backward pass. 26 | 27 | % Copyright (C) 2014-15 Andrea Vedaldi. 28 | % All rights reserved. 29 | % 30 | % This file is part of the VLFeat library and is made available under 31 | % the terms of the BSD license (see the COPYING file). 32 | 33 | opts.leak = 0 ; 34 | opts = vl_argparse(opts, varargin, 'nonrecursive') ; 35 | 36 | if opts.leak == 0 37 | if nargin <= 1 || isempty(dzdy) 38 | y = max(x, 0) ; 39 | else 40 | y = dzdy .* (x > 0) ; 41 | end 42 | else 43 | if nargin <= 1 || isempty(dzdy) 44 | y = x .* (opts.leak + (1 - opts.leak) * (x > 0)) ; 45 | else 46 | y = dzdy .* (opts.leak + (1 - opts.leak) * (x > 0)) ; 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnsigmoid.m: -------------------------------------------------------------------------------- 1 | function out = vl_nnsigmoid(x,dzdy) 2 | %VL_NNSIGMOID CNN sigmoid nonlinear unit. 3 | % Y = VL_NNSIGMOID(X) computes the sigmoid of the data X. X can 4 | % have an arbitrary size. The sigmoid is defined as follows: 5 | % 6 | % SIGMOID(X) = 1 / (1 + EXP(-X)). 7 | % 8 | % DZDX = VL_NNSIGMOID(X, DZDY) computes the derivative of the 9 | % block projected onto DZDY. DZDX and DZDY have the same 10 | % dimensions as X and Y respectively. 11 | 12 | % Copyright (C) 2015 Karel Lenc. 13 | % All rights reserved. 14 | % 15 | % This file is part of the VLFeat library and is made available under 16 | % the terms of the BSD license (see the COPYING file). 
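% --- Editor's note: a minimal sketch of the 'Leak' option of VL_NNRELU
% documented above; the leak value 0.1 is an illustrative assumption.
x  = randn(6, 6, 8, 2, 'single') ;
y  = vl_nnrelu(x) ;                       % standard ReLU: max(x, 0)
yl = vl_nnrelu(x, [], 'leak', 0.1) ;      % leaky ReLU: 0.1*x where x < 0
dzdy = randn(size(y), 'single') ;
dzdx = vl_nnrelu(y, dzdy) ;               % backward may reuse Y in place of X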
17 | 18 | y = 1 ./ (1 + exp(-x)); 19 | 20 | if nargin <= 1 || isempty(dzdy) 21 | out = y ; 22 | else 23 | out = dzdy .* (y .* (1 - y)) ; 24 | end 25 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnsoftmax.m: -------------------------------------------------------------------------------- 1 | function Y = vl_nnsoftmax(X,dzdY) 2 | %VL_NNSOFTMAX CNN softmax. 3 | % Y = VL_NNSOFTMAX(X) applies the softmax operator the data X. X 4 | % has dimension H x W x D x N, packing N arrays of W x H 5 | % D-dimensional vectors. 6 | % 7 | % D can be thought of as the number of possible classes and the 8 | % function computes the softmax along the D dimension. Often W=H=1, 9 | % but this is not a requirement, as the operator is applied 10 | % convolutionally at all spatial locations. 11 | % 12 | % DZDX = VL_NNSOFTMAX(X, DZDY) computes the derivative of the block 13 | % projected onto DZDY. DZDX and DZDY have the same dimensions as 14 | % X and Y respectively. 15 | 16 | % Copyright (C) 2014 Andrea Vedaldi. 17 | % All rights reserved. 18 | % 19 | % This file is part of the VLFeat library and is made available under 20 | % the terms of the BSD license (see the COPYING file). 21 | 22 | E = exp(bsxfun(@minus, X, max(X,[],3))) ; 23 | L = sum(E,3) ; 24 | Y = bsxfun(@rdivide, E, L) ; 25 | 26 | if nargin <= 1, return ; end 27 | 28 | % backward 29 | Y = Y .* bsxfun(@minus, dzdY, sum(dzdY .* Y, 3)) ; 30 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_nnspnorm.m: -------------------------------------------------------------------------------- 1 | function y = vl_nnspnorm(x, param, dzdy) 2 | %VL_NNSPNORM CNN spatial normalization. 3 | % Y = VL_NNSPNORM(X, PARAM) computes the spatial normalization of 4 | % the data X with parameters PARAM = [PH PW ALPHA BETA]. Here PH and 5 | % PW define the size of the spatial neighbourhood used for 6 | % nomalization. 7 | % 8 | % For each feature channel, the function computes the sum of squares 9 | % of X inside each rectangle, N2(i,j). It then divides each element 10 | % of X as follows: 11 | % 12 | % Y(i,j) = X(i,j) / (1 + ALPHA * N2(i,j))^BETA. 13 | % 14 | % DZDX = VL_NNSPNORM(X, PARAM, DZDY) computes the derivative of the 15 | % block projected onto DZDY. DZDX and DZDY have the same dimensions 16 | % as X and Y respectively. 17 | 18 | % Copyright (C) 2015 Karel Lenc and Andrea Vedaldi. 19 | % All rights reserved. 20 | % 21 | % This file is part of the VLFeat library and is made available under 22 | % the terms of the BSD license (see the COPYING file). 23 | 24 | pad = floor((param(1:2)-1)/2) ; 25 | pad = [pad ; param(1:2)-1-pad] ; 26 | 27 | n2 = vl_nnpool(x.*x, param(1:2), 'method', 'avg', 'pad', pad) ; 28 | f = 1 + param(3) * n2 ; 29 | 30 | if nargin <= 2 || isempty(dzdy) 31 | y = f.^(-param(4)) .* x ; 32 | else 33 | t = vl_nnpool(x.*x, param(1:2), f.^(-param(4)-1) .* dzdy .* x, 'method', 'avg', 'pad', pad) ; 34 | y = f.^(-param(4)) .* dzdy - 2 * param(3)*param(4) * x .* t ; 35 | end -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_rootnn.m: -------------------------------------------------------------------------------- 1 | function root = vl_rootnn() 2 | %VL_ROOTNN Get the root path of the MatConvNet toolbox. 3 | % VL_ROOTNN() returns the path to the MatConvNet toolbox. 4 | 5 | % Copyright (C) 2014 Andrea Vedaldi. 
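% --- Editor's note: a minimal sketch of VL_NNSOFTMAX / VL_NNSIGMOID as
% documented above; sizes are illustrative assumptions. Softmax acts along
% the third (class) dimension, so every fibre of the output sums to one.
x = randn(1, 1, 10, 3, 'single') ;
p = vl_nnsoftmax(x) ;
assert(max(abs(sum(p, 3) - 1)) < 1e-5) ;
s = vl_nnsigmoid(x) ;                     % elementwise 1 ./ (1 + exp(-x))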
6 | % All rights reserved. 7 | % 8 | % This file is part of the VLFeat library and is made available under 9 | % the terms of the BSD license (see the COPYING file). 10 | 11 | root = fileparts(fileparts(mfilename('fullpath'))) ; 12 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_setupnn.m: -------------------------------------------------------------------------------- 1 | function vl_setupnn() 2 | %VL_SETUPNN Setup the MatConvNet toolbox. 3 | % VL_SETUPNN() function adds the MatConvNet toolbox to MATLAB path. 4 | 5 | % Copyright (C) 2014-15 Andrea Vedaldi. 6 | % All rights reserved. 7 | % 8 | % This file is part of the VLFeat library and is made available under 9 | % the terms of the BSD license (see the COPYING file). 10 | 11 | root = vl_rootnn() ; 12 | addpath(fullfile(root, 'matlab')) ; 13 | addpath(fullfile(root, 'matlab', 'mex')) ; 14 | addpath(fullfile(root, 'matlab', 'simplenn')) ; 15 | addpath(fullfile(root, 'matlab', 'xtest')) ; 16 | addpath(fullfile(root, 'examples')) ; 17 | 18 | if ~exist('gather') 19 | warning('The MATLAB Parallel Toolbox does not seem to be installed. Activating compatibility functions.') ; 20 | addpath(fullfile(root, 'matlab', 'compatibility', 'parallel')) ; 21 | end 22 | 23 | if numel(dir(fullfile(root, 'matlab', 'mex', 'vl_nnconv.mex*'))) == 0 24 | warning('MatConvNet is not compiled. Consider running `vl_compilenn`.'); 25 | end 26 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/vl_taccum.m: -------------------------------------------------------------------------------- 1 | function a = vl_taccum(alpha, a, beta, b) 2 | %VL_TACCUM Compute A = alpha A + beta B 3 | % A = VL_TACCUM(ALPHA, A, BETA, B) computes efficiently A = alpha A 4 | % + beta B. For GPU arrays, it performs its computation in place, by 5 | % modifiying A without creating an additional copy. 6 | 7 | % Copyright (C) 2016 Andrea Vedaldi. 8 | % All rights reserved. 9 | % 10 | % This file is part of the VLFeat library and is made available under 11 | % the terms of the BSD license (see the COPYING file). 
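% --- Editor's note: a minimal sketch of VL_TACCUM as documented above
% (A = ALPHA*A + BETA*B, computed in place for gpuArray inputs); the
% coefficients are illustrative assumptions.
a = randn(5, 5, 'single') ;
b = randn(5, 5, 'single') ;
a = vl_taccum(0.9, a, 0.1, b) ;           % running-average style update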
12 | 13 | if isscalar(a) 14 | a = alpha * a + beta * b ; 15 | return ; 16 | elseif isa(a, 'gpuArray') 17 | vl_taccummex(alpha, a, beta, b, 'inplace') ; 18 | else 19 | a = vl_taccummex(alpha, a, beta, b) ; 20 | end 21 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnbnorm.m: -------------------------------------------------------------------------------- 1 | classdef nnbnorm < nntest 2 | properties (TestParameter) 3 | rows = {2 8 13} 4 | cols = {2 8 17} 5 | numDims = {1 3 4} 6 | batchSize = {2 7} 7 | end 8 | methods (Test) 9 | function basic(test, rows, cols, numDims, batchSize) 10 | r = rows ; 11 | c = cols ; 12 | nd = numDims ; 13 | bs = batchSize ; 14 | x = test.randn(r, c, nd, bs) ; 15 | %g = test.randn(1, 1, nd, 1) ; 16 | %b = test.randn(1, 1, nd, 1) ; 17 | g = test.randn(nd, 1) / test.range ; 18 | b = test.randn(nd, 1) / test.range ; 19 | 20 | y = vl_nnbnorm(x,g,b) ; 21 | dzdy = test.randn(size(y)) ; 22 | [dzdx,dzdg,dzdb] = vl_nnbnorm(x,g,b,dzdy) ; 23 | 24 | test.der(@(x) vl_nnbnorm(x,g,b), x, dzdy, dzdx, test.range * 1e-3) ; 25 | test.der(@(g) vl_nnbnorm(x,g,b), g, dzdy, dzdg, 1e-2) ; 26 | test.der(@(b) vl_nnbnorm(x,g,b), b, dzdy, dzdb, 1e-3) ; 27 | end 28 | end 29 | end -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nndropout.m: -------------------------------------------------------------------------------- 1 | classdef nndropout < nntest 2 | methods (Test) 3 | function basic(test) 4 | x = test.randn(4,5,10,3) ; 5 | [y,mask] = vl_nndropout(x) ; 6 | dzdy = test.randn(size(y)) ; 7 | dzdx = vl_nndropout(x,dzdy,'mask',mask) ; 8 | test.der(@(x) vl_nndropout(x,'mask',mask), x, dzdy, dzdx, 1e-3*test.range) ; 9 | end 10 | end 11 | end 12 | 13 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnmnist.m: -------------------------------------------------------------------------------- 1 | classdef nnmnist < nntest 2 | properties (TestParameter) 3 | networkType = {'dagnn', 'simplenn'} 4 | end 5 | 6 | methods (TestClassSetup) 7 | function init(test) 8 | addpath(fullfile(vl_rootnn, 'examples', 'mnist')); 9 | end 10 | end 11 | 12 | methods (Test) 13 | function valErrorRate(test, networkType) 14 | clear mex ; % will reset GPU, remove MCN to avoid crashing 15 | % MATLAB on exit (BLAS issues?) 16 | if strcmp(test.dataType, 'double'), return ; end 17 | switch test.currentDevice 18 | case 'cpu' 19 | gpus = []; 20 | case 'gpu' 21 | gpus = 1; 22 | end 23 | trainOpts = struct('numEpochs', 1, 'continue', false, 'gpus', gpus, ... 
24 | 'plotStatistics', false); 25 | if strcmp(networkType, 'simplenn') 26 | trainOpts.errorLabels = {'error', 'top5err'} ; 27 | end 28 | [~, info] = cnn_mnist('train', trainOpts, 'networkType', networkType); 29 | test.verifyLessThan(info.train.error, 0.08); 30 | test.verifyLessThan(info.val.error, 0.025); 31 | end 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnnormalize.m: -------------------------------------------------------------------------------- 1 | classdef nnnormalize < nntest 2 | properties (TestParameter) 3 | group = {2 3 4 5 6 8 9 10 11 12 13 14 15 16 17} 4 | sgroup = {2 3 4 5 6 7} 5 | end 6 | 7 | methods (Test) 8 | function basic(test, group) 9 | param = [group, .1, .5, .75] ; 10 | x = test.randn(3,2,10,4) ; 11 | y = vl_nnnormalize(x,param) ; 12 | dzdy = test.rand(size(y))-0.5 ; 13 | dzdx = vl_nnnormalize(x,param,dzdy) ; 14 | test.der(@(x) vl_nnnormalize(x,param), x, dzdy, dzdx, test.range * 1e-3, 0.3) ; 15 | end 16 | 17 | function compare_to_naive(test, sgroup) 18 | param = [sgroup, .1, .5, .75] ; 19 | x = test.randn(3,2,10,4) ; 20 | y = vl_nnnormalize(gather(x),param) ; 21 | y_ = test.zeros(size(y)) ; 22 | x_ = gather(x) ; 23 | for i=1:size(x,1) 24 | for j=1:size(x,2) 25 | for n=1:size(x,4) 26 | t = test.zeros(1,1,size(x,3),1) ; 27 | t(1,1,:,1) = (param(2) + param(3)*conv(squeeze(x_(i,j,:,n)).^2, ... 28 | ones(param(1),1), 'same')).^(-param(4)) ; 29 | y_(i,j,:,n) = x_(i,j,:,n) .* t ; 30 | end 31 | end 32 | end 33 | test.eq(y,y_) ; 34 | end 35 | 36 | function l2(test) 37 | x = test.randn(1,1,10,1) ; 38 | y = vl_nnnormalize(x, [20, 0, 1, .5]) ; 39 | test.eq(sum(y(:).^2), test.toDataType(1), 1e-2) ; 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnnormalizelp.m: -------------------------------------------------------------------------------- 1 | classdef nnnormalizelp < nntest 2 | properties (TestParameter) 3 | h = {1 2 3 4} 4 | w = {1 2 3 4} 5 | d = {2 3 4} 6 | p = {2 4} 7 | end 8 | 9 | methods (Test) 10 | function basicl2(test, h,w,d) 11 | x = test.randn(h,w,d,3) ; 12 | y = vl_nnnormalizelp(x) ; 13 | dzdy = test.rand(size(y))-0.5 ; 14 | dzdx = vl_nnnormalizelp(x,dzdy) ; 15 | test.der(@(x) vl_nnnormalizelp(x), x, dzdy, dzdx, 1e-4, 0.3) ; 16 | end 17 | 18 | function lp(test, p) 19 | x = test.randn(2,3,5,3) / test.range ; 20 | y = vl_nnnormalizelp(x, [], 'p', p) ; 21 | dzdy = test.rand(size(y))-0.5 ; 22 | dzdx = vl_nnnormalizelp(x,dzdy, 'p', p) ; 23 | test.der(@(x) vl_nnnormalizelp(x,[],'p',p), x, dzdy, dzdx, 1e-4, 0.3) ; 24 | end 25 | 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnoffset.m: -------------------------------------------------------------------------------- 1 | classdef nnoffset < nntest 2 | methods (Test) 3 | function basic(test) 4 | param = [.34, .5] ; 5 | x = test.randn(4,5,10,3) ; 6 | y = vl_nnnoffset(x,param) ; 7 | dzdy = test.randn(size(y)) ; 8 | dzdx = vl_nnnoffset(x,param,dzdy) ; 9 | test.der(@(x) vl_nnnoffset(x,param), x, dzdy, dzdx, 1e-3*test.range) ; 10 | end 11 | end 12 | end -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnpdist.m: 
-------------------------------------------------------------------------------- 1 | classdef nnpdist < nntest 2 | properties (TestParameter) 3 | oneToOne = {false, true} 4 | noRoot = {false, true} 5 | p = {.5 1 2 3} 6 | aggregate = {false, true} 7 | end 8 | methods (Test) 9 | function basic(test,oneToOne, noRoot, p, aggregate) 10 | if aggregate 11 | % make it smaller to avoid numerical derivative issues with 12 | % float 13 | h = 3 ; 14 | w = 2 ; 15 | else 16 | h = 13 ; 17 | w = 17 ; 18 | end 19 | d = 4 ; 20 | n = 5 ; 21 | x = test.randn(h,w,d,n) ; 22 | if oneToOne 23 | x0 = test.randn(h,w,d,n) ; 24 | else 25 | x0 = test.randn(1,1,d,n) ; 26 | end 27 | opts = {'noRoot', noRoot, 'aggregate', aggregate} ; 28 | 29 | y = vl_nnpdist(x, x0, p, opts{:}) ; 30 | 31 | % make sure they are not too close in any dimension as this may be a 32 | % problem for the finite difference dereivatives as one could 33 | % approach 0 which is not differentiable for some p-norms 34 | 35 | s = abs(bsxfun(@minus, x, x0)) < test.range*1e-1 ; 36 | x(s) = x(s) + 5*test.range ; 37 | 38 | dzdy = test.rand(size(y)) ; 39 | [dzdx, dzdx0] = vl_nnpdist(x,x0,p,dzdy,opts{:}) ; 40 | test.der(@(x) vl_nnpdist(x,x0,p,opts{:}), x, dzdy, dzdx, test.range * 1e-3) ; 41 | if oneToOne 42 | % Pdist does not implement backprop of the bsxfun 43 | test.der(@(x0) vl_nnpdist(x,x0,p,opts{:}), x0, dzdy, dzdx0, test.range * 1e-3) ; 44 | end 45 | end 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnrelu.m: -------------------------------------------------------------------------------- 1 | classdef nnrelu < nntest 2 | properties 3 | x 4 | end 5 | 6 | methods (TestClassSetup) 7 | function data(test,device) 8 | % make sure that all elements in x are different. in this way, 9 | % we can compute numerical derivatives reliably by adding a delta < .5. 10 | x = test.randn(15,14,3,2) ; 11 | x(:) = randperm(numel(x))' ; 12 | % avoid non-diff value for test 13 | x(x==0)=1 ; 14 | test.x = x ; 15 | test.range = 10 ; 16 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end 17 | end 18 | end 19 | 20 | methods (Test) 21 | function basic(test) 22 | x = test.x ; 23 | y = vl_nnrelu(x) ; 24 | dzdy = test.randn(size(y)) ; 25 | dzdx = vl_nnrelu(x,dzdy) ; 26 | test.der(@(x) vl_nnrelu(x), x, dzdy, dzdx, 1e-2 * test.range) ; 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnroipool.m: -------------------------------------------------------------------------------- 1 | classdef nnroipool < nntest 2 | properties 3 | x 4 | end 5 | 6 | properties (TestParameter) 7 | method = {'avg', 'max'} 8 | subdivisions = {[1 1], [2 1], [1 2], [3 7], [16 16]} 9 | end 10 | 11 | methods (TestClassSetup) 12 | function data(test,device) 13 | % make sure that all elements in x are different. in this way, 14 | % we can compute numerical derivatives reliably by adding a delta < .5. 
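% --- Editor's note: a minimal sketch of VL_NNPDIST with the options
% exercised by the nnpdist test above ('noRoot', 'aggregate'); sizes are
% illustrative assumptions.
x  = randn(7, 7, 4, 2, 'single') ;
x0 = randn(1, 1, 4, 2, 'single') ;        % broadcast against x
d    = vl_nnpdist(x, x0, 2, 'noRoot', true) ;                    % squared L2 map
dAgg = vl_nnpdist(x, x0, 2, 'noRoot', true, 'aggregate', true) ; % summed distance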
15 | x = test.randn(15,14,3,2) ; 16 | x(:) = randperm(numel(x))' ; 17 | test.x = x ; 18 | test.range = 10 ; 19 | if strcmp(device,'gpu'), test.x = gpuArray(test.x) ; end 20 | end 21 | end 22 | 23 | methods (Test) 24 | function basic(test,method,subdivisions) 25 | R = [1 1 1 2 2 2 1 1 ; 26 | 0 1 2 0 1 2 1 1 ; 27 | 0 4 3 0 1 2 1 1 ; 28 | 15 5 6 15 4 2 9 0 ; 29 | 14 7 9 14 4 8 1 0] ; 30 | R = test.toDevice(test.toDataType(R)) ; 31 | x = test.x ; 32 | args = {'method', method, 'subdivisions', subdivisions} ; 33 | y = vl_nnroipool(x,R,args{:}) ; 34 | dzdy = test.randn(size(y)) ; 35 | dzdx = vl_nnroipool(x,R,dzdy,args{:}) ; 36 | test.der(@(x) vl_nnroipool(x,R,args{:}), ... 37 | x, dzdy, dzdx, test.range * 1e-2) ; 38 | end 39 | 40 | function identity(test,method) 41 | x = test.toDevice(test.toDataType((2:10)'*(1:10))) ; 42 | R = test.toDevice(test.toDataType([1, 1, 1, 9, 10])) ; 43 | T = [0 1 0 ; 1 0 0] ; 44 | opts = {'method', method, ... 45 | 'subdivisions', [9,10], ... 46 | 'transform', T} ; 47 | y = vl_nnroipool(x,R,opts{:}) ; 48 | test.eq(x,y) ; 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnsigmoid.m: -------------------------------------------------------------------------------- 1 | classdef nnsigmoid < nntest 2 | methods (Test) 3 | function basic(test) 4 | x = test.randn(5,5,1,1)/test.range ; 5 | y = vl_nnsigmoid(x) ; 6 | dzdy = test.randn(size(y)) ; 7 | dzdx = vl_nnsigmoid(x,dzdy) ; 8 | test.der(@(x) vl_nnsigmoid(x), x, dzdy, dzdx, 1e-3) ; 9 | end 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnsoftmax.m: -------------------------------------------------------------------------------- 1 | classdef nnsoftmax < nntest 2 | properties (TestParameter) 3 | h = {1 2 3} 4 | w = {1 2} 5 | end 6 | methods (Test) 7 | function basic(test,h,w) 8 | d = 10 ; 9 | n = 3 ; 10 | x = test.randn(h,w,d,n)/test.range ; 11 | y = vl_nnsoftmax(x) ; 12 | dzdy = test.randn(size(y)) ; 13 | dzdx = vl_nnsoftmax(x, dzdy) ; 14 | test.der(@(x) vl_nnsoftmax(x), x, dzdy, dzdx, 1e-2) ; 15 | end 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnsoftmaxloss.m: -------------------------------------------------------------------------------- 1 | classdef nnsoftmaxloss < nntest 2 | properties (TestParameter) 3 | weighed = {false true} 4 | multilab = {false true} 5 | end 6 | 7 | methods (Test) 8 | function basic(test, multilab, weighed) 9 | C = 10 ; 10 | n = 3 ; 11 | if multilab 12 | c = reshape(mod(0:3*4*n-1,C)+1, 3, 4, 1, n) ; 13 | else 14 | c = reshape([7 2 1],1,1,1,[]) ; 15 | end 16 | if weighed 17 | c = cat(3, c, test.rand(size(c))) ; 18 | end 19 | 20 | % compare direct and indirect composition; this cannot 21 | % take large test.ranges 22 | x = test.rand(3,4,C,n)/test.range + 0.001 ; % non-negative 23 | y = vl_nnsoftmaxloss(x,c) ; 24 | if size(c,3) == 1 25 | opts = {'loss','log'} ; 26 | else 27 | opts = {'loss','log','instanceWeights',c(:,:,2,:)} ; 28 | end 29 | y_ = vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),[],opts{:}) ; 30 | dzdy = test.randn(size(y)) ; 31 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ; 32 | dzdx_ = vl_nnsoftmax(x,vl_nnloss(vl_nnsoftmax(x),c(:,:,1,:),dzdy,opts{:})) ; 33 | test.eq(y,y_) ; 34 | test.eq(dzdx,dzdx_) ; 35 | test.der(@(x) vl_nnsoftmaxloss(x,c), x, 
dzdy, dzdx, 0.001, -5e1) ; 36 | 37 | % now larger input range 38 | x = test.rand(3,4,C,n) + test.range * 0.001 ; % non-negative 39 | y = vl_nnsoftmaxloss(x,c) ; 40 | dzdy = test.randn(size(y)) ; 41 | dzdx = vl_nnsoftmaxloss(x,c,dzdy) ; 42 | test.der(@(x) vl_nnsoftmaxloss(x,c), ... 43 | x, dzdy, dzdx, test.range * 0.001, -5e1) ; 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/suite/nnspnorm.m: -------------------------------------------------------------------------------- 1 | classdef nnspnorm < nntest 2 | methods (Test) 3 | function basic(test) 4 | h = 13 ; 5 | w = 17 ; 6 | d = 4 ; 7 | n = 5 ; 8 | param = [3, 3, 0.1, 0.75] ; 9 | x = test.randn(h,w,d,n) ; 10 | y = vl_nnspnorm(x, param) ; 11 | dzdy = test.rand(h, w, d, n) ; 12 | dzdx = vl_nnspnorm(x, param, dzdy) ; 13 | test.der(@(x) vl_nnspnorm(x,param), x, dzdy, dzdx, test.range * 1e-3) ; 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_bench_bnorm.m: -------------------------------------------------------------------------------- 1 | function vl_bench_bnorm(gpu) 2 | if nargin < 1 3 | gpu = false ; 4 | end 5 | 6 | T = 100 ; 7 | x = randn(64,64,32,32,'single') ; 8 | g = randn(32,1,'single') ; 9 | b = randn(32,1,'single') ; 10 | 11 | if gpu 12 | x = gpuArray(x) ; 13 | g = gpuArray(g) ; 14 | b = gpuArray(b) ; 15 | end 16 | 17 | tic 18 | for t=1:T 19 | y = vl_nnbnorm(x,g,b) ; 20 | end 21 | if gpu, wait(gpuDevice) ; end 22 | fprintf('new: %f\n',toc); 23 | 24 | tic 25 | for t=1:T 26 | y_ = vl_nnbnorm_old(x,g,b) ; 27 | end 28 | if gpu, wait(gpuDevice) ; end 29 | fprintf('old: %f\n',toc); 30 | 31 | dzdy = randn(size(y),'single') ; 32 | if gpu 33 | dzdy = gpuArray(dzdy) ; 34 | end 35 | 36 | tic 37 | for t=1:T 38 | [a,b,c] = vl_nnbnorm(x,g,b,dzdy) ; 39 | end 40 | if gpu, wait(gpuDevice) ; end 41 | fprintf('new deriv: %f\n',toc); 42 | 43 | tic 44 | for t=1:T 45 | [a_,b_,c_] = vl_nnbnorm_old(x,g,b,dzdy) ; 46 | end 47 | if gpu, wait(gpuDevice) ; end 48 | fprintf('old deriv: %f\n',toc); 49 | 50 | vl_testsim(y,y_); 51 | vl_testsim(a,a_); 52 | vl_testsim(b,b_); 53 | vl_testsim(c,c_); 54 | end 55 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_test_bnorm.m: -------------------------------------------------------------------------------- 1 | %% 2 | % Test function to compare nn_bnorm and its GPU/CPU implementation with 3 | % using VLFEAT 4 | %% 5 | 6 | gpu = false; 7 | gpu = true ; 8 | 9 | T = 1 ; 10 | x = randn(64,64,32,32,'single') ; 11 | g = randn(32,1,'single') ; 12 | b = randn(32,1,'single') ; 13 | 14 | if gpu 15 | x = gpuArray(x) ; 16 | g = gpuArray(g) ; 17 | b = gpuArray(b) ; 18 | end 19 | 20 | a=vl_nnbnorm(x,g,b); 21 | a_=vl_nnbnorm_old(x,g,b); 22 | 23 | vl_testsim(a,a_) 24 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_test_economic_relu.m: -------------------------------------------------------------------------------- 1 | % VL_TEST_ECONOMIC_RELU 2 | function vl_test_economic_relu() 3 | 4 | x = randn(11,12,8,'single'); 5 | w = randn(5,6,8,9,'single'); 6 | b = randn(1,9,'single') ; 7 | 8 | net.layers{1} = struct('type', 'conv', ... 9 | 'filters', w, ... 10 | 'biases', b, ... 11 | 'stride', 1, ... 
12 | 'pad', 0); 13 | net.layers{2} = struct('type', 'relu') ; 14 | 15 | res = vl_simplenn(net, x) ; 16 | dzdy = randn(size(res(end).x), 'like', res(end).x) ; 17 | clear res ; 18 | 19 | res_ = vl_simplenn(net, x, dzdy) ; 20 | res__ = vl_simplenn(net, x, dzdy, [], 'conserveMemory', true) ; 21 | 22 | a=whos('res_') ; 23 | b=whos('res__') ; 24 | assert(a.bytes > b.bytes) ; 25 | vl_testsim(res_(1).dzdx,res__(1).dzdx,1e-4) ; 26 | vl_testsim(res_(1).dzdw{1},res__(1).dzdw{1},1e-4) ; 27 | vl_testsim(res_(1).dzdw{2},res__(1).dzdw{2},1e-4) ; 28 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_test_gpureset.m: -------------------------------------------------------------------------------- 1 | for explicitMexReset = [false] 2 | 3 | % reset the same GPU device 4 | for t = 1:6 5 | if explicitMexReset, clear mex ; end 6 | if mod(t-1,2) == 0 7 | disp('vl_test_gpureset: resetting GPU') ; 8 | gpuDevice(1) ; 9 | else 10 | disp('vl_test_gpureset: not resetting GPU') ; 11 | end 12 | if t > 1, disp(a) ; end 13 | a = gpuArray(single(ones(10))) ; 14 | b = gpuArray(single(ones(5))) ; 15 | c = vl_nnconv(a,b,[],'nocudnn') ; 16 | end 17 | 18 | % after a GPU reset, passing stale GPU arrays to a MEX file should fail properly 19 | a = gpuArray(single(ones(10))) ; 20 | b = gpuArray(single(ones(5))) ; 21 | c = vl_nnconv(a,b,[],'nocudnn') ; 22 | 23 | gpuDevice(1) ; 24 | disp(a) ; 25 | try 26 | c = vl_nnconv(a,b,[],'nocudnn') ; 27 | catch e 28 | assert(strcmp('parallel:gpu:array:InvalidData', e.identifier)) ; 29 | end 30 | 31 | % switch GPU devices 32 | if gpuDeviceCount > 1 33 | disp('vl_test_gpureset: test switching GPU device') ; 34 | for t = 1:gpuDeviceCount 35 | if explicitMexReset, clear mex ; end 36 | fprintf('vl_test_gpureset: switching to gpu %d\n', t) ; 37 | gpuDevice(t) ; 38 | a = gpuArray(single(ones(10))) ; 39 | b = gpuArray(single(ones(5))) ; 40 | c = vl_nnconv(a,b,[],'nocudnn') ; 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_test_imreadjpeg.m: -------------------------------------------------------------------------------- 1 | function vl_test_imreadjpeg 2 | % VL_TEST_IMREADJPEG 3 | 4 | % Test basic file reading capability 5 | for t=1:6 6 | files{t} = which(sprintf('office_%d.jpg', t)) ; 7 | end 8 | ims = vl_imreadjpeg(files) ; 9 | 10 | % Test inserting a non-image file 11 | files_ = files ; 12 | files_{3} = [mfilename('fullpath') '.m']; 13 | ims_ = vl_imreadjpeg(files_) ; 14 | for t=setdiff(1:6,3) 15 | assert(isequal(ims{t},ims_{t})) ; 16 | end 17 | 18 | % Test inserting a non-existing file 19 | files__ = files_ ; 20 | files__{4} = 'idontexist.jpg' ; 21 | ims__ = vl_imreadjpeg(files__) ; 22 | for t=setdiff(1:6,[3 4]) 23 | assert(isequal(ims{t},ims__{t})) ; 24 | end 25 | 26 | for n = 1:4 27 | % Test prefetching 28 | vl_imreadjpeg(files,'prefetch', 'numThreads', n) ; 29 | ims___ = vl_imreadjpeg(files) ; 30 | assert(isequal(ims,ims___)) ; 31 | 32 | % Hardening: test prefetching, clearing mex, fetching 33 | vl_imreadjpeg(files,'prefetch') ; 34 | clear mex ; 35 | ims___ = vl_imreadjpeg(files, 'numThreads', n) ; 36 | assert(isequal(ims,ims___)) ; 37 | end 38 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/matlab/xtest/vl_test_print.m: -------------------------------------------------------------------------------- 1 | function
vl_test_print(varargin) 2 | 3 | addpath(fullfile(vl_rootnn(), 'examples', 'mnist')); 4 | 5 | net = cnn_mnist_init('networkType', 'dagnn'); 6 | net.print(varargin{:}); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/utils/get-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | local_dir="$1" 4 | url="$2" 5 | 6 | function get_filename_from_url() { 7 | regexp='^([^\/]*\/)+' 8 | echo -n "$1" | sed -r "s/$regexp//g" 9 | } 10 | 11 | function get_remote_file_size() { 12 | curl -sI "$1" | grep Content-Length | grep -o '[0-9][0-9]*' 13 | } 14 | 15 | filename=$(get_filename_from_url "$url") 16 | local_path="$local_dir/$filename" 17 | remote_size=$(get_remote_file_size "$url") 18 | 19 | echo "Getting: $url" 20 | echo " File: $filename" 21 | echo " Local file path: $local_path" 22 | echo " Remote file size: $remote_size" 23 | 24 | if [ -e "$local_path" ] 25 | then 26 | local_size=$(stat -c%s "$local_path") 27 | echo " Local file size: $local_size" 28 | if [[ "$local_size" -eq "$remote_size" ]] 29 | then 30 | echo " Local and remote file sizes match: not downloading" 31 | exit 0 32 | else 33 | echo " Trying to resume partial download" 34 | if curl -f -C - -o "$local_path" "$url" 35 | then 36 | echo " Download completed successfully" 37 | exit 0 38 | else 39 | echo " Could not resume" 40 | fi 41 | fi 42 | fi 43 | 44 | echo " Downloading the whole file" 45 | curl -f -o "$local_path" "$url" 46 | exit $? 47 | -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/utils/proto/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimerykong/Recurrent-Pixel-Embedding-for-Instance-Grouping/748ade6b969c7861c2a9009cd0f0ffb27004677c/libs/matconvnet-1.0-beta23_modifiedDagnn/utils/proto/__init__.py -------------------------------------------------------------------------------- /libs/matconvnet-1.0-beta23_modifiedDagnn/utils/proto/get-protos.sh: -------------------------------------------------------------------------------- 1 | # FCN 2 | wget -nc "https://raw.githubusercontent.com/longjon/caffe/6e3916766c6b63bff07e2cfadf210ee5e46af807/src/caffe/proto/caffe.proto" --output-document=./caffe_6e3916.proto 3 | protoc ./caffe_6e3916.proto --python_out=./ 4 | 5 | # b590f1d (ResNet) 6 | wget -nc "https://raw.githubusercontent.com/BVLC/caffe/b590f1d27eb5cbd9bc7b9157d447706407c68682/src/caffe/proto/caffe.proto" --output-document=./caffe_b590f1d.proto 7 | protoc ./caffe_b590f1d.proto --python_out=./ 8 | -------------------------------------------------------------------------------- /libs/myFunctions/addCombo_conv_BN_relu.m: -------------------------------------------------------------------------------- 1 | function [sName] = addCombo_conv_BN_relu(net, sName, baseName, kernelSZ, hasBias, stride, pad, dilate) 2 | 3 | 4 | lName = [baseName '_conv']; 5 | block = dagnn.Conv('size', kernelSZ, 'hasBias', hasBias, 'stride', stride, 'pad', pad, 'dilate', dilate); 6 | net.addLayer(lName, block, sName, lName, {[lName '_f']}); 7 | filter = randn(kernelSZ, 'single')*sqrt(2/kernelSZ(end)); 8 | net.params(net.layers(net.getLayerIndex(lName)).paramIndexes).value = filter; 9 | net.params(net.layers(net.getLayerIndex(lName)).paramIndexes).weightDecay = 1; 10 | net.params(net.layers(net.getLayerIndex(lName)).paramIndexes).learningRate = 10; 11 | 
sName = lName; 12 | 13 | lName = [baseName, '_bn']; 14 | block = dagnn.BatchNorm('numChannels', kernelSZ(end)); 15 | % block.usingGlobal = false; 16 | net.addLayer(lName, block, sName, lName, {[lName '_g'], [lName '_b'], [lName '_m']}); 17 | pidx = net.getParamIndex({[lName '_g'], [lName '_b'], [lName '_m']}); 18 | net.params(pidx(1)).weightDecay = 1; 19 | net.params(pidx(2)).weightDecay = 1; 20 | net.params(pidx(1)).learningRate = 10; 21 | net.params(pidx(2)).learningRate = 10; 22 | net.params(pidx(3)).learningRate = 0.1; 23 | net.params(pidx(3)).trainMethod = 'average'; 24 | net.params(pidx(1)).value = ones([kernelSZ(end) 1], 'single'); % slope 25 | net.params(pidx(2)).value = zeros([kernelSZ(end) 1], 'single'); % bias 26 | net.params(pidx(3)).value = zeros([kernelSZ(end) 2], 'single'); % moments 27 | sName = lName; 28 | 29 | lName = [baseName, '_relu']; 30 | block = dagnn.ReLU('leak', 0); 31 | net.addLayer(lName, block, sName, lName); 32 | sName = lName; 33 | 34 | 35 | -------------------------------------------------------------------------------- /libs/myFunctions/extractStats.m: -------------------------------------------------------------------------------- 1 | function stats = extractStats(net) 2 | % ------------------------------------------------------------------------- 3 | sel = find(cellfun(@(x) isa(x,'dagnn.Loss'), {net.layers.block})) ; 4 | stats = struct() ; 5 | for i = 1:numel(sel) 6 | stats.(net.layers(sel(i)).outputs{1}) = net.layers(sel(i)).block.average ; 7 | end 8 | -------------------------------------------------------------------------------- /libs/myFunctions/getRawCounts.m: -------------------------------------------------------------------------------- 1 | function rawcounts = getRawCounts(gt, resim, numClass) 2 | % Calculate the counts for prediction and ground truth.. 3 | num = numClass + 1; 4 | rawcounts = zeros([num, num]); 5 | locs = gt(:) >= 0; 6 | sumim = 1+gt+resim*num; 7 | hs = histc(sumim(locs), 1:num*num); 8 | rawcounts = reshape(hs(:), size(rawcounts)); 9 | -------------------------------------------------------------------------------- /libs/myFunctions/index2RGBlabel.m: -------------------------------------------------------------------------------- 1 | function [ RGBlabel, evalLabelMap ]= index2RGBlabel(indexMap, colorLabel, classID) 2 | % 3 | % indexMap contains classID in [0~numClass-1], where numClass is size(colorLabel,1) 4 | % colorLabel contains rgb values for all the numClass classes. 
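% RGBlabel is an H-by-W-by-3 color rendering of indexMap built from the rows of colorLabel; evalLabelMap maps each index value to classID(index+1), and is all zeros when classID is omitted.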
5 | % 6 | % Shu Kong @uci 7 | % 2/3/2017 8 | 9 | %% 10 | if nargin<3 11 | classID = zeros(1, size(colorLabel,1)); 12 | end 13 | 14 | numClass = size(colorLabel,1); 15 | R = zeros(size(indexMap)); 16 | G = zeros(size(indexMap)); 17 | B = zeros(size(indexMap)); 18 | evalLabelMap = zeros(size(indexMap)); 19 | for i = 0:numClass-1 20 | R(indexMap==i) = colorLabel(i+1,1); 21 | G(indexMap==i) = colorLabel(i+1,2); 22 | B(indexMap==i) = colorLabel(i+1,3); 23 | 24 | evalLabelMap(indexMap==i) = classID(i+1); 25 | end 26 | RGBlabel = cat(3, R,G,B); -------------------------------------------------------------------------------- /libs/myFunctions/loadState.m: -------------------------------------------------------------------------------- 1 | function [net, stats] = loadState(fileName) 2 | % ------------------------------------------------------------------------- 3 | load(fileName, 'net', 'stats') ; 4 | net = dagnn.DagNN.loadobj(net) ; -------------------------------------------------------------------------------- /libs/myFunctions/myfindLastCheckpoint.m: -------------------------------------------------------------------------------- 1 | % ------------------------------------------------------------------------- 2 | function epoch = myfindLastCheckpoint(modelDir, prefixStr) 3 | % ------------------------------------------------------------------------- 4 | list = dir(fullfile(modelDir, sprintf('%snet-epoch-*.mat', prefixStr))) ; 5 | tokens = regexp({list.name}, 'net-epoch-([\d]+).mat', 'tokens') ; 6 | epoch = cellfun(@(x) sscanf(x{1}{1}, '%d'), tokens) ; 7 | epoch = max([epoch 0]) ; 8 | 9 | 10 | -------------------------------------------------------------------------------- /libs/myFunctions/prepareGPUs.m: -------------------------------------------------------------------------------- 1 | function prepareGPUs(opts, cold) 2 | numGpus = numel(opts.gpus) ; 3 | if numGpus > 1 4 | % check parallel pool integrity as it could have timed out 5 | pool = gcp('nocreate') ; 6 | if ~isempty(pool) && pool.NumWorkers ~= numGpus 7 | delete(pool) ; 8 | end 9 | pool = gcp('nocreate') ; 10 | if isempty(pool) 11 | parpool('local', numGpus) ; 12 | cold = true ; 13 | end 14 | if exist(opts.memoryMapFile) 15 | delete(opts.memoryMapFile) ; 16 | end 17 | 18 | end 19 | if numGpus >= 1 && cold 20 | fprintf('%s: resetting GPU\n', mfilename) 21 | if numGpus == 1 22 | gpuDevice(opts.gpus) 23 | else 24 | spmd, gpuDevice(opts.gpus(labindex)), end 25 | end 26 | end -------------------------------------------------------------------------------- /libs/myFunctions/saveState.m: -------------------------------------------------------------------------------- 1 | function saveState(fileName, net, stats) 2 | % ------------------------------------------------------------------------- 3 | net_ = net ; 4 | net = net_.saveobj() ; 5 | save(fileName, 'net', 'stats') ; 6 | -------------------------------------------------------------------------------- /libs/myFunctions/showDagNetFlow.m: -------------------------------------------------------------------------------- 1 | function showDagNetFlow(netbasemodel) 2 | % show information flow within the architecture 3 | % 4 | % Shu Kong 5 | % 08/09/2016 6 | 7 | %% 8 | for i = 1:numel(netbasemodel.layers) 9 | fprintf('layer-%03d %s -- ', i, netbasemodel.layers(i).name ); 10 | 11 | %% input 12 | fprintf('\n\tinput:\n'); 13 | for j = 1:length(netbasemodel.layers(i).inputIndexes) 14 | fprintf('\t\t%s\n', netbasemodel.vars(netbasemodel.layers(i).inputIndexes(j)).name); 15 | end 16 | %% output 17 | 
fprintf('\toutput:\n'); 18 | for j = 1:length(netbasemodel.layers(i).outputIndexes) 19 | fprintf('\t\t%s\n', netbasemodel.vars(netbasemodel.layers(i).outputIndexes(j)).name); 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /libs/myFunctions/switchFigure.m: -------------------------------------------------------------------------------- 1 | 2 | function switchFigure(n) 3 | if get(0,'CurrentFigure') ~= n 4 | try 5 | set(0,'CurrentFigure',n) ; 6 | catch 7 | figure(n) ; 8 | end 9 | end 10 | --------------------------------------------------------------------------------
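The checkpoint helpers under libs/myFunctions (myfindLastCheckpoint, loadState, saveState, extractStats, showDagNetFlow) are meant to be composed inside a training or evaluation script. The sketch below shows one way they might fit together; the model directory, prefix string, and epoch handling are illustrative assumptions, not paths or settings taken from this repository.

% Minimal sketch (assumption: libs/myFunctions is on the MATLAB path and
% modelDir contains checkpoints named [prefixStr 'net-epoch-<N>.mat']).
modelDir  = './exp/myExperiment';   % hypothetical output folder
prefixStr = '';                     % prefix used when the checkpoints were saved

epoch = myfindLastCheckpoint(modelDir, prefixStr);   % returns 0 if no checkpoint exists
if epoch > 0
    checkpointFile = fullfile(modelDir, sprintf('%snet-epoch-%d.mat', prefixStr, epoch));
    [net, stats] = loadState(checkpointFile);        % net comes back as a dagnn.DagNN object
    showDagNetFlow(net);                             % print every layer's input/output variables
    lossAverages = extractStats(net);                % running averages of all dagnn.Loss layers
    % ... continue training, then persist the updated state:
    % saveState(fullfile(modelDir, sprintf('%snet-epoch-%d.mat', prefixStr, epoch+1)), net, stats);
end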