├── .gitattributes ├── .gitignore ├── Data ├── Testing │ ├── Barbara.png │ ├── Child.png │ ├── Child_gnd.bmp │ ├── Child_res.bmp │ ├── IC.png │ ├── Lena.png │ ├── Lena_gnd.bmp │ ├── Lena_res.png │ ├── barbara_gnd.bmp │ ├── gnd.bmp │ ├── input.bmp │ ├── result.bmp │ └── result.txt └── Training │ ├── t1.bmp │ ├── t11.bmp │ ├── t12.bmp │ ├── t13.bmp │ ├── t14.bmp │ ├── t16.bmp │ ├── t17.bmp │ ├── t18.bmp │ ├── t19.bmp │ ├── t2.bmp │ ├── t20.bmp │ ├── t21.bmp │ ├── t22.bmp │ ├── t23.bmp │ ├── t24.bmp │ ├── t25.bmp │ ├── t26.bmp │ ├── t27.bmp │ ├── t28.bmp │ ├── t3.bmp │ ├── t30.bmp │ ├── t31.bmp │ ├── t32.bmp │ ├── t34.bmp │ ├── t35.bmp │ ├── t36.bmp │ ├── t37.bmp │ ├── t38.bmp │ ├── t39.bmp │ ├── t4.bmp │ ├── t40.bmp │ ├── t42.bmp │ ├── t43.bmp │ ├── t44.bmp │ ├── t46.bmp │ ├── t47.bmp │ ├── t48.bmp │ ├── t49.bmp │ ├── t5.bmp │ ├── t50.bmp │ ├── t51.bmp │ ├── t52.bmp │ ├── t59.bmp │ ├── t6.bmp │ ├── t60.bmp │ ├── t61.bmp │ ├── t62.bmp │ ├── t63.bmp │ ├── t66.bmp │ ├── t7.bmp │ ├── tt1.bmp │ ├── tt12.bmp │ ├── tt14.bmp │ ├── tt15.bmp │ ├── tt17.bmp │ ├── tt18.bmp │ ├── tt19.bmp │ ├── tt2.bmp │ ├── tt20.bmp │ ├── tt21.bmp │ ├── tt24.bmp │ ├── tt25.bmp │ ├── tt26.bmp │ ├── tt27.bmp │ ├── tt3.bmp │ ├── tt4.bmp │ ├── tt5.bmp │ ├── tt7.bmp │ └── tt9.bmp ├── Demo_Dictionary_Training.m ├── Demo_SR.m ├── Dictionary ├── D_1024_0.15_5.mat └── D_512_0.15_5.mat ├── L1QP_FeatureSign_yang.m ├── Previous ├── SR-Results.rar └── ScSR.rar ├── README.dat ├── RegularizedSC ├── L1QP_FeatureSign_Set.m ├── L1QP_FeatureSign_yang.m ├── construct_reg_mat.m ├── display_network_nonsquare2.m ├── getObjective_RegSc.m ├── l2ls_learn_basis_dual.m ├── reg_sparse_coding.m ├── regsc.m └── sc2 │ ├── .svn │ ├── entries │ ├── format │ ├── prop-base │ │ ├── cgf_sc2.dll.svn-base │ │ ├── cgf_sc2.mexa64.svn-base │ │ └── cgf_sc2.mexglx.svn-base │ └── text-base │ │ ├── cgf_fitS_sc2.m.svn-base │ │ ├── cgf_sc.c.svn-base │ │ ├── cgf_sc2.dll.svn-base │ │ ├── cgf_sc2.mexa64.svn-base │ │ ├── cgf_sc2.mexglx.svn-base │ │ ├── getObjective2.m.svn-base │ │ ├── makefile.linux.svn-base │ │ └── makefile.win32.svn-base │ ├── cgf_fitS_sc2.m │ ├── cgf_sc.c │ ├── cgf_sc2.dll │ ├── cgf_sc2.mexa64 │ ├── cgf_sc2.mexglx │ ├── getObjective.asv │ ├── getObjective2.m │ ├── getObjective3.m │ ├── getObjective_knn.m │ ├── getObjective_sc.m │ ├── makefile.linux │ ├── makefile.win32 │ └── nrf │ ├── .svn │ ├── entries │ ├── format │ └── text-base │ │ ├── README.svn-base │ │ ├── brent.c.svn-base │ │ ├── frprmn.c.svn-base │ │ ├── getreent.c.svn-base │ │ ├── impure.c.svn-base │ │ ├── linmin.c.svn-base │ │ ├── makefile.linux.svn-base │ │ ├── makefile.win32.svn-base │ │ ├── mnbrak.c.svn-base │ │ ├── nrutil.c.svn-base │ │ └── nrutil.h.svn-base │ ├── README │ ├── brent.c │ ├── frprmn.c │ ├── getreent.c │ ├── impure.c │ ├── linmin.c │ ├── makefile.linux │ ├── makefile.win32 │ ├── mnbrak.c │ ├── nrutil.c │ └── nrutil.h ├── ScSR.m ├── backprojection.m ├── compute_rmse.m ├── extr_lIm_fea.m ├── lin_scale.m ├── patch_pruning.m ├── qssim.m ├── rnd_smp_patch.m ├── sample_patches.m ├── ssim_index.m └── train_coupled_dict.m /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | *.sln merge=union 7 | *.csproj merge=union 8 | *.vbproj merge=union 9 | *.fsproj merge=union 10 | *.dbproj merge=union 11 | 12 | # Standard to msysgit 13 | *.doc diff=astextplain 14 | *.DOC diff=astextplain 15 | *.docx diff=astextplain 
16 | *.DOCX diff=astextplain 17 | *.dot diff=astextplain 18 | *.DOT diff=astextplain 19 | *.pdf diff=astextplain 20 | *.PDF diff=astextplain 21 | *.rtf diff=astextplain 22 | *.RTF diff=astextplain 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | 8 | # Recycle Bin used on file shares 9 | $RECYCLE.BIN/ 10 | 11 | # Windows Installer files 12 | *.cab 13 | *.msi 14 | *.msm 15 | *.msp 16 | 17 | # ========================= 18 | # Operating System Files 19 | # ========================= 20 | 21 | # OSX 22 | # ========================= 23 | 24 | .DS_Store 25 | .AppleDouble 26 | .LSOverride 27 | 28 | # Icon must ends with two \r. 29 | Icon 30 | 31 | # Thumbnails 32 | ._* 33 | 34 | # Files that might appear on external disk 35 | .Spotlight-V100 36 | .Trashes 37 | -------------------------------------------------------------------------------- /Data/Testing/Barbara.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Barbara.png -------------------------------------------------------------------------------- /Data/Testing/Child.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Child.png -------------------------------------------------------------------------------- /Data/Testing/Child_gnd.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Child_gnd.bmp -------------------------------------------------------------------------------- /Data/Testing/Child_res.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Child_res.bmp -------------------------------------------------------------------------------- /Data/Testing/IC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/IC.png -------------------------------------------------------------------------------- /Data/Testing/Lena.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Lena.png -------------------------------------------------------------------------------- /Data/Testing/Lena_gnd.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Lena_gnd.bmp -------------------------------------------------------------------------------- /Data/Testing/Lena_res.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/Lena_res.png -------------------------------------------------------------------------------- /Data/Testing/barbara_gnd.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/barbara_gnd.bmp -------------------------------------------------------------------------------- /Data/Testing/gnd.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/gnd.bmp -------------------------------------------------------------------------------- /Data/Testing/input.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/input.bmp -------------------------------------------------------------------------------- /Data/Testing/result.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Testing/result.bmp -------------------------------------------------------------------------------- /Data/Testing/result.txt: -------------------------------------------------------------------------------- 1 | Child 2 | 32.6932 0.85615 0.92825 3 | 4 | Lena 5 | 31.1951 0.78121 0.8948 6 | 7 | 8 | -------------------------------------------------------------------------------- /Data/Training/t1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t1.bmp -------------------------------------------------------------------------------- /Data/Training/t11.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t11.bmp -------------------------------------------------------------------------------- /Data/Training/t12.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t12.bmp -------------------------------------------------------------------------------- /Data/Training/t13.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t13.bmp -------------------------------------------------------------------------------- /Data/Training/t14.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t14.bmp -------------------------------------------------------------------------------- /Data/Training/t16.bmp: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t16.bmp -------------------------------------------------------------------------------- /Data/Training/t17.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t17.bmp -------------------------------------------------------------------------------- /Data/Training/t18.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t18.bmp -------------------------------------------------------------------------------- /Data/Training/t19.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t19.bmp -------------------------------------------------------------------------------- /Data/Training/t2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t2.bmp -------------------------------------------------------------------------------- /Data/Training/t20.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t20.bmp -------------------------------------------------------------------------------- /Data/Training/t21.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t21.bmp -------------------------------------------------------------------------------- /Data/Training/t22.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t22.bmp -------------------------------------------------------------------------------- /Data/Training/t23.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t23.bmp -------------------------------------------------------------------------------- /Data/Training/t24.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t24.bmp -------------------------------------------------------------------------------- /Data/Training/t25.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t25.bmp 
-------------------------------------------------------------------------------- /Data/Training/t26.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t26.bmp -------------------------------------------------------------------------------- /Data/Training/t27.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t27.bmp -------------------------------------------------------------------------------- /Data/Training/t28.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t28.bmp -------------------------------------------------------------------------------- /Data/Training/t3.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t3.bmp -------------------------------------------------------------------------------- /Data/Training/t30.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t30.bmp -------------------------------------------------------------------------------- /Data/Training/t31.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t31.bmp -------------------------------------------------------------------------------- /Data/Training/t32.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t32.bmp -------------------------------------------------------------------------------- /Data/Training/t34.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t34.bmp -------------------------------------------------------------------------------- /Data/Training/t35.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t35.bmp -------------------------------------------------------------------------------- /Data/Training/t36.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t36.bmp -------------------------------------------------------------------------------- /Data/Training/t37.bmp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t37.bmp -------------------------------------------------------------------------------- /Data/Training/t38.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t38.bmp -------------------------------------------------------------------------------- /Data/Training/t39.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t39.bmp -------------------------------------------------------------------------------- /Data/Training/t4.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t4.bmp -------------------------------------------------------------------------------- /Data/Training/t40.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t40.bmp -------------------------------------------------------------------------------- /Data/Training/t42.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t42.bmp -------------------------------------------------------------------------------- /Data/Training/t43.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t43.bmp -------------------------------------------------------------------------------- /Data/Training/t44.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t44.bmp -------------------------------------------------------------------------------- /Data/Training/t46.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t46.bmp -------------------------------------------------------------------------------- /Data/Training/t47.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t47.bmp -------------------------------------------------------------------------------- /Data/Training/t48.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t48.bmp -------------------------------------------------------------------------------- /Data/Training/t49.bmp: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t49.bmp -------------------------------------------------------------------------------- /Data/Training/t5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t5.bmp -------------------------------------------------------------------------------- /Data/Training/t50.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t50.bmp -------------------------------------------------------------------------------- /Data/Training/t51.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t51.bmp -------------------------------------------------------------------------------- /Data/Training/t52.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t52.bmp -------------------------------------------------------------------------------- /Data/Training/t59.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t59.bmp -------------------------------------------------------------------------------- /Data/Training/t6.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t6.bmp -------------------------------------------------------------------------------- /Data/Training/t60.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t60.bmp -------------------------------------------------------------------------------- /Data/Training/t61.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t61.bmp -------------------------------------------------------------------------------- /Data/Training/t62.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t62.bmp -------------------------------------------------------------------------------- /Data/Training/t63.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t63.bmp 
-------------------------------------------------------------------------------- /Data/Training/t66.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t66.bmp -------------------------------------------------------------------------------- /Data/Training/t7.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/t7.bmp -------------------------------------------------------------------------------- /Data/Training/tt1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt1.bmp -------------------------------------------------------------------------------- /Data/Training/tt12.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt12.bmp -------------------------------------------------------------------------------- /Data/Training/tt14.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt14.bmp -------------------------------------------------------------------------------- /Data/Training/tt15.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt15.bmp -------------------------------------------------------------------------------- /Data/Training/tt17.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt17.bmp -------------------------------------------------------------------------------- /Data/Training/tt18.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt18.bmp -------------------------------------------------------------------------------- /Data/Training/tt19.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt19.bmp -------------------------------------------------------------------------------- /Data/Training/tt2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt2.bmp -------------------------------------------------------------------------------- /Data/Training/tt20.bmp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt20.bmp -------------------------------------------------------------------------------- /Data/Training/tt21.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt21.bmp -------------------------------------------------------------------------------- /Data/Training/tt24.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt24.bmp -------------------------------------------------------------------------------- /Data/Training/tt25.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt25.bmp -------------------------------------------------------------------------------- /Data/Training/tt26.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt26.bmp -------------------------------------------------------------------------------- /Data/Training/tt27.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt27.bmp -------------------------------------------------------------------------------- /Data/Training/tt3.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt3.bmp -------------------------------------------------------------------------------- /Data/Training/tt4.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt4.bmp -------------------------------------------------------------------------------- /Data/Training/tt5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt5.bmp -------------------------------------------------------------------------------- /Data/Training/tt7.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt7.bmp -------------------------------------------------------------------------------- /Data/Training/tt9.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Data/Training/tt9.bmp -------------------------------------------------------------------------------- /Demo_Dictionary_Training.m: 
-------------------------------------------------------------------------------- 1 | % ======================================================================== 2 | % Demo codes for dictionary training by joint sparse coding 3 | % 4 | % Reference 5 | % J. Yang et al. Image super-resolution as sparse representation of raw 6 | % image patches. CVPR 2008. 7 | % J. Yang et al. Image super-resolution via sparse representation. IEEE 8 | % Transactions on Image Processing, Vol 19, Issue 11, pp2861-2873, 2010 9 | % 10 | % Jianchao Yang 11 | % ECE Department, University of Illinois at Urbana-Champaign 12 | % For any questions, send email to jyang29@uiuc.edu 13 | % ========================================================================= 14 | 15 | clear all; clc; close all; 16 | addpath(genpath('RegularizedSC')); 17 | 18 | TR_IMG_PATH = 'Data/Training'; 19 | 20 | dict_size = 512; % dictionary size 21 | lambda = 0.15; % sparsity regularization 22 | patch_size = 5; % image patch size 23 | nSmp = 100000; % number of patches to sample 24 | upscale = 2; % upscaling factor 25 | 26 | % randomly sample image patches 27 | [Xh, Xl] = rnd_smp_patch(TR_IMG_PATH, '*.bmp', patch_size, nSmp, upscale); 28 | 29 | % prune patches with small variances, threshold chosen based on the 30 | % training data 31 | [Xh, Xl] = patch_pruning(Xh, Xl, 10); 32 | 33 | % joint sparse coding 34 | [Dh, Dl] = train_coupled_dict(Xh, Xl, dict_size, lambda); 35 | dict_path = ['Dictionary/D_' num2str(dict_size) '_' num2str(lambda) '_' num2str(patch_size) '.mat' ]; 36 | save(dict_path, 'Dh', 'Dl'); -------------------------------------------------------------------------------- /Demo_SR.m: -------------------------------------------------------------------------------- 1 | % ========================================================================= 2 | % Simple demo codes for image super-resolution via sparse representation 3 | % 4 | % Reference 5 | % J. Yang et al. Image super-resolution as sparse representation of raw 6 | % image patches. CVPR 2008. 7 | % J. Yang et al. Image super-resolution via sparse representation.
IEEE 8 | % Transactions on Image Processing, Vol 19, Issue 11, pp2861-2873, 2010 9 | % 10 | % Jianchao Yang 11 | % ECE Department, University of Illinois at Urbana-Champaign 12 | % For any questions, send email to jyang29@uiuc.edu 13 | % ========================================================================= 14 | 15 | clear all; clc; 16 | 17 | image_list = {'The_Big_Bang_Theory1_S19E01_0248_wanted.png'; 18 | 'The_Simpsons_S19E01_0003_wanted.png' 19 | }; 20 | for i = 1:size(image_list,1) 21 | fn_full = fullfile(sprintf('Data/Testing/%s_res.png',image_list{i}(1:end-4))); 22 | if exist(fn_full,'file') 23 | continue; 24 | end 25 | % read test image 26 | im_l = imread(sprintf('Data/Testing/%s',image_list{i})); 27 | 28 | % set parameters 29 | lambda = 0.2; % sparsity regularization 30 | overlap = 4; % the more overlap the better (patch size 5x5) 31 | up_scale = 2; % scaling factor, depending on the trained dictionary 32 | maxIter = 20; % if 0, do not use backprojection 33 | 34 | % load dictionary 35 | load('Dictionary/D_1024_0.15_5.mat'); 36 | 37 | % change color space, work on illuminance only 38 | im_l_ycbcr = rgb2ycbcr(im_l); 39 | im_l_y = im_l_ycbcr(:, :, 1); 40 | im_l_cb = im_l_ycbcr(:, :, 2); 41 | im_l_cr = im_l_ycbcr(:, :, 3); 42 | 43 | % image super-resolution based on sparse representation 44 | [im_h_y] = ScSR(im_l_y, 2, Dh, Dl, lambda, overlap); 45 | [im_h_y] = ScSR(im_h_y, 2, Dh, Dl, lambda, overlap); 46 | [im_h_y] = backprojection(im_h_y, im_l_y, maxIter); 47 | 48 | % upscale the chrominance simply by "bicubic" 49 | [nrow, ncol] = size(im_h_y); 50 | 51 | im_h_cb = imresize(im_l_cb, [nrow, ncol], 'bicubic'); 52 | im_h_cr = imresize(im_l_cr, [nrow, ncol], 'bicubic'); 53 | 54 | im_h_ycbcr = zeros([nrow, ncol, 3]); 55 | im_h_ycbcr(:, :, 1) = im_h_y; 56 | im_h_ycbcr(:, :, 2) = im_h_cb; 57 | im_h_ycbcr(:, :, 3) = im_h_cr; 58 | im_h = ycbcr2rgb(uint8(im_h_ycbcr)); 59 | % bicubic interpolation for reference 60 | im_b = imresize(im_l, [nrow, ncol], 'bicubic'); 61 | 62 | %save image 63 | 64 | fid = fopen(fn_full,'w+'); 65 | fclose(fid); 66 | imwrite(im_h,fn_full); 67 | end %while 68 | % % read ground truth image 69 | % im = imread('Data/Testing/House_Of_Cards_2013_S02E01_0135_wanted.png'); 70 | % 71 | % % compute PSNR for the illuminance channel 72 | % bb_rmse = compute_rmse(im, im_b); 73 | % sp_rmse = compute_rmse(im, im_h); 74 | % [qssim_sp,~] = qssim(im, im_h); 75 | % [qssim_in,~] = qssim(im, im_b); 76 | % 77 | % im_gray = rgb2gray(im); 78 | % im_h_gray = rgb2gray(im_h); 79 | % im_b_gray = rgb2gray(im_b); 80 | % [ssim_sp,~] = ssim_index(im_gray,im_h_gray); 81 | % [ssim_in,~] = ssim_index(im_gray,im_b_gray); 82 | % 83 | % fn_full = fullfile('Data/Testing/House_Of_Cards_2013_S02E01_0135_wanted_res.png'); 84 | % fid = fopen(fn_full,'w+'); 85 | % fclose(fid); 86 | % imwrite(im_h,fn_full); 87 | % 88 | % bb_psnr = 20*log10(255/bb_rmse); 89 | % sp_psnr = 20*log10(255/sp_rmse); 90 | % 91 | % 92 | % fprintf('PSNR for Bicubic Interpolation: %f dB\n', bb_psnr); 93 | % fprintf('PSNR for Sparse Representation Recovery: %f dB\n', sp_psnr); 94 | % 95 | % disp([num2str(bb_psnr),num2str(ssim_in),num2str(qssim_in)]); 96 | % 97 | % disp([num2str(sp_psnr),num2str(ssim_sp),num2str(qssim_sp)]); 98 | % % show the images 99 | % figure, imshow(im_h); 100 | % title('Sparse Recovery'); 101 | % figure, imshow(im_b); 102 | % title('Bicubic Interpolation'); -------------------------------------------------------------------------------- /Dictionary/D_1024_0.15_5.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Dictionary/D_1024_0.15_5.mat -------------------------------------------------------------------------------- /Dictionary/D_512_0.15_5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/Dictionary/D_512_0.15_5.mat -------------------------------------------------------------------------------- /L1QP_FeatureSign_yang.m: -------------------------------------------------------------------------------- 1 | %% L1QP_FeatureSign solves nonnegative quadradic programming 2 | %% using Feature Sign. 3 | %% 4 | %% min 0.5*x'*A*x+b'*x+\lambda*|x| 5 | %% 6 | %% [net,control]=NNQP_FeatureSign(net,A,b,control) 7 | %% 8 | %% 9 | %% 10 | 11 | function [x]=L1QP_FeatureSign_yang(lambda,A,b) 12 | 13 | A = double(A); 14 | b = double(b); 15 | 16 | EPS = 1e-9; 17 | x=zeros(size(A, 1), 1); %coeff 18 | 19 | grad=A*sparse(x)+b; 20 | [ma mi]=max(abs(grad).*(x==0)); 21 | 22 | while true, 23 | 24 | 25 | if grad(mi)>lambda+EPS, 26 | x(mi)=(lambda-grad(mi))/A(mi,mi); 27 | elseif grad(mi)<-lambda-EPS, 28 | x(mi)=(-lambda-grad(mi))/A(mi,mi); 29 | else 30 | if all(x==0) 31 | break; 32 | end 33 | end 34 | 35 | while true, 36 | a=x~=0; %active set 37 | Aa=A(a,a); 38 | ba=b(a); 39 | xa=x(a); 40 | 41 | %new b based on unchanged sign 42 | vect = -lambda*sign(xa)-ba; 43 | x_new= Aa\vect; 44 | idx = find(x_new); 45 | o_new=(vect(idx)/2 + ba(idx))'*x_new(idx) + lambda*sum(abs(x_new(idx))); 46 | 47 | %cost based on changing sign 48 | s=find(xa.*x_new<=0); 49 | if isempty(s) 50 | x(a)=x_new; 51 | loss=o_new; 52 | break; 53 | end 54 | x_min=x_new; 55 | o_min=o_new; 56 | d=x_new-xa; 57 | t=d./xa; 58 | for zd=s', 59 | x_s=xa-d/t(zd); 60 | x_s(zd)=0; %make sure it's zero 61 | % o_s=L1QP_loss(net,Aa,ba,x_s); 62 | idx = find(x_s); 63 | o_s = (Aa(idx, idx)*x_s(idx)/2 + ba(idx))'*x_s(idx)+lambda*sum(abs(x_s(idx))); 64 | if o_slambda+EPS, 26 | x(mi)=(lambda-grad(mi))/A(mi,mi); 27 | elseif grad(mi)<-lambda-EPS, 28 | x(mi)=(-lambda-grad(mi))/A(mi,mi); 29 | else 30 | if all(x==0) 31 | break; 32 | end 33 | end 34 | 35 | while true, 36 | a=x~=0; %active set 37 | Aa=A(a,a); 38 | ba=b(a); 39 | xa=x(a); 40 | 41 | %new b based on unchanged sign 42 | vect = -lambda*sign(xa)-ba; 43 | x_new= Aa\vect; 44 | idx = find(x_new); 45 | o_new=(vect(idx)/2 + ba(idx))'*x_new(idx) + lambda*sum(abs(x_new(idx))); 46 | 47 | %cost based on changing sign 48 | s=find(xa.*x_new<=0); 49 | if isempty(s) 50 | x(a)=x_new; 51 | loss=o_new; 52 | break; 53 | end 54 | x_min=x_new; 55 | o_min=o_new; 56 | d=x_new-xa; 57 | t=d./xa; 58 | for zd=s', 59 | x_s=xa-d/t(zd); 60 | x_s(zd)=0; %make sure it's zero 61 | % o_s=L1QP_loss(net,Aa,ba,x_s); 62 | idx = find(x_s); 63 | o_s = (Aa(idx, idx)*x_s(idx)/2 + ba(idx))'*x_s(idx)+lambda*sum(abs(x_s(idx))); 64 | if o_sM continue; end 29 | clim=max(abs(A(:,k))); 30 | array(buf+(i-1)*(xsz+buf)+[1:xsz],buf+(j-1)*(ysz+buf)+[1:ysz])=... 
31 | reshape(A(:,k),xsz,ysz)/clim; 32 | k=k+1; 33 | end 34 | end 35 | 36 | if isreal(array) 37 | h=imagesc(array,'EraseMode','none',[-1 1]); 38 | else 39 | h=imagesc(20*log10(abs(array)),'EraseMode','none',[-1 1]); 40 | end; 41 | axis image off 42 | 43 | drawnow 44 | 45 | warning on all 46 | -------------------------------------------------------------------------------- /RegularizedSC/getObjective_RegSc.m: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, fsparsity, fregs] = getObjective_RegSc(X, B, S, Sigma, beta, gamma) 2 | 3 | Err = X - B*S; 4 | 5 | fresidue = 0.5*sum(sum(Err.^2)); 6 | 7 | fsparsity = gamma*sum(sum(abs(S))); 8 | 9 | fregs = 0; 10 | for ii = 1:size(S, 2), % sum the smoothing regularizer over all sparse codes (columns of S) 11 | fregs = fregs + beta*S(:, ii)'*Sigma*S(:, ii); 12 | end 13 | 14 | fobj = fresidue + fsparsity + fregs; -------------------------------------------------------------------------------- /RegularizedSC/l2ls_learn_basis_dual.m: -------------------------------------------------------------------------------- 1 | function B = l2ls_learn_basis_dual(X, S, l2norm, Binit) 2 | % Learning basis using Lagrange dual (with basis normalization) 3 | % 4 | % This code solves the following problem: 5 | % 6 | % minimize_B 0.5*||X - B*S||^2 7 | % subject to ||B(:,j)||_2 <= l2norm, forall j=1...size(S,1) 8 | % 9 | % The details of the algorithm are described in the following paper: 10 | % 'Efficient Sparse Coding Algorithms', Honglak Lee, Alexis Battle, Rajat Raina, Andrew Y. Ng, 11 | % Advances in Neural Information Processing Systems (NIPS) 19, 2007 12 | % 13 | % Written by Honglak Lee 14 | % Copyright 2007 by Honglak Lee, Alexis Battle, Rajat Raina, and Andrew Y. Ng 15 | 16 | L = size(X,1); 17 | N = size(X,2); 18 | M = size(S, 1); 19 | 20 | tic 21 | SSt = S*S'; 22 | XSt = X*S'; 23 | 24 | if exist('Binit', 'var') 25 | dual_lambda = diag(Binit\XSt - SSt); 26 | else 27 | dual_lambda = 10*abs(rand(M,1)); % any arbitrary initialization should be ok.
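% In either case, the code below maximizes (by minimizing its negation with
% fmincon; see fobj_basis_dual at the bottom of this file) the Lagrange dual
%
%   D(lambda) = trace(X*X') - trace((S*S'+diag(lambda))\((X*S')'*(X*S'))) - c*sum(lambda),
%   lambda >= 0, with c = l2norm^2,
%
% and then recovers the basis from the optimal multipliers as
% B = X*S'/(S*S'+diag(lambda)), which is the Bt computation further down.
% (Informal sketch only; see the Lee et al. NIPS 2007 paper cited above for
% the full derivation.)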
28 | end 29 | 30 | c = l2norm^2; 31 | trXXt = sum(sum(X.^2)); 32 | 33 | lb=zeros(size(dual_lambda)); 34 | options = optimset('GradObj','on', 'Hessian','on'); 35 | % options = optimset('GradObj','on', 'Hessian','on', 'TolFun', 1e-7); 36 | 37 | [x, fval, exitflag, output] = fmincon(@(x) fobj_basis_dual(x, SSt, XSt, X, c, trXXt), dual_lambda, [], [], [], [], lb, [], [], options); 38 | % output.iterations 39 | fval_opt = -0.5*N*fval; 40 | dual_lambda= x; 41 | 42 | Bt = (SSt+diag(dual_lambda)) \ XSt'; 43 | B_dual= Bt'; 44 | fobjective_dual = fval_opt; 45 | 46 | 47 | B= B_dual; 48 | fobjective = fobjective_dual; 49 | toc 50 | 51 | return; 52 | 53 | 54 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 55 | 56 | function [f,g,H] = fobj_basis_dual(dual_lambda, SSt, XSt, X, c, trXXt) 57 | % Compute the objective function value at x 58 | L= size(XSt,1); 59 | M= length(dual_lambda); 60 | 61 | SSt_inv = inv(SSt + diag(dual_lambda)); 62 | 63 | % trXXt = sum(sum(X.^2)); 64 | if L>M 65 | % (M*M)*((M*L)*(L*M)) => MLM + MMM = O(M^2(M+L)) 66 | f = -trace(SSt_inv*(XSt'*XSt))+trXXt-c*sum(dual_lambda); 67 | 68 | else 69 | % (L*M)*(M*M)*(M*L) => LMM + LML = O(LM(M+L)) 70 | f = -trace(XSt*SSt_inv*XSt')+trXXt-c*sum(dual_lambda); 71 | end 72 | f= -f; 73 | 74 | if nargout > 1 % fun called with two output arguments 75 | % Gradient of the function evaluated at x 76 | g = zeros(M,1); 77 | temp = XSt*SSt_inv; 78 | g = sum(temp.^2) - c; 79 | g= -g; 80 | 81 | 82 | if nargout > 2 83 | % Hessian evaluated at x 84 | % H = -2.*((SSt_inv*XSt'*XSt*SSt_inv).*SSt_inv); 85 | H = -2.*((temp'*temp).*SSt_inv); 86 | H = -H; 87 | end 88 | end 89 | 90 | return -------------------------------------------------------------------------------- /RegularizedSC/reg_sparse_coding.m: -------------------------------------------------------------------------------- 1 | function [B, S, stat] = reg_sparse_coding(X, num_bases, Sigma, beta, gamma, num_iters, batch_size, initB, fname_save) 2 | % 3 | % Regularized sparse coding 4 | % 5 | % Inputs 6 | % X -data samples, column wise 7 | % num_bases -number of bases 8 | % Sigma -smoothing matrix for regularization 9 | % beta -smoothing regularization 10 | % gamma -sparsity regularization 11 | % num_iters -number of iterations 12 | % batch_size -batch size 13 | % initB -initial dictionary 14 | % fname_save -file name to save dictionary 15 | % 16 | % Outputs 17 | % B -learned dictionary 18 | % S -sparse codes 19 | % stat -statistics about the training 20 | % 21 | % Written by Jianchao Yang @ IFP UIUC, Sep. 2009. 
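% In outline, the alternating loop below minimizes over B and S the objective
%
%   0.5*||X - B*S||_F^2 + gamma*sum(sum(abs(S))) + beta*sum_i S(:,i)'*Sigma*S(:,i)
%   subject to ||B(:,j)||_2 <= VAR_basis (= 1) for every dictionary atom j,
%
% solving for S with B fixed (L1QP_FeatureSign_Set) and for B with S fixed
% (l2ls_learn_basis_dual); getObjective_RegSc.m assembles the same objective
% for monitoring.
%
% Minimal usage sketch (illustrative values; assumes X is a patch_dim-by-num_patches
% matrix that has already been sampled, e.g. as in regsc.m):
%
%   Sigma = eye(512);                      % or pass [] to get the same default
%   [B, S, stat] = reg_sparse_coding(X, 512, Sigma, 1e-1, 0.15, 20);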
22 | 23 | pars = struct; 24 | pars.patch_size = size(X,1); 25 | pars.num_patches = size(X,2); 26 | pars.num_bases = num_bases; 27 | pars.num_trials = num_iters; 28 | pars.beta = beta; 29 | pars.gamma = gamma; 30 | pars.VAR_basis = 1; % maximum L2 norm of each dictionary atom 31 | 32 | if ~isa(X, 'double'), 33 | X = cast(X, 'double'); 34 | end 35 | 36 | if isempty(Sigma), 37 | Sigma = eye(pars.num_bases); 38 | end 39 | 40 | if exist('batch_size', 'var') && ~isempty(batch_size) 41 | pars.batch_size = batch_size; 42 | else 43 | pars.batch_size = size(X, 2); 44 | end 45 | 46 | if exist('fname_save', 'var') && ~isempty(fname_save) 47 | pars.filename = fname_save; 48 | else 49 | pars.filename = sprintf('Results/reg_sc_b%d_%s', num_bases, datestr(now, 30)); 50 | end 51 | 52 | pars 53 | 54 | % initialize basis 55 | if ~exist('initB') || isempty(initB) 56 | B = rand(pars.patch_size, pars.num_bases)-0.5; 57 | B = B - repmat(mean(B,1), size(B,1),1); 58 | B = B*diag(1./sqrt(sum(B.*B))); 59 | else 60 | disp('Using initial B...'); 61 | B = initB; 62 | end 63 | 64 | [L M]=size(B); 65 | 66 | t=0; 67 | % statistics variable 68 | stat= []; 69 | stat.fobj_avg = []; 70 | stat.elapsed_time=0; 71 | 72 | % optimization loop 73 | while t < pars.num_trials 74 | t=t+1; 75 | start_time= cputime; 76 | stat.fobj_total=0; 77 | % Take a random permutation of the samples 78 | indperm = randperm(size(X,2)); 79 | 80 | sparsity = []; 81 | 82 | for batch=1:(size(X,2)/pars.batch_size), 83 | % This is data to use for this step 84 | batch_idx = indperm((1:pars.batch_size)+pars.batch_size*(batch-1)); 85 | Xb = X(:,batch_idx); 86 | 87 | % learn coefficients (conjugate gradient) 88 | S = L1QP_FeatureSign_Set(Xb, B, Sigma, pars.beta, pars.gamma); 89 | 90 | sparsity(end+1) = length(find(S(:) ~= 0))/length(S(:)); 91 | 92 | % get objective 93 | [fobj] = getObjective_RegSc(Xb, B, S, Sigma, pars.beta, pars.gamma); 94 | stat.fobj_total = stat.fobj_total + fobj; 95 | % update basis 96 | B = l2ls_learn_basis_dual(Xb, S, pars.VAR_basis); 97 | end 98 | 99 | % get statistics 100 | stat.fobj_avg(t) = stat.fobj_total / pars.num_patches; 101 | stat.elapsed_time(t) = cputime - start_time; 102 | 103 | fprintf(['epoch= %d, sparsity = %f, fobj= %f, took %0.2f ' ... 
104 | 'seconds\n'], t, mean(sparsity), stat.fobj_avg(t), stat.elapsed_time(t)); 105 | 106 | % save results 107 | fprintf('saving results ...\n'); 108 | experiment = []; 109 | experiment.matfname = sprintf('%s.mat', pars.filename); 110 | save(experiment.matfname, 't', 'pars', 'B', 'stat'); 111 | fprintf('saved as %s\n', experiment.matfname); 112 | end 113 | 114 | return 115 | 116 | function retval = assert(expr) 117 | retval = true; 118 | if ~expr 119 | error('Assertion failed'); 120 | retval = false; 121 | end 122 | return 123 | -------------------------------------------------------------------------------- /RegularizedSC/regsc.m: -------------------------------------------------------------------------------- 1 | % Regularized sparse coding 2 | % 3 | 4 | clear all; close all; clc; 5 | 6 | addpath('data'); 7 | addpath('L1REG'); 8 | addpath('sc2'); 9 | 10 | load('mnist_patches_100000.mat'); 11 | 12 | nBases = 128; 13 | Sigma = construct_reg_mat(nBases, 'Tikhonov'); 14 | beta = 1e-1; 15 | gamma = 0.1; 16 | num_iters = 50; 17 | 18 | [B, S, stat] = reg_sparse_coding(X_total, nBases, Sigma, beta, gamma, num_iters); 19 | 20 | display_network_nonsqure2(B); -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/entries: -------------------------------------------------------------------------------- 1 | 9 2 | 3 | dir 4 | 119 5 | svn+ssh://samba.ifp.uiuc.edu/homes/jyang29/svn/codes/Hierarchical%20Discriminant%20Sparse%20Coding/sparse_coding_backup1/sparse_coding_backup1/honglaklee/code/sc2 6 | svn+ssh://samba.ifp.uiuc.edu/homes/jyang29/svn/codes 7 | 8 | 9 | 10 | 2009-02-17T22:37:45.240477Z 11 | 1 12 | jyang29 13 | 14 | 15 | svn:special svn:externals svn:needs-lock 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | b58e5d2a-d51e-4df9-a013-8d2d35c93361 28 | 29 | cgf_sc2.dll 30 | file 31 | 32 | 33 | 34 | 35 | 2009-03-06T03:00:13.000000Z 36 | 0f0568440fb4c34ddef5602020c93be7 37 | 2009-02-17T22:37:45.240477Z 38 | 1 39 | jyang29 40 | has-props 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 28672 62 | 63 | nrf 64 | dir 65 | 66 | cgf_sc.c 67 | file 68 | 69 | 70 | 71 | 72 | 2009-03-06T03:00:14.000000Z 73 | 72ab7b8194ea74e258742ec4361a0f4e 74 | 2009-02-17T22:37:45.240477Z 75 | 1 76 | jyang29 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 10303 99 | 100 | cgf_fitS_sc2.m 101 | file 102 | 103 | 104 | 105 | 106 | 2009-03-06T03:00:14.000000Z 107 | 7b18b54cfd2ad637b012858108a46f5f 108 | 2009-02-17T22:37:45.240477Z 109 | 1 110 | jyang29 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 3844 133 | 134 | getObjective2.m 135 | file 136 | 137 | 138 | 139 | 140 | 2009-03-06T03:00:14.000000Z 141 | 14d5ca0e80cfcbbce64f2b1d8fe24f2c 142 | 2009-02-17T22:37:45.240477Z 143 | 1 144 | jyang29 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 1241 167 | 168 | cgf_sc2.mexa64 169 | file 170 | 171 | 172 | 173 | 174 | 2009-03-06T03:00:14.000000Z 175 | 2ea562f646232e823d2cfb5cac8c11f7 176 | 2009-02-17T22:37:45.240477Z 177 | 1 178 | jyang29 179 | has-props 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 26507 201 | 202 | cgf_sc2.mexglx 203 | file 204 | 205 | 206 | 207 | 208 | 2009-03-06T03:00:14.000000Z 209 | 
211dd8771987a37a16b23c6929e5f1d0 210 | 2009-02-17T22:37:45.240477Z 211 | 1 212 | jyang29 213 | has-props 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | 233 | 234 | 20889 235 | 236 | makefile.linux 237 | file 238 | 239 | 240 | 241 | 242 | 2009-03-06T03:00:14.000000Z 243 | ccb672e8c8b447f6380f253bc4dffe0c 244 | 2009-02-17T22:37:45.240477Z 245 | 1 246 | jyang29 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 140 269 | 270 | makefile.win32 271 | file 272 | 273 | 274 | 275 | 276 | 2009-03-06T03:00:14.000000Z 277 | 6a8a67fe6797d4d950ad2206ed6e9a67 278 | 2009-02-17T22:37:45.240477Z 279 | 1 280 | jyang29 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 183 303 | 304 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/format: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/prop-base/cgf_sc2.dll.svn-base: -------------------------------------------------------------------------------- 1 | K 13 2 | svn:mime-type 3 | V 24 4 | application/octet-stream 5 | END 6 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/prop-base/cgf_sc2.mexa64.svn-base: -------------------------------------------------------------------------------- 1 | K 13 2 | svn:mime-type 3 | V 24 4 | application/octet-stream 5 | END 6 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/prop-base/cgf_sc2.mexglx.svn-base: -------------------------------------------------------------------------------- 1 | K 13 2 | svn:mime-type 3 | V 24 4 | application/octet-stream 5 | END 6 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/cgf_fitS_sc2.m.svn-base: -------------------------------------------------------------------------------- 1 | function S = cgf_fitS_sc2(A,X, sparsity, noise_var, beta, epsilon, sigma, tol, disp_ocbsol, disp_patnum, disp_stats, Sinit) 2 | % cgf_fitS -- fit internal vars S to the data X using fast congugate gradient 3 | % Usage 4 | % S = cgf_fitS(A,X,noise_var,beta,sigma, 5 | % [tol, disp_ocbsol, disp_patnum, disp_stats]) 6 | % Inputs 7 | % A basis functions 8 | % X data vectors 9 | % noise_var variance of the noise (|x-As|^2) 10 | % beta steepness term for prior 11 | % sigma scaling term for prior 12 | % tol solution tolerance (default 0.001) 13 | % disp_ocbsol display info from the fitting process 14 | % disp_patnum display the pattern number 15 | % disp_stats display summary statistics for the fit 16 | % Outputs 17 | % S the estimated coefficients 18 | 19 | maxiter=100; 20 | 21 | [L,M] = size(A); 22 | N = size(X,2); 23 | 24 | if ~exist('tol','var'); tol = 0.001; end 25 | if ~exist('disp_ocbsol','var'); disp_ocbsol = 0; end 26 | if ~exist('disp_patnum','var'); disp_patnum = 1; end 27 | if ~exist('disp_stats','var'); disp_stats = 1; end 28 | if ~exist('maxiter','var'); maxiter = 8; end 29 | if ~exist('reduction','var'); reduction = 8; end 30 | 31 | % XXX: we don't use initialization for "log" sparsity function because of local optima 32 | if ~exist('Sinit','var') %|| strcmp(sparsity, 'log') || strcmp(sparsity, 
'huberL1') || strcmp(sparsity, 'epsL1') 33 | Sinit=A'*X; 34 | normA2=sum(A.*A)'; 35 | for i=1:N 36 | Sinit(:,i)=Sinit(:,i)./normA2; 37 | end 38 | initiated = 0; 39 | else 40 | initiated = 1; 41 | end 42 | 43 | if ~strcmp(sparsity, 'log') && ~strcmp(sparsity, 'huberL1') && ~strcmp(sparsity, ... 44 | 'epsL1') 45 | error('sparsity function is not properly specified!\n'); 46 | end 47 | 48 | lambda=1/noise_var; 49 | 50 | if strcmp(sparsity, 'huberL1') || strcmp(sparsity, 'epsL1') 51 | if ~exist('epsilon','var') || isempty(epsilon) || epsilon==0 52 | error('epsilon was not set properly!\n') 53 | end 54 | end 55 | 56 | S = zeros(M,N); 57 | tic 58 | if ~initiated 59 | if strcmp(sparsity, 'log') 60 | [S niters nf ng] = cgf_sc2(A,X,Sinit,0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 61 | elseif strcmp(sparsity, 'huberL1') 62 | [S niters nf ng] = cgf_sc2(A,X,Sinit,1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 63 | elseif strcmp(sparsity, 'epsL1') 64 | [S niters nf ng] = cgf_sc2(A,X,Sinit,2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 65 | end 66 | else 67 | if strcmp(sparsity, 'log') 68 | [S niters nf ng] = cgf_sc2(A,X,Sinit,0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 69 | elseif strcmp(sparsity, 'huberL1') 70 | [S niters nf ng] = cgf_sc2(A,X,Sinit,1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 71 | elseif strcmp(sparsity, 'epsL1') 72 | [S niters nf ng] = cgf_sc2(A,X,Sinit,2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 73 | end 74 | % for i=1:size(X,2) 75 | % [aa,bb] = sort(abs(Sinit(:,i))); 76 | % bb = flipud(bb); 77 | % active = bb(1:M/reduction); 78 | % if strcmp(sparsity, 'log') 79 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 80 | % elseif strcmp(sparsity, 'huberL1') 81 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 82 | % elseif strcmp(sparsity, 'epsL1') 83 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 84 | % end 85 | % S(active,i) = S2; 86 | % end 87 | % fprintf('%d',reduction); 88 | end 89 | t = toc; 90 | 91 | if (disp_stats) 92 | fprintf(' aits=%6.2f af=%6.2f ag=%6.2f at=%7.4f\n', ... 93 | niters/N, nf/N, ng/N, t/N); 94 | end 95 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/cgf_sc.c.svn-base: -------------------------------------------------------------------------------- 1 | /* 2 | * cgf.c: conj. grad. routine for finding optimal s - fast! 3 | */ 4 | #include 5 | #include 6 | #include "mex.h" 7 | 8 | #define sgn(x) (x>0 ? 1 : (x<0 ? 
-1 : 0)) 9 | 10 | 11 | extern void cgf(double *Sout, double *nits, double *nf, double *ng, double *Sin, double *X, int npats, double tol, int maxiter, int numflag); 12 | 13 | /* Input & Output Arguments */ 14 | 15 | #define A_IN prhs[0] /* basis matrix */ 16 | #define X_IN prhs[1] /* data vectors */ 17 | #define S_IN prhs[2] /* initial guess for S */ 18 | #define SPARSITY_IN prhs[3] /* initial guess for S */ 19 | #define LAMBDA_IN prhs[4] /* precision */ 20 | #define BETA_IN prhs[5] /* prior steepness */ 21 | #define SIGMA_IN prhs[6] /* scaling parameter for prior */ 22 | #define TOL_IN prhs[7] /* tolerance */ 23 | #define MAXITER_IN prhs[8] /* maximum iterations for dfrpmin */ 24 | #define OUTFLAG_IN prhs[9] /* output flag */ 25 | #define NUMFLAG_IN prhs[10] /* pattern number output flag */ 26 | #define EPSILON_IN prhs[11] /* huber function epsilon */ 27 | 28 | #define S_OUT plhs[0] /* basis coeffs for each data vector */ 29 | #define NITS_OUT plhs[1] /* total iterations done by cg */ 30 | #define NF_OUT plhs[2] /* total P(s|x,A) calcs */ 31 | #define NG_OUT plhs[3] /* total d/ds P(s|x,A) calcs */ 32 | 33 | /* Define indexing macros for matricies */ 34 | 35 | /* L = dimension of input vectors 36 | * M = number of basis functions 37 | */ 38 | 39 | #define A_(i,j) A[(i) + (j)*L] /* A is L x M */ 40 | #define X_(i,n) X[(i) + (n)*L] /* X is L x npats */ 41 | 42 | #define Sout_(i,n) Sout[(i) + (n)*M] /* S is M x npats */ 43 | #define Sin_(i,n) Sin[(i) + (n)*M] /* S is M x npats */ 44 | 45 | #define AtA_(i,j) AtA[(i) + (j)*M] /* AtA is M x M */ 46 | 47 | /* Globals for using with frprmin */ 48 | 49 | static double *A; /* basis matrix */ 50 | static int L; /* data dimension */ 51 | static int M; /* number of basis vectors */ 52 | static double lambda; /* 1/noise_var */ 53 | static double beta; /* prior steepness */ 54 | static double sigma; /* prior scaling */ 55 | static double k1, k2, k3; /* precomputed constants for f1dim */ 56 | 57 | static double *x; /* current data vector being fitted */ 58 | static double *s0; /* init coefficient vector (1:M) */ 59 | static double *d; /* search dir. 
coefficient vector (1:M) */ 60 | static int outflag; /* print search progress */ 61 | 62 | static double *AtA; /* Only compute A'*A once (1:M,1:M) */ 63 | static double *Atx; /* A*x (1:M) */ 64 | 65 | static int fcount, gcount; 66 | 67 | #define SP_LOG 0 68 | #define SP_HUBER_L1 1 69 | #define SP_EPS_L1 2 70 | static int g_sparsity_func; 71 | static double g_epsilon; /* use global variable for huber function epsilon */ 72 | 73 | static void init_global_arrays() 74 | { 75 | int i, j, k; 76 | double *Ai, *Aj, sum; 77 | 78 | x = (double *) malloc(L * sizeof(double)); 79 | s0 = (double *) malloc(M * sizeof(double)); 80 | d = (double *) malloc(M * sizeof(double)); 81 | AtA = (double *) malloc(M * M * sizeof(double)); 82 | Atx = (double *) malloc(M * sizeof(double)); 83 | 84 | /* Calc A'*A */ 85 | for (i = 0; i < M; i++) { 86 | Ai = A + i * L; 87 | for (j = 0; j < M; j++) { 88 | Aj = A + j * L; 89 | sum = 0.0; 90 | for (k = 0; k < L; k++) { 91 | sum += Ai[k] * Aj[k]; 92 | } 93 | AtA_(i, j) = sum; 94 | } 95 | } 96 | } 97 | 98 | static void free_global_arrays() 99 | { 100 | 101 | free((double *) x); 102 | free((double *) s0); 103 | free((double *) d); 104 | free((double *) AtA); 105 | free((double *) Atx); 106 | } 107 | 108 | 109 | 110 | float init_f1dim(s1, d1) 111 | float *s1, *d1; 112 | { 113 | register int i, j; 114 | register double As, Ag, sum; 115 | register float fval; 116 | extern double sparse(); 117 | 118 | for (i = 0; i < M; i++) { 119 | s0[i] = s1[i + 1]; 120 | d[i] = d1[i + 1]; 121 | } 122 | k1 = k2 = k3 = 0; 123 | for (i = 0; i < L; i++) { 124 | As = Ag = 0; 125 | for (j = 0; j < M; j++) { 126 | As += A_(i, j) * s0[j]; 127 | Ag += A_(i, j) * d[j]; 128 | } 129 | k1 += As * (As - 2 * x[i]); 130 | k2 += Ag * (As - x[i]); 131 | k3 += Ag * Ag; 132 | } 133 | k1 *= 0.5 * lambda; 134 | k2 *= lambda; 135 | k3 *= 0.5 * lambda; 136 | 137 | fval = k1; 138 | 139 | sum = 0; 140 | for (i = 0; i < M; i++) 141 | sum += sparse(s0[i] / sigma); 142 | fval += beta * sum; 143 | 144 | fcount++; 145 | 146 | return (fval); 147 | } 148 | 149 | float f1dim(alpha) 150 | float alpha; 151 | { 152 | int i; 153 | double sum; 154 | float fval; 155 | extern double sparse(); 156 | 157 | fval = k1 + (k2 + k3 * alpha) * alpha; 158 | 159 | sum = 0; 160 | for (i = 0; i < M; i++) { 161 | sum += sparse((s0[i] + alpha * d[i]) / sigma); 162 | } 163 | fval += beta * sum; 164 | 165 | fcount++; 166 | 167 | return (fval); 168 | } 169 | 170 | 171 | /* 172 | * Gradient evaluation used by conj grad descent 173 | */ 174 | void dfunc(p, grad) 175 | float *p, *grad; 176 | { 177 | register int i, j; 178 | register double sum, *cptr, bos = beta / sigma; 179 | register float *p1; 180 | extern double sparse_prime(); 181 | 182 | p1 = &p[1]; 183 | 184 | for (i = 0; i < M; i++) { 185 | cptr = AtA + i * M; 186 | sum = 0; 187 | for (j = 0; j < M; j++) { 188 | sum += p1[j] * *cptr++; 189 | } 190 | grad[i + 1] = lambda * (sum - Atx[i]) + bos * sparse_prime((double) p1[i] / sigma); 191 | } 192 | gcount++; 193 | } 194 | 195 | double sparse(x) 196 | double x; 197 | { 198 | if (g_sparsity_func== SP_LOG) { 199 | return (log(1.0 + x * x)); 200 | } else if (g_sparsity_func== SP_HUBER_L1) { 201 | /* retval(idx_in) = 1/(2*eps).*x(idx_in).^2; 202 | retval(idx_out) = 1/2.*(2.*abs(x(idx_out))-eps); */ 203 | if (fabs(x) < g_epsilon) 204 | return x*x/(2.0*g_epsilon); /*1.0/(2.0*g_epsilon)* x*x;*/ 205 | else 206 | return (2*abs(x)-g_epsilon)/2.0; /*1.0/2.0* (2*abs(x)-g_epsilon);*/ 207 | } else if (g_sparsity_func== SP_EPS_L1) { 208 | return (sqrt(x * x + 
g_epsilon)); 209 | } 210 | 211 | fprintf(stderr, "Error: sparsity function is not properly specified!\n"); 212 | exit(-1); 213 | } 214 | 215 | double sparse_prime(x) 216 | double x; 217 | { 218 | if (g_sparsity_func== SP_LOG) { 219 | return (2 * x / (1.0 + x * x)); 220 | } else if (g_sparsity_func== SP_HUBER_L1) { 221 | /* retval(idx_in) = 1/(2*eps).* 2.0.*x(idx_in); 222 | retval(idx_out) = 1/2.* 2.*sign(x(idx_out)); */ 223 | if (fabs(x) < g_epsilon) 224 | return x/ g_epsilon; /*1.0/(2.0*g_epsilon)* 2.0*x;*/ 225 | else 226 | return sgn(x); 227 | } else if (g_sparsity_func== SP_EPS_L1) { 228 | return x/sqrt(x * x + g_epsilon); 229 | } 230 | 231 | fprintf(stderr, "Error: sparsity function is not properly specified!\n"); 232 | exit(-2); 233 | 234 | } 235 | 236 | void iter_do() 237 | { 238 | } 239 | 240 | 241 | #include 242 | extern int ITMAX; 243 | 244 | void cgf(double *Sout, double *nits, double *nf, double *ng, double *Sin, double *X, int npats, double tol, int maxiter, int numflag) 245 | { 246 | double sum; 247 | float fret; 248 | int niter, l, m, n; 249 | float *p; 250 | 251 | *nits = *nf = *ng = 0.0; 252 | ITMAX = 10; 253 | 254 | init_global_arrays(); 255 | p = vector(1, M); 256 | 257 | for (n = 0; n < npats; n++) { 258 | if (numflag) { 259 | fprintf(stderr, "\r%d", n + 1); 260 | fflush(stderr); 261 | } 262 | 263 | for (l = 0; l < L; l++) { 264 | x[l] = X_(l, n); 265 | } 266 | 267 | for (m = 0; m < M; m++) { 268 | 269 | /* precompute Atx for this pattern */ 270 | sum = 0.0; 271 | for (l = 0; l < L; l++) { 272 | sum += A_(l, m) * x[l]; 273 | } 274 | Atx[m] = sum; 275 | 276 | /* copy initial guess */ 277 | p[m + 1] = Sin_(m, n); 278 | } 279 | 280 | fcount = gcount = 0; 281 | 282 | frprmn(p, M, (float) tol, &niter, &fret, init_f1dim, f1dim, dfunc); 283 | 284 | *nits += (double) niter; 285 | *nf += (double) fcount; 286 | *ng += (double) gcount; 287 | 288 | if (outflag) { 289 | fprintf(stdout, "\nfret=%f niters=%d fcount=%d gcount=%d\n", fret, niter, fcount, gcount); 290 | fflush(stdout); 291 | } 292 | 293 | /* copy back solution */ 294 | for (m = 0; m < M; m++) { 295 | Sout_(m, n) = p[m + 1]; 296 | } 297 | } 298 | 299 | free_global_arrays(); 300 | free_vector(p, 1, n); 301 | } 302 | 303 | 304 | void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) 305 | { 306 | double *Sout, nits = 0, nf = 0, ng = 0, *Sin; 307 | double *X, tol; 308 | int maxiter, npats, numflag, i; 309 | 310 | /* Check for proper number of arguments */ 311 | 312 | if (nrhs < 7) { 313 | mexErrMsgTxt("cgf requires 6 input arguments."); 314 | } else if (nlhs < 1) { 315 | mexErrMsgTxt("cgf requires 1 output argument."); 316 | } 317 | 318 | /* Assign pointers to the various parameters */ 319 | 320 | A = mxGetPr(A_IN); 321 | X = mxGetPr(X_IN); 322 | Sin = mxGetPr(S_IN); 323 | g_sparsity_func = mxGetScalar(SPARSITY_IN); 324 | lambda = mxGetScalar(LAMBDA_IN); 325 | beta = mxGetScalar(BETA_IN); 326 | sigma = mxGetScalar(SIGMA_IN); 327 | 328 | /*fprintf(stderr,"--------------\n"); 329 | fprintf(stderr,"g_sparsity_func = %d\n", g_sparsity_func); 330 | fprintf(stderr,"lambda = %f\n", lambda); 331 | fprintf(stderr,"beta = %f\n", beta); 332 | fprintf(stderr,"sigma = %f\n", sigma); 333 | */ 334 | 335 | 336 | if (nrhs < 8) { 337 | tol = 0.1; 338 | } else { 339 | tol = mxGetScalar(TOL_IN); 340 | } 341 | 342 | if (nrhs < 9) { 343 | maxiter = 100; 344 | } else { 345 | maxiter = (int) mxGetScalar(MAXITER_IN); 346 | } 347 | 348 | if (nrhs < 10) { 349 | outflag = 0; 350 | } else { 351 | outflag = (int) 
mxGetScalar(OUTFLAG_IN); 352 | } 353 | 354 | if (nrhs < 11) { 355 | numflag = 0; 356 | } else { 357 | numflag = (int) mxGetScalar(NUMFLAG_IN); 358 | } 359 | 360 | /* This is only for sparsity type = SP_HUBER_L1, SP_EPS_L1 */ 361 | if (nrhs < 12) { 362 | g_epsilon = 0.5; 363 | } else { 364 | g_epsilon = mxGetScalar(EPSILON_IN); 365 | } 366 | 367 | 368 | L = (int) mxGetM(A_IN); 369 | M = (int) mxGetN(A_IN); 370 | npats = (int) mxGetN(X_IN); 371 | 372 | /* Create a matrix for the return argument */ 373 | 374 | S_OUT = mxCreateDoubleMatrix(M, npats, mxREAL); 375 | Sout = mxGetPr(S_OUT); 376 | 377 | if (nlhs > 1) { 378 | NITS_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 379 | } 380 | if (nlhs > 2) { 381 | NF_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 382 | } 383 | if (nlhs > 3) { 384 | NG_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 385 | } 386 | 387 | /* Do the actual computations in a subroutine */ 388 | 389 | cgf(Sout, &nits, &nf, &ng, Sin, X, npats, tol, maxiter, numflag); 390 | 391 | if (nlhs > 1) { 392 | *(mxGetPr(NITS_OUT)) = nits; 393 | } 394 | if (nlhs > 2) { 395 | *(mxGetPr(NF_OUT)) = nf; 396 | } 397 | if (nlhs > 3) { 398 | *(mxGetPr(NG_OUT)) = ng; 399 | } 400 | } 401 | 402 | #undef A_ 403 | #undef X_ 404 | #undef Sout_ 405 | #undef Sin_ 406 | #undef AtA_ 407 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/cgf_sc2.dll.svn-base: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/.svn/text-base/cgf_sc2.dll.svn-base -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/cgf_sc2.mexa64.svn-base: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/.svn/text-base/cgf_sc2.mexa64.svn-base -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/cgf_sc2.mexglx.svn-base: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/.svn/text-base/cgf_sc2.mexglx.svn-base -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/getObjective2.m.svn-base: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, fsparsity] = getObjective2(A, S, X, sparsity, noise_var, beta, sigma, epsilon) 2 | 3 | 4 | if ~strcmp(sparsity, 'log') && ~strcmp(sparsity, 'huberL1') && ~strcmp(sparsity,'epsL1') && ... 5 | ~strcmp(sparsity,'FS') && ~strcmp(sparsity, 'L1') && ~strcmp(sparsity,'LARS') && ... 
6 | ~strcmp(sparsity, 'trueL1') && ~strcmp(sparsity, 'logpos') 7 | error('sparsity function is not properly specified!\n'); 8 | end 9 | 10 | if strcmp(sparsity, 'huberL1') || strcmp(sparsity, 'epsL1') 11 | if ~exist('epsilon','var') || isempty(epsilon) || epsilon==0 12 | error('epsilon was not set properly!\n') 13 | end 14 | end 15 | 16 | 17 | E = A*S - X; 18 | lambda=1/noise_var; 19 | fresidue = 0.5*lambda*sum(sum(E.^2)); 20 | 21 | if strcmp(sparsity, 'log') 22 | fsparsity = beta*sum(sum(log(1+(S/sigma).^2))); 23 | elseif strcmp(sparsity, 'huberL1') 24 | fsparsity = beta*sum(sum(huber_func(S/sigma, epsilon))); 25 | elseif strcmp(sparsity, 'epsL1') 26 | fsparsity = beta*sum(sum(sqrt(epsilon+(S/sigma).^2))); 27 | elseif strcmp(sparsity, 'L1') | strcmp(sparsity, 'LARS') | strcmp(sparsity, 'trueL1') | strcmp(sparsity, 'FS') 28 | fsparsity = beta*sum(sum(abs(S/sigma))); 29 | elseif strcmp(sparsity, 'logpos') 30 | fsparsity = beta*sum(sum(log(1+(S/sigma)))); 31 | end 32 | 33 | fobj = fresidue + fsparsity; 34 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/makefile.linux.svn-base: -------------------------------------------------------------------------------- 1 | MEX = mex 2 | NRFDIR = ./nrf 3 | 4 | all: cgf_sc.c 5 | make -C nrf -f makefile.linux 6 | $(MEX) -I$(NRFDIR) -L$(NRFDIR) -lnrfopt cgf_sc.c -o cgf_sc2 7 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/.svn/text-base/makefile.win32.svn-base: -------------------------------------------------------------------------------- 1 | MEX = C:\MATLAB6p5\bin\win32\mex.bat #mex 2 | NRFDIR = ./nrf 3 | MEXT = dll 4 | 5 | all: cgf_sc.c 6 | make -C nrf -f makefile.win32 7 | $(MEX) -I$(NRFDIR) cgf_sc.c nrf/libnrfopt.a -output cgf_sc2 8 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/cgf_fitS_sc2.m: -------------------------------------------------------------------------------- 1 | function S = cgf_fitS_sc2(A,X, sparsity, noise_var, beta, epsilon, sigma, tol, disp_ocbsol, disp_patnum, disp_stats, Sinit) 2 | % cgf_fitS -- fit internal vars S to the data X using fast congugate gradient 3 | % Usage 4 | % S = cgf_fitS(A,X,noise_var,beta,sigma, 5 | % [tol, disp_ocbsol, disp_patnum, disp_stats]) 6 | % Inputs 7 | % A basis functions 8 | % X data vectors 9 | % noise_var variance of the noise (|x-As|^2) 10 | % beta steepness term for prior 11 | % sigma scaling term for prior 12 | % tol solution tolerance (default 0.001) 13 | % disp_ocbsol display info from the fitting process 14 | % disp_patnum display the pattern number 15 | % disp_stats display summary statistics for the fit 16 | % Outputs 17 | % S the estimated coefficients 18 | 19 | maxiter=100; 20 | 21 | [L,M] = size(A); 22 | N = size(X,2); 23 | 24 | if ~exist('tol','var'); tol = 0.001; end 25 | if ~exist('disp_ocbsol','var'); disp_ocbsol = 0; end 26 | if ~exist('disp_patnum','var'); disp_patnum = 1; end 27 | if ~exist('disp_stats','var'); disp_stats = 1; end 28 | if ~exist('maxiter','var'); maxiter = 8; end 29 | if ~exist('reduction','var'); reduction = 8; end 30 | 31 | % XXX: we don't use initialization for "log" sparsity function because of local optima 32 | if ~exist('Sinit','var') %|| strcmp(sparsity, 'log') || strcmp(sparsity, 'huberL1') || strcmp(sparsity, 'epsL1') 33 | Sinit=A'*X; 34 | normA2=sum(A.*A)'; 35 | for i=1:N 36 | Sinit(:,i)=Sinit(:,i)./normA2; 37 | end 38 | initiated = 0; 39 | else 40 | initiated = 1; 41 | end 42 | 
43 | if ~strcmp(sparsity, 'log') && ~strcmp(sparsity, 'huberL1') && ~strcmp(sparsity, ... 44 | 'epsL1') 45 | error('sparsity function is not properly specified!\n'); 46 | end 47 | 48 | lambda=1/noise_var; 49 | 50 | if strcmp(sparsity, 'huberL1') || strcmp(sparsity, 'epsL1') 51 | if ~exist('epsilon','var') || isempty(epsilon) || epsilon==0 52 | error('epsilon was not set properly!\n') 53 | end 54 | end 55 | 56 | S = zeros(M,N); 57 | tic 58 | if ~initiated 59 | if strcmp(sparsity, 'log') 60 | [S niters nf ng] = cgf_sc2(A,X,Sinit,0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 61 | elseif strcmp(sparsity, 'huberL1') 62 | [S niters nf ng] = cgf_sc2(A,X,Sinit,1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 63 | elseif strcmp(sparsity, 'epsL1') 64 | [S niters nf ng] = cgf_sc2(A,X,Sinit,2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 65 | end 66 | else 67 | if strcmp(sparsity, 'log') 68 | [S niters nf ng] = cgf_sc2(A,X,Sinit,0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 69 | elseif strcmp(sparsity, 'huberL1') 70 | [S niters nf ng] = cgf_sc2(A,X,Sinit,1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 71 | elseif strcmp(sparsity, 'epsL1') 72 | [S niters nf ng] = cgf_sc2(A,X,Sinit,2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 73 | end 74 | % for i=1:size(X,2) 75 | % [aa,bb] = sort(abs(Sinit(:,i))); 76 | % bb = flipud(bb); 77 | % active = bb(1:M/reduction); 78 | % if strcmp(sparsity, 'log') 79 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),0,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum); 80 | % elseif strcmp(sparsity, 'huberL1') 81 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),1,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 82 | % elseif strcmp(sparsity, 'epsL1') 83 | % [S2 niters nf ng] = cgf_sc2(A(:,active),X(:,i),Sinit(:,i),2,lambda,beta,sigma,tol,maxiter, disp_ocbsol,disp_patnum, epsilon); 84 | % end 85 | % S(active,i) = S2; 86 | % end 87 | % fprintf('%d',reduction); 88 | end 89 | t = toc; 90 | 91 | if (disp_stats) 92 | fprintf(' aits=%6.2f af=%6.2f ag=%6.2f at=%7.4f\n', ... 93 | niters/N, nf/N, ng/N, t/N); 94 | end 95 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/cgf_sc.c: -------------------------------------------------------------------------------- 1 | /* 2 | * cgf.c: conj. grad. routine for finding optimal s - fast! 3 | */ 4 | #include 5 | #include 6 | #include "mex.h" 7 | 8 | #define sgn(x) (x>0 ? 1 : (x<0 ? 
-1 : 0)) 9 | 10 | 11 | extern void cgf(double *Sout, double *nits, double *nf, double *ng, double *Sin, double *X, int npats, double tol, int maxiter, int numflag); 12 | 13 | /* Input & Output Arguments */ 14 | 15 | #define A_IN prhs[0] /* basis matrix */ 16 | #define X_IN prhs[1] /* data vectors */ 17 | #define S_IN prhs[2] /* initial guess for S */ 18 | #define SPARSITY_IN prhs[3] /* initial guess for S */ 19 | #define LAMBDA_IN prhs[4] /* precision */ 20 | #define BETA_IN prhs[5] /* prior steepness */ 21 | #define SIGMA_IN prhs[6] /* scaling parameter for prior */ 22 | #define TOL_IN prhs[7] /* tolerance */ 23 | #define MAXITER_IN prhs[8] /* maximum iterations for dfrpmin */ 24 | #define OUTFLAG_IN prhs[9] /* output flag */ 25 | #define NUMFLAG_IN prhs[10] /* pattern number output flag */ 26 | #define EPSILON_IN prhs[11] /* huber function epsilon */ 27 | 28 | #define S_OUT plhs[0] /* basis coeffs for each data vector */ 29 | #define NITS_OUT plhs[1] /* total iterations done by cg */ 30 | #define NF_OUT plhs[2] /* total P(s|x,A) calcs */ 31 | #define NG_OUT plhs[3] /* total d/ds P(s|x,A) calcs */ 32 | 33 | /* Define indexing macros for matricies */ 34 | 35 | /* L = dimension of input vectors 36 | * M = number of basis functions 37 | */ 38 | 39 | #define A_(i,j) A[(i) + (j)*L] /* A is L x M */ 40 | #define X_(i,n) X[(i) + (n)*L] /* X is L x npats */ 41 | 42 | #define Sout_(i,n) Sout[(i) + (n)*M] /* S is M x npats */ 43 | #define Sin_(i,n) Sin[(i) + (n)*M] /* S is M x npats */ 44 | 45 | #define AtA_(i,j) AtA[(i) + (j)*M] /* AtA is M x M */ 46 | 47 | /* Globals for using with frprmin */ 48 | 49 | static double *A; /* basis matrix */ 50 | static int L; /* data dimension */ 51 | static int M; /* number of basis vectors */ 52 | static double lambda; /* 1/noise_var */ 53 | static double beta; /* prior steepness */ 54 | static double sigma; /* prior scaling */ 55 | static double k1, k2, k3; /* precomputed constants for f1dim */ 56 | 57 | static double *x; /* current data vector being fitted */ 58 | static double *s0; /* init coefficient vector (1:M) */ 59 | static double *d; /* search dir. 
coefficient vector (1:M) */ 60 | static int outflag; /* print search progress */ 61 | 62 | static double *AtA; /* Only compute A'*A once (1:M,1:M) */ 63 | static double *Atx; /* A*x (1:M) */ 64 | 65 | static int fcount, gcount; 66 | 67 | #define SP_LOG 0 68 | #define SP_HUBER_L1 1 69 | #define SP_EPS_L1 2 70 | static int g_sparsity_func; 71 | static double g_epsilon; /* use global variable for huber function epsilon */ 72 | 73 | static void init_global_arrays() 74 | { 75 | int i, j, k; 76 | double *Ai, *Aj, sum; 77 | 78 | x = (double *) malloc(L * sizeof(double)); 79 | s0 = (double *) malloc(M * sizeof(double)); 80 | d = (double *) malloc(M * sizeof(double)); 81 | AtA = (double *) malloc(M * M * sizeof(double)); 82 | Atx = (double *) malloc(M * sizeof(double)); 83 | 84 | /* Calc A'*A */ 85 | for (i = 0; i < M; i++) { 86 | Ai = A + i * L; 87 | for (j = 0; j < M; j++) { 88 | Aj = A + j * L; 89 | sum = 0.0; 90 | for (k = 0; k < L; k++) { 91 | sum += Ai[k] * Aj[k]; 92 | } 93 | AtA_(i, j) = sum; 94 | } 95 | } 96 | } 97 | 98 | static void free_global_arrays() 99 | { 100 | 101 | free((double *) x); 102 | free((double *) s0); 103 | free((double *) d); 104 | free((double *) AtA); 105 | free((double *) Atx); 106 | } 107 | 108 | 109 | 110 | float init_f1dim(s1, d1) 111 | float *s1, *d1; 112 | { 113 | register int i, j; 114 | register double As, Ag, sum; 115 | register float fval; 116 | extern double sparse(); 117 | 118 | for (i = 0; i < M; i++) { 119 | s0[i] = s1[i + 1]; 120 | d[i] = d1[i + 1]; 121 | } 122 | k1 = k2 = k3 = 0; 123 | for (i = 0; i < L; i++) { 124 | As = Ag = 0; 125 | for (j = 0; j < M; j++) { 126 | As += A_(i, j) * s0[j]; 127 | Ag += A_(i, j) * d[j]; 128 | } 129 | k1 += As * (As - 2 * x[i]); 130 | k2 += Ag * (As - x[i]); 131 | k3 += Ag * Ag; 132 | } 133 | k1 *= 0.5 * lambda; 134 | k2 *= lambda; 135 | k3 *= 0.5 * lambda; 136 | 137 | fval = k1; 138 | 139 | sum = 0; 140 | for (i = 0; i < M; i++) 141 | sum += sparse(s0[i] / sigma); 142 | fval += beta * sum; 143 | 144 | fcount++; 145 | 146 | return (fval); 147 | } 148 | 149 | float f1dim(alpha) 150 | float alpha; 151 | { 152 | int i; 153 | double sum; 154 | float fval; 155 | extern double sparse(); 156 | 157 | fval = k1 + (k2 + k3 * alpha) * alpha; 158 | 159 | sum = 0; 160 | for (i = 0; i < M; i++) { 161 | sum += sparse((s0[i] + alpha * d[i]) / sigma); 162 | } 163 | fval += beta * sum; 164 | 165 | fcount++; 166 | 167 | return (fval); 168 | } 169 | 170 | 171 | /* 172 | * Gradient evaluation used by conj grad descent 173 | */ 174 | void dfunc(p, grad) 175 | float *p, *grad; 176 | { 177 | register int i, j; 178 | register double sum, *cptr, bos = beta / sigma; 179 | register float *p1; 180 | extern double sparse_prime(); 181 | 182 | p1 = &p[1]; 183 | 184 | for (i = 0; i < M; i++) { 185 | cptr = AtA + i * M; 186 | sum = 0; 187 | for (j = 0; j < M; j++) { 188 | sum += p1[j] * *cptr++; 189 | } 190 | grad[i + 1] = lambda * (sum - Atx[i]) + bos * sparse_prime((double) p1[i] / sigma); 191 | } 192 | gcount++; 193 | } 194 | 195 | double sparse(x) 196 | double x; 197 | { 198 | if (g_sparsity_func== SP_LOG) { 199 | return (log(1.0 + x * x)); 200 | } else if (g_sparsity_func== SP_HUBER_L1) { 201 | /* retval(idx_in) = 1/(2*eps).*x(idx_in).^2; 202 | retval(idx_out) = 1/2.*(2.*abs(x(idx_out))-eps); */ 203 | if (fabs(x) < g_epsilon) 204 | return x*x/(2.0*g_epsilon); /*1.0/(2.0*g_epsilon)* x*x;*/ 205 | else 206 | return (2*abs(x)-g_epsilon)/2.0; /*1.0/2.0* (2*abs(x)-g_epsilon);*/ 207 | } else if (g_sparsity_func== SP_EPS_L1) { 208 | return (sqrt(x * x + 
g_epsilon)); 209 | } 210 | 211 | fprintf(stderr, "Error: sparsity function is not properly specified!\n"); 212 | exit(-1); 213 | } 214 | 215 | double sparse_prime(x) 216 | double x; 217 | { 218 | if (g_sparsity_func== SP_LOG) { 219 | return (2 * x / (1.0 + x * x)); 220 | } else if (g_sparsity_func== SP_HUBER_L1) { 221 | /* retval(idx_in) = 1/(2*eps).* 2.0.*x(idx_in); 222 | retval(idx_out) = 1/2.* 2.*sign(x(idx_out)); */ 223 | if (fabs(x) < g_epsilon) 224 | return x/ g_epsilon; /*1.0/(2.0*g_epsilon)* 2.0*x;*/ 225 | else 226 | return sgn(x); 227 | } else if (g_sparsity_func== SP_EPS_L1) { 228 | return x/sqrt(x * x + g_epsilon); 229 | } 230 | 231 | fprintf(stderr, "Error: sparsity function is not properly specified!\n"); 232 | exit(-2); 233 | 234 | } 235 | 236 | void iter_do() 237 | { 238 | } 239 | 240 | 241 | #include 242 | extern int ITMAX; 243 | 244 | void cgf(double *Sout, double *nits, double *nf, double *ng, double *Sin, double *X, int npats, double tol, int maxiter, int numflag) 245 | { 246 | double sum; 247 | float fret; 248 | int niter, l, m, n; 249 | float *p; 250 | 251 | *nits = *nf = *ng = 0.0; 252 | ITMAX = 10; 253 | 254 | init_global_arrays(); 255 | p = vector(1, M); 256 | 257 | for (n = 0; n < npats; n++) { 258 | if (numflag) { 259 | fprintf(stderr, "\r%d", n + 1); 260 | fflush(stderr); 261 | } 262 | 263 | for (l = 0; l < L; l++) { 264 | x[l] = X_(l, n); 265 | } 266 | 267 | for (m = 0; m < M; m++) { 268 | 269 | /* precompute Atx for this pattern */ 270 | sum = 0.0; 271 | for (l = 0; l < L; l++) { 272 | sum += A_(l, m) * x[l]; 273 | } 274 | Atx[m] = sum; 275 | 276 | /* copy initial guess */ 277 | p[m + 1] = Sin_(m, n); 278 | } 279 | 280 | fcount = gcount = 0; 281 | 282 | frprmn(p, M, (float) tol, &niter, &fret, init_f1dim, f1dim, dfunc); 283 | 284 | *nits += (double) niter; 285 | *nf += (double) fcount; 286 | *ng += (double) gcount; 287 | 288 | if (outflag) { 289 | fprintf(stdout, "\nfret=%f niters=%d fcount=%d gcount=%d\n", fret, niter, fcount, gcount); 290 | fflush(stdout); 291 | } 292 | 293 | /* copy back solution */ 294 | for (m = 0; m < M; m++) { 295 | Sout_(m, n) = p[m + 1]; 296 | } 297 | } 298 | 299 | free_global_arrays(); 300 | free_vector(p, 1, n); 301 | } 302 | 303 | 304 | void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) 305 | { 306 | double *Sout, nits = 0, nf = 0, ng = 0, *Sin; 307 | double *X, tol; 308 | int maxiter, npats, numflag, i; 309 | 310 | /* Check for proper number of arguments */ 311 | 312 | if (nrhs < 7) { 313 | mexErrMsgTxt("cgf requires 6 input arguments."); 314 | } else if (nlhs < 1) { 315 | mexErrMsgTxt("cgf requires 1 output argument."); 316 | } 317 | 318 | /* Assign pointers to the various parameters */ 319 | 320 | A = mxGetPr(A_IN); 321 | X = mxGetPr(X_IN); 322 | Sin = mxGetPr(S_IN); 323 | g_sparsity_func = mxGetScalar(SPARSITY_IN); 324 | lambda = mxGetScalar(LAMBDA_IN); 325 | beta = mxGetScalar(BETA_IN); 326 | sigma = mxGetScalar(SIGMA_IN); 327 | 328 | /*fprintf(stderr,"--------------\n"); 329 | fprintf(stderr,"g_sparsity_func = %d\n", g_sparsity_func); 330 | fprintf(stderr,"lambda = %f\n", lambda); 331 | fprintf(stderr,"beta = %f\n", beta); 332 | fprintf(stderr,"sigma = %f\n", sigma); 333 | */ 334 | 335 | 336 | if (nrhs < 8) { 337 | tol = 0.1; 338 | } else { 339 | tol = mxGetScalar(TOL_IN); 340 | } 341 | 342 | if (nrhs < 9) { 343 | maxiter = 100; 344 | } else { 345 | maxiter = (int) mxGetScalar(MAXITER_IN); 346 | } 347 | 348 | if (nrhs < 10) { 349 | outflag = 0; 350 | } else { 351 | outflag = (int) 
mxGetScalar(OUTFLAG_IN); 352 | } 353 | 354 | if (nrhs < 11) { 355 | numflag = 0; 356 | } else { 357 | numflag = (int) mxGetScalar(NUMFLAG_IN); 358 | } 359 | 360 | /* This is only for sparsity type = SP_HUBER_L1, SP_EPS_L1 */ 361 | if (nrhs < 12) { 362 | g_epsilon = 0.5; 363 | } else { 364 | g_epsilon = mxGetScalar(EPSILON_IN); 365 | } 366 | 367 | 368 | L = (int) mxGetM(A_IN); 369 | M = (int) mxGetN(A_IN); 370 | npats = (int) mxGetN(X_IN); 371 | 372 | /* Create a matrix for the return argument */ 373 | 374 | S_OUT = mxCreateDoubleMatrix(M, npats, mxREAL); 375 | Sout = mxGetPr(S_OUT); 376 | 377 | if (nlhs > 1) { 378 | NITS_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 379 | } 380 | if (nlhs > 2) { 381 | NF_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 382 | } 383 | if (nlhs > 3) { 384 | NG_OUT = mxCreateDoubleMatrix(1, 1, mxREAL); 385 | } 386 | 387 | /* Do the actual computations in a subroutine */ 388 | 389 | cgf(Sout, &nits, &nf, &ng, Sin, X, npats, tol, maxiter, numflag); 390 | 391 | if (nlhs > 1) { 392 | *(mxGetPr(NITS_OUT)) = nits; 393 | } 394 | if (nlhs > 2) { 395 | *(mxGetPr(NF_OUT)) = nf; 396 | } 397 | if (nlhs > 3) { 398 | *(mxGetPr(NG_OUT)) = ng; 399 | } 400 | } 401 | 402 | #undef A_ 403 | #undef X_ 404 | #undef Sout_ 405 | #undef Sin_ 406 | #undef AtA_ 407 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/cgf_sc2.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/cgf_sc2.dll -------------------------------------------------------------------------------- /RegularizedSC/sc2/cgf_sc2.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/cgf_sc2.mexa64 -------------------------------------------------------------------------------- /RegularizedSC/sc2/cgf_sc2.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tingfengainiaini/sparseCodingSuperResolution/f3a46d599507916037718e54b04f211786a0e431/RegularizedSC/sc2/cgf_sc2.mexglx -------------------------------------------------------------------------------- /RegularizedSC/sc2/getObjective.asv: -------------------------------------------------------------------------------- 1 | function [fobj] = getObjective(A, S, X, beta) 2 | 3 | E = A*S - X; 4 | fobj= sum(sum(E.^2)); 5 | 6 | E = beta*sum(sum(S.^2)); 7 | 8 | 9 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/getObjective2.m: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, fsparsity] = getObjective2(A, S, X, sparsity, noise_var, beta, sigma, epsilon) 2 | 3 | 4 | if ~strcmp(sparsity, 'log') && ~strcmp(sparsity, 'huberL1') && ~strcmp(sparsity,'epsL1') && ... 5 | ~strcmp(sparsity,'FS') && ~strcmp(sparsity, 'L1') && ~strcmp(sparsity,'LARS') && ... 
6 | ~strcmp(sparsity, 'trueL1') && ~strcmp(sparsity, 'logpos') 7 | error('sparsity function is not properly specified!\n'); 8 | end 9 | 10 | if strcmp(sparsity, 'huberL1') || strcmp(sparsity, 'epsL1') 11 | if ~exist('epsilon','var') || isempty(epsilon) || epsilon==0 12 | error('epsilon was not set properly!\n') 13 | end 14 | end 15 | 16 | 17 | E = A*S - X; 18 | lambda=1/noise_var; 19 | fresidue = 0.5*lambda*sum(sum(E.^2)); 20 | 21 | if strcmp(sparsity, 'log') 22 | fsparsity = beta*sum(sum(log(1+(S/sigma).^2))); 23 | elseif strcmp(sparsity, 'huberL1') 24 | fsparsity = beta*sum(sum(huber_func(S/sigma, epsilon))); 25 | elseif strcmp(sparsity, 'epsL1') 26 | fsparsity = beta*sum(sum(sqrt(epsilon+(S/sigma).^2))); 27 | elseif strcmp(sparsity, 'L1') | strcmp(sparsity, 'LARS') | strcmp(sparsity, 'trueL1') | strcmp(sparsity, 'FS') 28 | fsparsity = beta*sum(sum(abs(S/sigma))); 29 | elseif strcmp(sparsity, 'logpos') 30 | fsparsity = beta*sum(sum(log(1+(S/sigma)))); 31 | end 32 | 33 | fobj = fresidue + fsparsity; 34 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/getObjective3.m: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, fsparsity] = getObjective3(A, S, X, beta) 2 | 3 | E = A*S - X; 4 | fresidue = 0.5*sum(sum(E.^2)); 5 | fsparsity = beta*sum(sum(abs(S))); 6 | 7 | fobj = fresidue + fsparsity; 8 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/getObjective_knn.m: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, freg] = getObjective_knn(A, S, X, beta) 2 | 3 | E = double(A)*S - double(X); 4 | 5 | fresidue = sum(sum(E.^2)); 6 | freg = beta*sum(sum(S.^2)); 7 | 8 | fobj= fresidue + freg; 9 | 10 | 11 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/getObjective_sc.m: -------------------------------------------------------------------------------- 1 | function [fobj, fresidue, freg] = getObjective_sc(A, S, X, beta) 2 | 3 | E = double(A)*S - double(X); 4 | fresidue = sum(sum(E.^2)); 5 | freg = beta*sum(sum(abs(S))); 6 | 7 | fobj= fresidue + freg; 8 | 9 | 10 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/makefile.linux: -------------------------------------------------------------------------------- 1 | MEX = mex 2 | NRFDIR = ./nrf 3 | 4 | all: cgf_sc.c 5 | make -C nrf -f makefile.linux 6 | $(MEX) -I$(NRFDIR) -L$(NRFDIR) -lnrfopt cgf_sc.c -o cgf_sc2 7 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/makefile.win32: -------------------------------------------------------------------------------- 1 | MEX = C:\MATLAB6p5\bin\win32\mex.bat #mex 2 | NRFDIR = ./nrf 3 | MEXT = dll 4 | 5 | all: cgf_sc.c 6 | make -C nrf -f makefile.win32 7 | $(MEX) -I$(NRFDIR) cgf_sc.c nrf/libnrfopt.a -output cgf_sc2 8 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/entries: -------------------------------------------------------------------------------- 1 | 9 2 | 3 | dir 4 | 119 5 | svn+ssh://samba.ifp.uiuc.edu/homes/jyang29/svn/codes/Hierarchical%20Discriminant%20Sparse%20Coding/sparse_coding_backup1/sparse_coding_backup1/honglaklee/code/sc2/nrf 6 | svn+ssh://samba.ifp.uiuc.edu/homes/jyang29/svn/codes 7 | 8 | 9 | 10 | 2009-02-17T22:37:45.240477Z 11 | 1 12 | jyang29 13 | 14 | 15 | 
svn:special svn:externals svn:needs-lock 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | b58e5d2a-d51e-4df9-a013-8d2d35c93361 28 | 29 | impure.c 30 | file 31 | 32 | 33 | 34 | 35 | 2009-03-06T03:00:13.000000Z 36 | ad8d978bc97aa52450b720ec82177efd 37 | 2009-02-17T22:37:45.240477Z 38 | 1 39 | jyang29 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 396 62 | 63 | nrutil.h 64 | file 65 | 66 | 67 | 68 | 69 | 2009-03-06T03:00:13.000000Z 70 | 3ccc795ef1c1c06ec68287533b6a4431 71 | 2009-02-17T22:37:45.240477Z 72 | 1 73 | jyang29 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 3431 96 | 97 | frprmn.c 98 | file 99 | 100 | 101 | 102 | 103 | 2009-03-06T03:00:13.000000Z 104 | 9f8814a7571d590804fbaea92cd8e0d6 105 | 2009-02-17T22:37:45.240477Z 106 | 1 107 | jyang29 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 1198 130 | 131 | linmin.c 132 | file 133 | 134 | 135 | 136 | 137 | 2009-03-06T03:00:13.000000Z 138 | 3c529e0e8067ff5df966e307c0e0f892 139 | 2009-02-17T22:37:45.240477Z 140 | 1 141 | jyang29 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 438 164 | 165 | brent.c 166 | file 167 | 168 | 169 | 170 | 171 | 2009-03-06T03:00:13.000000Z 172 | 88771e2c55de3b96800a2512d152e8e4 173 | 2009-02-17T22:37:45.240477Z 174 | 1 175 | jyang29 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 1535 198 | 199 | makefile.linux 200 | file 201 | 202 | 203 | 204 | 205 | 2009-03-06T03:00:13.000000Z 206 | 31c2c1639d6763586621e025982e1631 207 | 2009-02-17T22:37:45.240477Z 208 | 1 209 | jyang29 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 241 232 | 233 | mnbrak.c 234 | file 235 | 236 | 237 | 238 | 239 | 2009-03-06T03:00:13.000000Z 240 | a37d25ec315cdf60cb8ff4798a8598c5 241 | 2009-02-17T22:37:45.240477Z 242 | 1 243 | jyang29 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 1340 266 | 267 | README 268 | file 269 | 270 | 271 | 272 | 273 | 2009-03-06T03:00:13.000000Z 274 | b2d6fd832a61e6657a827fee2c963e93 275 | 2009-02-17T22:37:45.240477Z 276 | 1 277 | jyang29 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 331 300 | 301 | getreent.c 302 | file 303 | 304 | 305 | 306 | 307 | 2009-03-06T03:00:13.000000Z 308 | 02eab9671887e09cfe7456eb8035d59c 309 | 2009-02-17T22:37:45.240477Z 310 | 1 311 | jyang29 312 | 313 | 314 | 315 | 316 | 317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 173 334 | 335 | makefile.win32 336 | file 337 | 338 | 339 | 340 | 341 | 2009-03-06T03:00:13.000000Z 342 | c3262781d0ce69b29e0ce2bec39247e8 343 | 2009-02-17T22:37:45.240477Z 344 | 1 345 | jyang29 346 | 347 | 348 | 349 | 350 | 351 | 352 | 353 | 354 | 355 | 356 | 357 | 358 | 359 | 360 | 361 | 362 | 363 | 364 | 365 | 366 | 367 | 230 368 | 369 | nrutil.c 370 | file 371 | 372 | 373 | 374 | 375 | 2009-03-06T03:00:13.000000Z 376 | 9788dc213599c6bbb37f0c23c60df729 377 | 2009-02-17T22:37:45.240477Z 378 | 1 379 | jyang29 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 
393 | 394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 8915 402 | 403 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/format: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/README.svn-base: -------------------------------------------------------------------------------- 1 | 2 | These are the numerical recipies routines for conjugate gradient 3 | descent. They have been modified to make the 1d line minimizations 4 | efficient. 5 | 6 | frprmn.c: main routine for executing conj. grad. 7 | linmin.c: line minimization algorithm 8 | mnbrak.c: routine for bracketing the minimum 9 | brent.c: finds minimum via Brent's method 10 | 11 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/brent.c.svn-base: -------------------------------------------------------------------------------- 1 | #include 2 | #include "nrutil.h" 3 | #define ITMAX 100 4 | #define CGOLD 0.3819660 5 | #define ZEPS 1.0e-10 6 | #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); 7 | 8 | float brent(ax,bx,cx,f,tol,xmin) 9 | float (*f)(),*xmin,ax,bx,cx,tol; 10 | { 11 | int iter; 12 | float a,b,d,etemp,fu,fv,fw,fx,p,q,r,tol1,tol2,u,v,w,x,xm; 13 | float e=0.0; 14 | 15 | a=(ax < cx ? ax : cx); 16 | b=(ax > cx ? ax : cx); 17 | x=w=v=bx; 18 | fw=fv=fx=(*f)(x); 19 | for (iter=1;iter<=ITMAX;iter++) { 20 | xm=0.5*(a+b); 21 | tol2=2.0*(tol1=tol*fabs(x)+ZEPS); 22 | if (fabs(x-xm) <= (tol2-0.5*(b-a))) { 23 | *xmin=x; 24 | return fx; 25 | } 26 | if (fabs(e) > tol1) { 27 | r=(x-w)*(fx-fv); 28 | q=(x-v)*(fx-fw); 29 | p=(x-v)*q-(x-w)*r; 30 | q=2.0*(q-r); 31 | if (q > 0.0) p = -p; 32 | q=fabs(q); 33 | etemp=e; 34 | e=d; 35 | if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x)) 36 | d=CGOLD*(e=(x >= xm ? a-x : b-x)); 37 | else { 38 | d=p/q; 39 | u=x+d; 40 | if (u-a < tol2 || b-u < tol2) 41 | d=SIGN(tol1,xm-x); 42 | } 43 | } else { 44 | d=CGOLD*(e=(x >= xm ? a-x : b-x)); 45 | } 46 | u=(fabs(d) >= tol1 ? x+d : x+SIGN(tol1,d)); 47 | fu=(*f)(u); 48 | if (fu <= fx) { 49 | if (u >= x) a=x; else b=x; 50 | SHFT(v,w,x,u) 51 | SHFT(fv,fw,fx,fu) 52 | } else { 53 | if (u < x) a=u; else b=u; 54 | if (fu <= fw || w == x) { 55 | v=w; 56 | w=u; 57 | fv=fw; 58 | fw=fu; 59 | } else if (fu <= fv || v == x || v == w) { 60 | v=u; 61 | fv=fu; 62 | } 63 | } 64 | } 65 | nrerror("Too many iterations in brent"); 66 | *xmin=x; 67 | return fx; 68 | } 69 | #undef ITMAX 70 | #undef CGOLD 71 | #undef ZEPS 72 | #undef SHFT 73 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". 
*/ 74 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/frprmn.c.svn-base: -------------------------------------------------------------------------------- 1 | /* 2 | * frprmn.c: modified frprmn for efficient line minimization 3 | */ 4 | #include 5 | #include "nrutil.h" 6 | 7 | #define EPS 1.0e-10 8 | #define FREEALL free_vector(xi,1,n);free_vector(h,1,n);free_vector(g,1,n); 9 | 10 | int ITMAX; 11 | 12 | void frprmn(p,n,ftol,iter,fret,init_f1dim,f1dim,dfunc) 13 | float (*init_f1dim)(),(*f1dim)(),*fret,ftol,p[]; 14 | int *iter,n; 15 | void (*dfunc)(); 16 | { 17 | void linmin(); 18 | int j,its; 19 | float gg,gam,fp,dgg; 20 | float *g,*h,*xi; 21 | 22 | g=vector(1,n); 23 | h=vector(1,n); 24 | xi=vector(1,n); 25 | (*dfunc)(p,xi); 26 | for (j=1;j<=n;j++) { 27 | g[j] = -xi[j]; 28 | xi[j]=h[j]=g[j]; 29 | } 30 | for (its=1;its<=ITMAX;its++) { 31 | *iter=its; 32 | fp=(*init_f1dim)(p,xi); 33 | linmin(p,xi,n,fret,f1dim); 34 | if (2.0*fabs(*fret-fp) <= ftol*(fabs(*fret)+fabs(fp)+EPS)) { 35 | FREEALL 36 | return; 37 | } 38 | (*dfunc)(p,xi); 39 | dgg=gg=0.0; 40 | for (j=1;j<=n;j++) { 41 | gg += g[j]*g[j]; 42 | dgg += (xi[j]+g[j])*xi[j]; 43 | } 44 | if (gg == 0.0) { 45 | FREEALL 46 | return; 47 | } 48 | gam=dgg/gg; 49 | for (j=1;j<=n;j++) { 50 | g[j] = -xi[j]; 51 | xi[j]=h[j]=g[j]+gam*h[j]; 52 | } 53 | } 54 | /* nrerror("Too many iterations in frprmn"); */ 55 | FREEALL 56 | return; 57 | } 58 | 59 | #undef EPS 60 | #undef FREEALL 61 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 62 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/getreent.c.svn-base: -------------------------------------------------------------------------------- 1 | /* default reentrant pointer when multithread enabled */ 2 | 3 | #include <_ansi.h> 4 | #include 5 | 6 | struct _reent * 7 | _DEFUN_VOID(__getreent) 8 | { 9 | return _impure_ptr; 10 | } 11 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/impure.c.svn-base: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | /* Note that there is a copy of this in sys/reent.h. */ 4 | #ifndef __ATTRIBUTE_IMPURE_PTR__ 5 | #define __ATTRIBUTE_IMPURE_PTR__ 6 | #endif 7 | 8 | #ifndef __ATTRIBUTE_IMPURE_DATA__ 9 | #define __ATTRIBUTE_IMPURE_DATA__ 10 | #endif 11 | 12 | static struct _reent __ATTRIBUTE_IMPURE_DATA__ impure_data = _REENT_INIT (impure_data); 13 | struct _reent *__ATTRIBUTE_IMPURE_PTR__ _impure_ptr = &impure_data; 14 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/linmin.c.svn-base: -------------------------------------------------------------------------------- 1 | #include "nrutil.h" 2 | #define TOL 2.0e-4 3 | 4 | void linmin(p,xi,n,fret,f1dim) 5 | float (*f1dim)(),*fret,p[],xi[]; 6 | int n; 7 | { 8 | float brent(); 9 | void mnbrak(); 10 | int j; 11 | float xx,xmin,fx,fb,fa,bx,ax; 12 | 13 | ax=0.0; 14 | xx=1.0; 15 | mnbrak(&ax,&xx,&bx,&fa,&fx,&fb,f1dim); 16 | *fret=brent(ax,xx,bx,f1dim,TOL,&xmin); 17 | for (j=1;j<=n;j++) { 18 | xi[j] *= xmin; 19 | p[j] += xi[j]; 20 | } 21 | } 22 | #undef TOL 23 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". 
*/ 24 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/makefile.linux.svn-base: -------------------------------------------------------------------------------- 1 | CC= gcc 2 | CFLAGS= -O -I. -fPIC 3 | 4 | OPTOBJS= frprmn.o linmin.o brent.o mnbrak.o nrutil.o #impure.o #getreent.o 5 | 6 | .c.o: 7 | ${CC} ${CFLAGS} -c $*.c 8 | 9 | libnrfopt.a: clean $(OPTOBJS) 10 | ar cr libnrfopt.a $(OPTOBJS) 11 | 12 | clean: 13 | rm -rf *.o *.a 14 | 15 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/makefile.win32.svn-base: -------------------------------------------------------------------------------- 1 | CC= gcc 2 | CFLAGS= -O -I. 3 | 4 | OPTOBJS= frprmn.o linmin.o brent.o mnbrak.o nrutil.o impure.o getreent.o 5 | 6 | .c.o: 7 | ${CC} ${CFLAGS} -c $*.c 8 | 9 | libnrfopt.a: clean $(OPTOBJS) 10 | ar cr libnrfopt.a $(OPTOBJS) 11 | 12 | clean: 13 | rm -rf *.o *.a 14 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/mnbrak.c.svn-base: -------------------------------------------------------------------------------- 1 | #include 2 | #include "nrutil.h" 3 | #define GOLD 1.618034 4 | #define GLIMIT 100.0 5 | #define TINY 1.0e-20 6 | #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); 7 | 8 | void mnbrak(ax,bx,cx,fa,fb,fc,func) 9 | float (*func)(),*ax,*bx,*cx,*fa,*fb,*fc; 10 | { 11 | float ulim,u,r,q,fu,dum; 12 | 13 | *fa=(*func)(*ax); 14 | *fb=(*func)(*bx); 15 | if (*fb > *fa) { 16 | SHFT(dum,*ax,*bx,dum) 17 | SHFT(dum,*fb,*fa,dum) 18 | } 19 | *cx=(*bx)+GOLD*(*bx-*ax); 20 | *fc=(*func)(*cx); 21 | while (*fb > *fc) { 22 | r=(*bx-*ax)*(*fb-*fc); 23 | q=(*bx-*cx)*(*fb-*fa); 24 | u=(*bx)-((*bx-*cx)*q-(*bx-*ax)*r)/ 25 | (2.0*SIGN(FMAX(fabs(q-r),TINY),q-r)); 26 | ulim=(*bx)+GLIMIT*(*cx-*bx); 27 | if ((*bx-u)*(u-*cx) > 0.0) { 28 | fu=(*func)(u); 29 | if (fu < *fc) { 30 | *ax=(*bx); 31 | *bx=u; 32 | *fa=(*fb); 33 | *fb=fu; 34 | return; 35 | } else if (fu > *fb) { 36 | *cx=u; 37 | *fc=fu; 38 | return; 39 | } 40 | u=(*cx)+GOLD*(*cx-*bx); 41 | fu=(*func)(u); 42 | } else if ((*cx-u)*(u-ulim) > 0.0) { 43 | fu=(*func)(u); 44 | if (fu < *fc) { 45 | SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx)) 46 | SHFT(*fb,*fc,fu,(*func)(u)) 47 | } 48 | } else if ((u-ulim)*(ulim-*cx) >= 0.0) { 49 | u=ulim; 50 | fu=(*func)(u); 51 | } else { 52 | u=(*cx)+GOLD*(*cx-*bx); 53 | fu=(*func)(u); 54 | } 55 | SHFT(*ax,*bx,*cx,u) 56 | SHFT(*fa,*fb,*fc,fu) 57 | } 58 | } 59 | #undef GOLD 60 | #undef GLIMIT 61 | #undef TINY 62 | #undef SHFT 63 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 64 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/.svn/text-base/nrutil.c.svn-base: -------------------------------------------------------------------------------- 1 | /* CAUTION: This is the traditional K&R C (only) version of the Numerical 2 | Recipes utility file nrutil.c. Do not confuse this file with the 3 | same-named file nrutil.c that is supplied in the same subdirectory or 4 | archive as the header file nrutil.h. *That* file contains both ANSI and 5 | traditional K&R versions, along with #ifdef macros to select the 6 | correct version. *This* file contains only traditional K&R. 
*/ 7 | 8 | #include 9 | #define NR_END 1 10 | #define FREE_ARG char* 11 | 12 | void nrerror(error_text) 13 | char error_text[]; 14 | /* Numerical Recipes standard error handler */ 15 | { 16 | void exit(); 17 | 18 | fprintf(stderr,"Numerical Recipes run-time error...\n"); 19 | fprintf(stderr,"%s\n",error_text); 20 | fprintf(stderr,"...now exiting to system...\n"); 21 | exit(1); 22 | } 23 | 24 | float *vector(nl,nh) 25 | long nh,nl; 26 | /* allocate a float vector with subscript range v[nl..nh] */ 27 | { 28 | float *v; 29 | 30 | v=(float *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(float))); 31 | if (!v) nrerror("allocation failure in vector()"); 32 | return v-nl+NR_END; 33 | } 34 | 35 | int *ivector(nl,nh) 36 | long nh,nl; 37 | /* allocate an int vector with subscript range v[nl..nh] */ 38 | { 39 | int *v; 40 | 41 | v=(int *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(int))); 42 | if (!v) nrerror("allocation failure in ivector()"); 43 | return v-nl+NR_END; 44 | } 45 | 46 | unsigned char *cvector(nl,nh) 47 | long nh,nl; 48 | /* allocate an unsigned char vector with subscript range v[nl..nh] */ 49 | { 50 | unsigned char *v; 51 | 52 | v=(unsigned char *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(unsigned char))); 53 | if (!v) nrerror("allocation failure in cvector()"); 54 | return v-nl+NR_END; 55 | } 56 | 57 | unsigned long *lvector(nl,nh) 58 | long nh,nl; 59 | /* allocate an unsigned long vector with subscript range v[nl..nh] */ 60 | { 61 | unsigned long *v; 62 | 63 | v=(unsigned long *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(long))); 64 | if (!v) nrerror("allocation failure in lvector()"); 65 | return v-nl+NR_END; 66 | } 67 | 68 | double *dvector(nl,nh) 69 | long nh,nl; 70 | /* allocate a double vector with subscript range v[nl..nh] */ 71 | { 72 | double *v; 73 | 74 | v=(double *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(double))); 75 | if (!v) nrerror("allocation failure in dvector()"); 76 | return v-nl+NR_END; 77 | } 78 | 79 | float **matrix(nrl,nrh,ncl,nch) 80 | long nch,ncl,nrh,nrl; 81 | /* allocate a float matrix with subscript range m[nrl..nrh][ncl..nch] */ 82 | { 83 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 84 | float **m; 85 | 86 | /* allocate pointers to rows */ 87 | m=(float **) malloc((unsigned int)((nrow+NR_END)*sizeof(float*))); 88 | if (!m) nrerror("allocation failure 1 in matrix()"); 89 | m += NR_END; 90 | m -= nrl; 91 | 92 | /* allocate rows and set pointers to them */ 93 | m[nrl]=(float *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(float))); 94 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 95 | m[nrl] += NR_END; 96 | m[nrl] -= ncl; 97 | 98 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 99 | 100 | /* return pointer to array of pointers to rows */ 101 | return m; 102 | } 103 | 104 | double **dmatrix(nrl,nrh,ncl,nch) 105 | long nch,ncl,nrh,nrl; 106 | /* allocate a double matrix with subscript range m[nrl..nrh][ncl..nch] */ 107 | { 108 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 109 | double **m; 110 | 111 | /* allocate pointers to rows */ 112 | m=(double **) malloc((unsigned int)((nrow+NR_END)*sizeof(double*))); 113 | if (!m) nrerror("allocation failure 1 in matrix()"); 114 | m += NR_END; 115 | m -= nrl; 116 | 117 | /* allocate rows and set pointers to them */ 118 | m[nrl]=(double *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(double))); 119 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 120 | m[nrl] += NR_END; 121 | m[nrl] -= ncl; 122 | 123 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 124 | 125 | /* return pointer to array of 
pointers to rows */ 126 | return m; 127 | } 128 | 129 | int **imatrix(nrl,nrh,ncl,nch) 130 | long nch,ncl,nrh,nrl; 131 | /* allocate a int matrix with subscript range m[nrl..nrh][ncl..nch] */ 132 | { 133 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 134 | int **m; 135 | 136 | /* allocate pointers to rows */ 137 | m=(int **) malloc((unsigned int)((nrow+NR_END)*sizeof(int*))); 138 | if (!m) nrerror("allocation failure 1 in matrix()"); 139 | m += NR_END; 140 | m -= nrl; 141 | 142 | 143 | /* allocate rows and set pointers to them */ 144 | m[nrl]=(int *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(int))); 145 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 146 | m[nrl] += NR_END; 147 | m[nrl] -= ncl; 148 | 149 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 150 | 151 | /* return pointer to array of pointers to rows */ 152 | return m; 153 | } 154 | 155 | float **submatrix(a,oldrl,oldrh,oldcl,oldch,newrl,newcl) 156 | float **a; 157 | long newcl,newrl,oldch,oldcl,oldrh,oldrl; 158 | /* point a submatrix [newrl..][newcl..] to a[oldrl..oldrh][oldcl..oldch] */ 159 | { 160 | long i,j,nrow=oldrh-oldrl+1,ncol=oldcl-newcl; 161 | float **m; 162 | 163 | /* allocate array of pointers to rows */ 164 | m=(float **) malloc((unsigned int) ((nrow+NR_END)*sizeof(float*))); 165 | if (!m) nrerror("allocation failure in submatrix()"); 166 | m += NR_END; 167 | m -= newrl; 168 | 169 | /* set pointers to rows */ 170 | for(i=oldrl,j=newrl;i<=oldrh;i++,j++) m[j]=a[i]+ncol; 171 | 172 | /* return pointer to array of pointers to rows */ 173 | return m; 174 | } 175 | 176 | float **convert_matrix(a,nrl,nrh,ncl,nch) 177 | float *a; 178 | long nch,ncl,nrh,nrl; 179 | /* allocate a float matrix m[nrl..nrh][ncl..nch] that points to the matrix 180 | declared in the standard C manner as a[nrow][ncol], where nrow=nrh-nrl+1 181 | and ncol=nch-ncl+1. The routine should be called with the address 182 | &a[0][0] as the first argument. */ 183 | { 184 | long i,j,nrow=nrh-nrl+1,ncol=nch-ncl+1; 185 | float **m; 186 | 187 | /* allocate pointers to rows */ 188 | m=(float **) malloc((unsigned int) ((nrow+NR_END)*sizeof(float*))); 189 | if (!m) nrerror("allocation failure in convert_matrix()"); 190 | m += NR_END; 191 | m -= nrl; 192 | 193 | /* set pointers to rows */ 194 | m[nrl]=a-ncl; 195 | for(i=1,j=nrl+1;i (dmaxarg2) ?\ 12 | (dmaxarg1) : (dmaxarg2)) 13 | 14 | static double dminarg1,dminarg2; 15 | #define DMIN(a,b) (dminarg1=(a),dminarg2=(b),(dminarg1) < (dminarg2) ?\ 16 | (dminarg1) : (dminarg2)) 17 | 18 | static float maxarg1,maxarg2; 19 | #define FMAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1) > (maxarg2) ?\ 20 | (maxarg1) : (maxarg2)) 21 | 22 | static float minarg1,minarg2; 23 | #define FMIN(a,b) (minarg1=(a),minarg2=(b),(minarg1) < (minarg2) ?\ 24 | (minarg1) : (minarg2)) 25 | 26 | static long lmaxarg1,lmaxarg2; 27 | #define LMAX(a,b) (lmaxarg1=(a),lmaxarg2=(b),(lmaxarg1) > (lmaxarg2) ?\ 28 | (lmaxarg1) : (lmaxarg2)) 29 | 30 | static long lminarg1,lminarg2; 31 | #define LMIN(a,b) (lminarg1=(a),lminarg2=(b),(lminarg1) < (lminarg2) ?\ 32 | (lminarg1) : (lminarg2)) 33 | 34 | static int imaxarg1,imaxarg2; 35 | #define IMAX(a,b) (imaxarg1=(a),imaxarg2=(b),(imaxarg1) > (imaxarg2) ?\ 36 | (imaxarg1) : (imaxarg2)) 37 | 38 | static int iminarg1,iminarg2; 39 | #define IMIN(a,b) (iminarg1=(a),iminarg2=(b),(iminarg1) < (iminarg2) ?\ 40 | (iminarg1) : (iminarg2)) 41 | 42 | #define SIGN(a,b) ((b) >= 0.0 ? 
fabs(a) : -fabs(a)) 43 | 44 | #if defined(__STDC__) || defined(ANSI) || defined(NRANSI) /* ANSI */ 45 | 46 | void nrerror(char error_text[]); 47 | float *vector(long nl, long nh); 48 | int *ivector(long nl, long nh); 49 | unsigned char *cvector(long nl, long nh); 50 | unsigned long *lvector(long nl, long nh); 51 | double *dvector(long nl, long nh); 52 | float **matrix(long nrl, long nrh, long ncl, long nch); 53 | double **dmatrix(long nrl, long nrh, long ncl, long nch); 54 | int **imatrix(long nrl, long nrh, long ncl, long nch); 55 | float **submatrix(float **a, long oldrl, long oldrh, long oldcl, long oldch, 56 | long newrl, long newcl); 57 | float **convert_matrix(float *a, long nrl, long nrh, long ncl, long nch); 58 | float ***f3tensor(long nrl, long nrh, long ncl, long nch, long ndl, long ndh); 59 | void free_vector(float *v, long nl, long nh); 60 | void free_ivector(int *v, long nl, long nh); 61 | void free_cvector(unsigned char *v, long nl, long nh); 62 | void free_lvector(unsigned long *v, long nl, long nh); 63 | void free_dvector(double *v, long nl, long nh); 64 | void free_matrix(float **m, long nrl, long nrh, long ncl, long nch); 65 | void free_dmatrix(double **m, long nrl, long nrh, long ncl, long nch); 66 | void free_imatrix(int **m, long nrl, long nrh, long ncl, long nch); 67 | void free_submatrix(float **b, long nrl, long nrh, long ncl, long nch); 68 | void free_convert_matrix(float **b, long nrl, long nrh, long ncl, long nch); 69 | void free_f3tensor(float ***t, long nrl, long nrh, long ncl, long nch, 70 | long ndl, long ndh); 71 | 72 | #else /* ANSI */ 73 | /* traditional - K&R */ 74 | 75 | void nrerror(); 76 | float *vector(); 77 | float **matrix(); 78 | float **submatrix(); 79 | float **convert_matrix(); 80 | float ***f3tensor(); 81 | double *dvector(); 82 | double **dmatrix(); 83 | int *ivector(); 84 | int **imatrix(); 85 | unsigned char *cvector(); 86 | unsigned long *lvector(); 87 | void free_vector(); 88 | void free_dvector(); 89 | void free_ivector(); 90 | void free_cvector(); 91 | void free_lvector(); 92 | void free_matrix(); 93 | void free_submatrix(); 94 | void free_convert_matrix(); 95 | void free_dmatrix(); 96 | void free_imatrix(); 97 | void free_f3tensor(); 98 | 99 | #endif /* ANSI */ 100 | 101 | #endif /* _NR_UTILS_H_ */ 102 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/README: -------------------------------------------------------------------------------- 1 | 2 | These are the numerical recipies routines for conjugate gradient 3 | descent. They have been modified to make the 1d line minimizations 4 | efficient. 5 | 6 | frprmn.c: main routine for executing conj. grad. 7 | linmin.c: line minimization algorithm 8 | mnbrak.c: routine for bracketing the minimum 9 | brent.c: finds minimum via Brent's method 10 | 11 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/brent.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "nrutil.h" 3 | #define ITMAX 100 4 | #define CGOLD 0.3819660 5 | #define ZEPS 1.0e-10 6 | #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); 7 | 8 | float brent(ax,bx,cx,f,tol,xmin) 9 | float (*f)(),*xmin,ax,bx,cx,tol; 10 | { 11 | int iter; 12 | float a,b,d,etemp,fu,fv,fw,fx,p,q,r,tol1,tol2,u,v,w,x,xm; 13 | float e=0.0; 14 | 15 | a=(ax < cx ? ax : cx); 16 | b=(ax > cx ? 
ax : cx); 17 | x=w=v=bx; 18 | fw=fv=fx=(*f)(x); 19 | for (iter=1;iter<=ITMAX;iter++) { 20 | xm=0.5*(a+b); 21 | tol2=2.0*(tol1=tol*fabs(x)+ZEPS); 22 | if (fabs(x-xm) <= (tol2-0.5*(b-a))) { 23 | *xmin=x; 24 | return fx; 25 | } 26 | if (fabs(e) > tol1) { 27 | r=(x-w)*(fx-fv); 28 | q=(x-v)*(fx-fw); 29 | p=(x-v)*q-(x-w)*r; 30 | q=2.0*(q-r); 31 | if (q > 0.0) p = -p; 32 | q=fabs(q); 33 | etemp=e; 34 | e=d; 35 | if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x)) 36 | d=CGOLD*(e=(x >= xm ? a-x : b-x)); 37 | else { 38 | d=p/q; 39 | u=x+d; 40 | if (u-a < tol2 || b-u < tol2) 41 | d=SIGN(tol1,xm-x); 42 | } 43 | } else { 44 | d=CGOLD*(e=(x >= xm ? a-x : b-x)); 45 | } 46 | u=(fabs(d) >= tol1 ? x+d : x+SIGN(tol1,d)); 47 | fu=(*f)(u); 48 | if (fu <= fx) { 49 | if (u >= x) a=x; else b=x; 50 | SHFT(v,w,x,u) 51 | SHFT(fv,fw,fx,fu) 52 | } else { 53 | if (u < x) a=u; else b=u; 54 | if (fu <= fw || w == x) { 55 | v=w; 56 | w=u; 57 | fv=fw; 58 | fw=fu; 59 | } else if (fu <= fv || v == x || v == w) { 60 | v=u; 61 | fv=fu; 62 | } 63 | } 64 | } 65 | nrerror("Too many iterations in brent"); 66 | *xmin=x; 67 | return fx; 68 | } 69 | #undef ITMAX 70 | #undef CGOLD 71 | #undef ZEPS 72 | #undef SHFT 73 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 74 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/frprmn.c: -------------------------------------------------------------------------------- 1 | /* 2 | * frprmn.c: modified frprmn for efficient line minimization 3 | */ 4 | #include 5 | #include "nrutil.h" 6 | 7 | #define EPS 1.0e-10 8 | #define FREEALL free_vector(xi,1,n);free_vector(h,1,n);free_vector(g,1,n); 9 | 10 | int ITMAX; 11 | 12 | void frprmn(p,n,ftol,iter,fret,init_f1dim,f1dim,dfunc) 13 | float (*init_f1dim)(),(*f1dim)(),*fret,ftol,p[]; 14 | int *iter,n; 15 | void (*dfunc)(); 16 | { 17 | void linmin(); 18 | int j,its; 19 | float gg,gam,fp,dgg; 20 | float *g,*h,*xi; 21 | 22 | g=vector(1,n); 23 | h=vector(1,n); 24 | xi=vector(1,n); 25 | (*dfunc)(p,xi); 26 | for (j=1;j<=n;j++) { 27 | g[j] = -xi[j]; 28 | xi[j]=h[j]=g[j]; 29 | } 30 | for (its=1;its<=ITMAX;its++) { 31 | *iter=its; 32 | fp=(*init_f1dim)(p,xi); 33 | linmin(p,xi,n,fret,f1dim); 34 | if (2.0*fabs(*fret-fp) <= ftol*(fabs(*fret)+fabs(fp)+EPS)) { 35 | FREEALL 36 | return; 37 | } 38 | (*dfunc)(p,xi); 39 | dgg=gg=0.0; 40 | for (j=1;j<=n;j++) { 41 | gg += g[j]*g[j]; 42 | dgg += (xi[j]+g[j])*xi[j]; 43 | } 44 | if (gg == 0.0) { 45 | FREEALL 46 | return; 47 | } 48 | gam=dgg/gg; 49 | for (j=1;j<=n;j++) { 50 | g[j] = -xi[j]; 51 | xi[j]=h[j]=g[j]+gam*h[j]; 52 | } 53 | } 54 | /* nrerror("Too many iterations in frprmn"); */ 55 | FREEALL 56 | return; 57 | } 58 | 59 | #undef EPS 60 | #undef FREEALL 61 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 62 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/getreent.c: -------------------------------------------------------------------------------- 1 | /* default reentrant pointer when multithread enabled */ 2 | 3 | #include <_ansi.h> 4 | #include 5 | 6 | struct _reent * 7 | _DEFUN_VOID(__getreent) 8 | { 9 | return _impure_ptr; 10 | } 11 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/impure.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | /* Note that there is a copy of this in sys/reent.h. 
*/ 4 | #ifndef __ATTRIBUTE_IMPURE_PTR__ 5 | #define __ATTRIBUTE_IMPURE_PTR__ 6 | #endif 7 | 8 | #ifndef __ATTRIBUTE_IMPURE_DATA__ 9 | #define __ATTRIBUTE_IMPURE_DATA__ 10 | #endif 11 | 12 | static struct _reent __ATTRIBUTE_IMPURE_DATA__ impure_data = _REENT_INIT (impure_data); 13 | struct _reent *__ATTRIBUTE_IMPURE_PTR__ _impure_ptr = &impure_data; 14 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/linmin.c: -------------------------------------------------------------------------------- 1 | #include "nrutil.h" 2 | #define TOL 2.0e-4 3 | 4 | void linmin(p,xi,n,fret,f1dim) 5 | float (*f1dim)(),*fret,p[],xi[]; 6 | int n; 7 | { 8 | float brent(); 9 | void mnbrak(); 10 | int j; 11 | float xx,xmin,fx,fb,fa,bx,ax; 12 | 13 | ax=0.0; 14 | xx=1.0; 15 | mnbrak(&ax,&xx,&bx,&fa,&fx,&fb,f1dim); 16 | *fret=brent(ax,xx,bx,f1dim,TOL,&xmin); 17 | for (j=1;j<=n;j++) { 18 | xi[j] *= xmin; 19 | p[j] += xi[j]; 20 | } 21 | } 22 | #undef TOL 23 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 24 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/makefile.linux: -------------------------------------------------------------------------------- 1 | CC= gcc 2 | CFLAGS= -O -I. -fPIC 3 | 4 | OPTOBJS= frprmn.o linmin.o brent.o mnbrak.o nrutil.o #impure.o #getreent.o 5 | 6 | .c.o: 7 | ${CC} ${CFLAGS} -c $*.c 8 | 9 | libnrfopt.a: clean $(OPTOBJS) 10 | ar cr libnrfopt.a $(OPTOBJS) 11 | 12 | clean: 13 | rm -rf *.o *.a 14 | 15 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/makefile.win32: -------------------------------------------------------------------------------- 1 | CC= gcc 2 | CFLAGS= -O -I. 3 | 4 | OPTOBJS= frprmn.o linmin.o brent.o mnbrak.o nrutil.o impure.o getreent.o 5 | 6 | .c.o: 7 | ${CC} ${CFLAGS} -c $*.c 8 | 9 | libnrfopt.a: clean $(OPTOBJS) 10 | ar cr libnrfopt.a $(OPTOBJS) 11 | 12 | clean: 13 | rm -rf *.o *.a 14 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/mnbrak.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "nrutil.h" 3 | #define GOLD 1.618034 4 | #define GLIMIT 100.0 5 | #define TINY 1.0e-20 6 | #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); 7 | 8 | void mnbrak(ax,bx,cx,fa,fb,fc,func) 9 | float (*func)(),*ax,*bx,*cx,*fa,*fb,*fc; 10 | { 11 | float ulim,u,r,q,fu,dum; 12 | 13 | *fa=(*func)(*ax); 14 | *fb=(*func)(*bx); 15 | if (*fb > *fa) { 16 | SHFT(dum,*ax,*bx,dum) 17 | SHFT(dum,*fb,*fa,dum) 18 | } 19 | *cx=(*bx)+GOLD*(*bx-*ax); 20 | *fc=(*func)(*cx); 21 | while (*fb > *fc) { 22 | r=(*bx-*ax)*(*fb-*fc); 23 | q=(*bx-*cx)*(*fb-*fa); 24 | u=(*bx)-((*bx-*cx)*q-(*bx-*ax)*r)/ 25 | (2.0*SIGN(FMAX(fabs(q-r),TINY),q-r)); 26 | ulim=(*bx)+GLIMIT*(*cx-*bx); 27 | if ((*bx-u)*(u-*cx) > 0.0) { 28 | fu=(*func)(u); 29 | if (fu < *fc) { 30 | *ax=(*bx); 31 | *bx=u; 32 | *fa=(*fb); 33 | *fb=fu; 34 | return; 35 | } else if (fu > *fb) { 36 | *cx=u; 37 | *fc=fu; 38 | return; 39 | } 40 | u=(*cx)+GOLD*(*cx-*bx); 41 | fu=(*func)(u); 42 | } else if ((*cx-u)*(u-ulim) > 0.0) { 43 | fu=(*func)(u); 44 | if (fu < *fc) { 45 | SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx)) 46 | SHFT(*fb,*fc,fu,(*func)(u)) 47 | } 48 | } else if ((u-ulim)*(ulim-*cx) >= 0.0) { 49 | u=ulim; 50 | fu=(*func)(u); 51 | } else { 52 | u=(*cx)+GOLD*(*cx-*bx); 53 | fu=(*func)(u); 54 | } 55 | SHFT(*ax,*bx,*cx,u) 56 | SHFT(*fa,*fb,*fc,fu) 57 | } 58 | } 59 | #undef GOLD 60 | 
#undef GLIMIT 61 | #undef TINY 62 | #undef SHFT 63 | /* (C) Copr. 1986-92 Numerical Recipes Software 6=Mn.Y". */ 64 | -------------------------------------------------------------------------------- /RegularizedSC/sc2/nrf/nrutil.c: -------------------------------------------------------------------------------- 1 | /* CAUTION: This is the traditional K&R C (only) version of the Numerical 2 | Recipes utility file nrutil.c. Do not confuse this file with the 3 | same-named file nrutil.c that is supplied in the same subdirectory or 4 | archive as the header file nrutil.h. *That* file contains both ANSI and 5 | traditional K&R versions, along with #ifdef macros to select the 6 | correct version. *This* file contains only traditional K&R. */ 7 | 8 | #include 9 | #define NR_END 1 10 | #define FREE_ARG char* 11 | 12 | void nrerror(error_text) 13 | char error_text[]; 14 | /* Numerical Recipes standard error handler */ 15 | { 16 | void exit(); 17 | 18 | fprintf(stderr,"Numerical Recipes run-time error...\n"); 19 | fprintf(stderr,"%s\n",error_text); 20 | fprintf(stderr,"...now exiting to system...\n"); 21 | exit(1); 22 | } 23 | 24 | float *vector(nl,nh) 25 | long nh,nl; 26 | /* allocate a float vector with subscript range v[nl..nh] */ 27 | { 28 | float *v; 29 | 30 | v=(float *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(float))); 31 | if (!v) nrerror("allocation failure in vector()"); 32 | return v-nl+NR_END; 33 | } 34 | 35 | int *ivector(nl,nh) 36 | long nh,nl; 37 | /* allocate an int vector with subscript range v[nl..nh] */ 38 | { 39 | int *v; 40 | 41 | v=(int *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(int))); 42 | if (!v) nrerror("allocation failure in ivector()"); 43 | return v-nl+NR_END; 44 | } 45 | 46 | unsigned char *cvector(nl,nh) 47 | long nh,nl; 48 | /* allocate an unsigned char vector with subscript range v[nl..nh] */ 49 | { 50 | unsigned char *v; 51 | 52 | v=(unsigned char *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(unsigned char))); 53 | if (!v) nrerror("allocation failure in cvector()"); 54 | return v-nl+NR_END; 55 | } 56 | 57 | unsigned long *lvector(nl,nh) 58 | long nh,nl; 59 | /* allocate an unsigned long vector with subscript range v[nl..nh] */ 60 | { 61 | unsigned long *v; 62 | 63 | v=(unsigned long *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(long))); 64 | if (!v) nrerror("allocation failure in lvector()"); 65 | return v-nl+NR_END; 66 | } 67 | 68 | double *dvector(nl,nh) 69 | long nh,nl; 70 | /* allocate a double vector with subscript range v[nl..nh] */ 71 | { 72 | double *v; 73 | 74 | v=(double *)malloc((unsigned int) ((nh-nl+1+NR_END)*sizeof(double))); 75 | if (!v) nrerror("allocation failure in dvector()"); 76 | return v-nl+NR_END; 77 | } 78 | 79 | float **matrix(nrl,nrh,ncl,nch) 80 | long nch,ncl,nrh,nrl; 81 | /* allocate a float matrix with subscript range m[nrl..nrh][ncl..nch] */ 82 | { 83 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 84 | float **m; 85 | 86 | /* allocate pointers to rows */ 87 | m=(float **) malloc((unsigned int)((nrow+NR_END)*sizeof(float*))); 88 | if (!m) nrerror("allocation failure 1 in matrix()"); 89 | m += NR_END; 90 | m -= nrl; 91 | 92 | /* allocate rows and set pointers to them */ 93 | m[nrl]=(float *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(float))); 94 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 95 | m[nrl] += NR_END; 96 | m[nrl] -= ncl; 97 | 98 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 99 | 100 | /* return pointer to array of pointers to rows */ 101 | return m; 102 | } 103 | 104 | double 
**dmatrix(nrl,nrh,ncl,nch) 105 | long nch,ncl,nrh,nrl; 106 | /* allocate a double matrix with subscript range m[nrl..nrh][ncl..nch] */ 107 | { 108 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 109 | double **m; 110 | 111 | /* allocate pointers to rows */ 112 | m=(double **) malloc((unsigned int)((nrow+NR_END)*sizeof(double*))); 113 | if (!m) nrerror("allocation failure 1 in matrix()"); 114 | m += NR_END; 115 | m -= nrl; 116 | 117 | /* allocate rows and set pointers to them */ 118 | m[nrl]=(double *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(double))); 119 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 120 | m[nrl] += NR_END; 121 | m[nrl] -= ncl; 122 | 123 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 124 | 125 | /* return pointer to array of pointers to rows */ 126 | return m; 127 | } 128 | 129 | int **imatrix(nrl,nrh,ncl,nch) 130 | long nch,ncl,nrh,nrl; 131 | /* allocate a int matrix with subscript range m[nrl..nrh][ncl..nch] */ 132 | { 133 | long i, nrow=nrh-nrl+1,ncol=nch-ncl+1; 134 | int **m; 135 | 136 | /* allocate pointers to rows */ 137 | m=(int **) malloc((unsigned int)((nrow+NR_END)*sizeof(int*))); 138 | if (!m) nrerror("allocation failure 1 in matrix()"); 139 | m += NR_END; 140 | m -= nrl; 141 | 142 | 143 | /* allocate rows and set pointers to them */ 144 | m[nrl]=(int *) malloc((unsigned int)((nrow*ncol+NR_END)*sizeof(int))); 145 | if (!m[nrl]) nrerror("allocation failure 2 in matrix()"); 146 | m[nrl] += NR_END; 147 | m[nrl] -= ncl; 148 | 149 | for(i=nrl+1;i<=nrh;i++) m[i]=m[i-1]+ncol; 150 | 151 | /* return pointer to array of pointers to rows */ 152 | return m; 153 | } 154 | 155 | float **submatrix(a,oldrl,oldrh,oldcl,oldch,newrl,newcl) 156 | float **a; 157 | long newcl,newrl,oldch,oldcl,oldrh,oldrl; 158 | /* point a submatrix [newrl..][newcl..] to a[oldrl..oldrh][oldcl..oldch] */ 159 | { 160 | long i,j,nrow=oldrh-oldrl+1,ncol=oldcl-newcl; 161 | float **m; 162 | 163 | /* allocate array of pointers to rows */ 164 | m=(float **) malloc((unsigned int) ((nrow+NR_END)*sizeof(float*))); 165 | if (!m) nrerror("allocation failure in submatrix()"); 166 | m += NR_END; 167 | m -= newrl; 168 | 169 | /* set pointers to rows */ 170 | for(i=oldrl,j=newrl;i<=oldrh;i++,j++) m[j]=a[i]+ncol; 171 | 172 | /* return pointer to array of pointers to rows */ 173 | return m; 174 | } 175 | 176 | float **convert_matrix(a,nrl,nrh,ncl,nch) 177 | float *a; 178 | long nch,ncl,nrh,nrl; 179 | /* allocate a float matrix m[nrl..nrh][ncl..nch] that points to the matrix 180 | declared in the standard C manner as a[nrow][ncol], where nrow=nrh-nrl+1 181 | and ncol=nch-ncl+1. The routine should be called with the address 182 | &a[0][0] as the first argument. 
*/ 183 | { 184 | long i,j,nrow=nrh-nrl+1,ncol=nch-ncl+1; 185 | float **m; 186 | 187 | /* allocate pointers to rows */ 188 | m=(float **) malloc((unsigned int) ((nrow+NR_END)*sizeof(float*))); 189 | if (!m) nrerror("allocation failure in convert_matrix()"); 190 | m += NR_END; 191 | m -= nrl; 192 | 193 | /* set pointers to rows */ 194 | m[nrl]=a-ncl; 195 | for(i=1,j=nrl+1;i (dmaxarg2) ?\ 12 | (dmaxarg1) : (dmaxarg2)) 13 | 14 | static double dminarg1,dminarg2; 15 | #define DMIN(a,b) (dminarg1=(a),dminarg2=(b),(dminarg1) < (dminarg2) ?\ 16 | (dminarg1) : (dminarg2)) 17 | 18 | static float maxarg1,maxarg2; 19 | #define FMAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1) > (maxarg2) ?\ 20 | (maxarg1) : (maxarg2)) 21 | 22 | static float minarg1,minarg2; 23 | #define FMIN(a,b) (minarg1=(a),minarg2=(b),(minarg1) < (minarg2) ?\ 24 | (minarg1) : (minarg2)) 25 | 26 | static long lmaxarg1,lmaxarg2; 27 | #define LMAX(a,b) (lmaxarg1=(a),lmaxarg2=(b),(lmaxarg1) > (lmaxarg2) ?\ 28 | (lmaxarg1) : (lmaxarg2)) 29 | 30 | static long lminarg1,lminarg2; 31 | #define LMIN(a,b) (lminarg1=(a),lminarg2=(b),(lminarg1) < (lminarg2) ?\ 32 | (lminarg1) : (lminarg2)) 33 | 34 | static int imaxarg1,imaxarg2; 35 | #define IMAX(a,b) (imaxarg1=(a),imaxarg2=(b),(imaxarg1) > (imaxarg2) ?\ 36 | (imaxarg1) : (imaxarg2)) 37 | 38 | static int iminarg1,iminarg2; 39 | #define IMIN(a,b) (iminarg1=(a),iminarg2=(b),(iminarg1) < (iminarg2) ?\ 40 | (iminarg1) : (iminarg2)) 41 | 42 | #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) 43 | 44 | #if defined(__STDC__) || defined(ANSI) || defined(NRANSI) /* ANSI */ 45 | 46 | void nrerror(char error_text[]); 47 | float *vector(long nl, long nh); 48 | int *ivector(long nl, long nh); 49 | unsigned char *cvector(long nl, long nh); 50 | unsigned long *lvector(long nl, long nh); 51 | double *dvector(long nl, long nh); 52 | float **matrix(long nrl, long nrh, long ncl, long nch); 53 | double **dmatrix(long nrl, long nrh, long ncl, long nch); 54 | int **imatrix(long nrl, long nrh, long ncl, long nch); 55 | float **submatrix(float **a, long oldrl, long oldrh, long oldcl, long oldch, 56 | long newrl, long newcl); 57 | float **convert_matrix(float *a, long nrl, long nrh, long ncl, long nch); 58 | float ***f3tensor(long nrl, long nrh, long ncl, long nch, long ndl, long ndh); 59 | void free_vector(float *v, long nl, long nh); 60 | void free_ivector(int *v, long nl, long nh); 61 | void free_cvector(unsigned char *v, long nl, long nh); 62 | void free_lvector(unsigned long *v, long nl, long nh); 63 | void free_dvector(double *v, long nl, long nh); 64 | void free_matrix(float **m, long nrl, long nrh, long ncl, long nch); 65 | void free_dmatrix(double **m, long nrl, long nrh, long ncl, long nch); 66 | void free_imatrix(int **m, long nrl, long nrh, long ncl, long nch); 67 | void free_submatrix(float **b, long nrl, long nrh, long ncl, long nch); 68 | void free_convert_matrix(float **b, long nrl, long nrh, long ncl, long nch); 69 | void free_f3tensor(float ***t, long nrl, long nrh, long ncl, long nch, 70 | long ndl, long ndh); 71 | 72 | #else /* ANSI */ 73 | /* traditional - K&R */ 74 | 75 | void nrerror(); 76 | float *vector(); 77 | float **matrix(); 78 | float **submatrix(); 79 | float **convert_matrix(); 80 | float ***f3tensor(); 81 | double *dvector(); 82 | double **dmatrix(); 83 | int *ivector(); 84 | int **imatrix(); 85 | unsigned char *cvector(); 86 | unsigned long *lvector(); 87 | void free_vector(); 88 | void free_dvector(); 89 | void free_ivector(); 90 | void free_cvector(); 91 | void free_lvector(); 92 | void 
free_matrix(); 93 | void free_submatrix(); 94 | void free_convert_matrix(); 95 | void free_dmatrix(); 96 | void free_imatrix(); 97 | void free_f3tensor(); 98 | 99 | #endif /* ANSI */ 100 | 101 | #endif /* _NR_UTILS_H_ */ 102 | -------------------------------------------------------------------------------- /ScSR.m: -------------------------------------------------------------------------------- 1 | function [hIm] = ScSR(lIm, up_scale, Dh, Dl, lambda, overlap) 2 | 3 | % normalize the dictionary 4 | norm_Dl = sqrt(sum(Dl.^2, 1)); 5 | Dl = Dl./repmat(norm_Dl, size(Dl, 1), 1); 6 | 7 | patch_size = sqrt(size(Dh, 1)); 8 | 9 | % bicubic interpolation of the low-resolution image 10 | mIm = single(imresize(lIm, up_scale, 'bicubic')); 11 | 12 | hIm = zeros(size(mIm)); 13 | cntMat = zeros(size(mIm)); 14 | 15 | [h, w] = size(mIm); 16 | 17 | % extract low-resolution image features 18 | lImfea = extr_lIm_fea(mIm); 19 | 20 | % patch indexes for sparse recovery (avoid boundary) 21 | gridx = 3:patch_size - overlap : w-patch_size-2; 22 | gridx = [gridx, w-patch_size-2]; 23 | gridy = 3:patch_size - overlap : h-patch_size-2; 24 | gridy = [gridy, h-patch_size-2]; 25 | 26 | A = Dl'*Dl; 27 | cnt = 0; 28 | 29 | % loop to recover each low-resolution patch 30 | for ii = 1:length(gridx), 31 | for jj = 1:length(gridy), 32 | num = (ii-1)*length(gridy)+jj; 33 | if mod(num,1000) == 0 34 | fprintf('%d---%d\n',num, length(gridx)*length(gridy)); 35 | end 36 | cnt = cnt+1; 37 | xx = gridx(ii); 38 | yy = gridy(jj); 39 | 40 | mPatch = mIm(yy:yy+patch_size-1, xx:xx+patch_size-1); 41 | mMean = mean(mPatch(:)); 42 | mPatch = mPatch(:) - mMean; 43 | mNorm = sqrt(sum(mPatch.^2)); 44 | 45 | mPatchFea = lImfea(yy:yy+patch_size-1, xx:xx+patch_size-1, :); 46 | mPatchFea = mPatchFea(:); 47 | mfNorm = sqrt(sum(mPatchFea.^2)); 48 | 49 | if mfNorm > 1, 50 | y = mPatchFea./mfNorm; 51 | else 52 | y = mPatchFea; 53 | end 54 | 55 | b = -Dl'*y; 56 | 57 | % sparse recovery 58 | w = L1QP_FeatureSign_yang(lambda, A, b); 59 | 60 | % generate the high resolution patch and scale the contrast 61 | hPatch = Dh*w; 62 | hPatch = lin_scale(hPatch, mNorm); 63 | 64 | hPatch = reshape(hPatch, [patch_size, patch_size]); 65 | hPatch = hPatch + mMean; 66 | 67 | hIm(yy:yy+patch_size-1, xx:xx+patch_size-1) = hIm(yy:yy+patch_size-1, xx:xx+patch_size-1) + hPatch; 68 | cntMat(yy:yy+patch_size-1, xx:xx+patch_size-1) = cntMat(yy:yy+patch_size-1, xx:xx+patch_size-1) + 1; 69 | end 70 | end 71 | 72 | % fill in the empty with bicubic interpolation 73 | idx = (cntMat < 1); 74 | hIm(idx) = mIm(idx); 75 | 76 | cntMat(idx) = 1; 77 | hIm = hIm./cntMat; 78 | hIm = uint8(hIm); 79 | -------------------------------------------------------------------------------- /backprojection.m: -------------------------------------------------------------------------------- 1 | function [im_h] = backprojection(im_h, im_l, maxIter) 2 | 3 | [row_l, col_l] = size(im_l); 4 | [row_h, col_h] = size(im_h); 5 | 6 | p = fspecial('gaussian', 5, 1); 7 | p = p.^2; 8 | p = p./sum(p(:)); 9 | 10 | im_l = double(im_l); 11 | im_h = double(im_h); 12 | 13 | for ii = 1:maxIter, 14 | im_l_s = imresize(im_h, [row_l, col_l], 'bicubic'); 15 | im_diff = im_l - im_l_s; 16 | 17 | im_diff = imresize(im_diff, [row_h, col_h], 'bicubic'); 18 | im_h = im_h + conv2(im_diff, p, 'same'); 19 | end 20 | -------------------------------------------------------------------------------- /compute_rmse.m: -------------------------------------------------------------------------------- 1 | function [rmse] = compute_rmse(im1, im2) 2 | 3 
| if size(im1, 3) == 3, 4 | im1 = rgb2ycbcr(im1); 5 | im1 = im1(:, :, 1); 6 | end 7 | 8 | if size(im2, 3) == 3, 9 | im2 = rgb2ycbcr(im2); 10 | im2 = im2(:, :, 1); 11 | end 12 | 13 | imdff = double(im1) - double(im2); 14 | imdff = imdff(:); 15 | 16 | rmse = sqrt(mean(imdff.^2)); -------------------------------------------------------------------------------- /extr_lIm_fea.m: -------------------------------------------------------------------------------- 1 | function [lImFea] = extr_lIm_fea( lIm ) 2 | 3 | [nrow, ncol] = size(lIm); 4 | 5 | lImFea = zeros([nrow, ncol, 4]); 6 | 7 | % first order gradient filters 8 | hf1 = [-1,0,1]; 9 | vf1 = [-1,0,1]'; 10 | 11 | lImFea(:, :, 1) = conv2(lIm, hf1, 'same'); 12 | lImFea(:, :, 2) = conv2(lIm, vf1, 'same'); 13 | 14 | % second order gradient filters 15 | hf2 = [1,0,-2,0,1]; 16 | vf2 = [1,0,-2,0,1]'; 17 | 18 | lImFea(:, :, 3) = conv2(lIm,hf2,'same'); 19 | lImFea(:, :, 4) = conv2(lIm,vf2,'same'); 20 | 21 | -------------------------------------------------------------------------------- /lin_scale.m: -------------------------------------------------------------------------------- 1 | function [xh] = lin_scale( xh, mNorm ) 2 | 3 | hNorm = sqrt(sum(xh.^2)); 4 | 5 | if hNorm, 6 | s = mNorm*1.2/hNorm; 7 | xh = xh.*s; 8 | end -------------------------------------------------------------------------------- /patch_pruning.m: -------------------------------------------------------------------------------- 1 | function [Xh, Xl] = patch_pruning(Xh, Xl, threshold) 2 | 3 | pvars = var(Xh, 0, 1); 4 | 5 | idx = pvars > threshold; 6 | 7 | Xh = Xh(:, idx); 8 | Xl = Xl(:, idx); -------------------------------------------------------------------------------- /qssim.m: -------------------------------------------------------------------------------- 1 | function [mqssim, qssim_map] = qssim(img1, img2,ch,sqrt_3, K, window, L) 2 | 3 | %======================================================================== 4 | %QSSIM Index, Version 1.2 5 | %Copyright(c) 2011 Amir Kolaman 6 | %All Rights Reserved. 7 | % 8 | %---------------------------------------------------------------------- 9 | %Permission to use, copy, or modify this software and its documentation 10 | %for educational and research purposes only and without fee is hereby 11 | %granted, provided that this copyright notice and the original authors' 12 | %names appear on all copies and supporting documentation. This program 13 | %shall not be used, rewritten, or adapted as the basis of a commercial 14 | %software or hardware product without first obtaining permission of the 15 | %authors. The authors make no representations about the suitability of 16 | %this software for any purpose. It is provided "as is" without express 17 | %or implied warranty. 18 | %---------------------------------------------------------------------- 19 | %This code was modified from the original ssim_index.m downloaded from 20 | %http://www.ece.uwaterloo.ca/~z70wang/research/ssim/ 21 | %which is the implementation of 22 | %Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality 23 | %assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004. 24 | %__________________________________________________________________________ 25 | % 26 | %This is an implementation of the algorithm for calculating the 27 | %Quaternion Structural SIMilarity (QSSIM) index between two images. Please refer 28 | %to the following paper: 29 | % 30 | %A. 
Kolaman, Orly Yadid-Pecht "Quaternion Structural Similarity a New Quality Index for Color Images" 31 | %IEEE Transactions on Image Processing, vol. ??, no. ??, ??. 2011. 32 | % 33 | %Kindly report any suggestions or corrections to kolaman@bgu.ac.il 34 | % 35 | %To use this index you need to first download the Quaternion Toolbox for Matlab at 36 | %http://qtfm.sourceforge.net/ 37 | %and add it to Matlab's path 38 | %---------------------------------------------------------------------- 39 | %% IMG1(N,N) while N an odd number SHOULD BE CHANGED 40 | %Input : (1) img1: the first image being compared 41 | % (2) img2: the second image being compared 42 | %% 43 | % (3) ch: the factor by which the chrominance (saturation) component is amplified, so as to detect saturation changes better 44 | % (4) sqrt_3: whether to normalize by the square root of 3 45 | % (5) K: constants in the SSIM index formula (see the above 46 | % reference). default value: K = [0.01 0.03] 47 | % (6) window: local window for statistics (see the above 48 | % reference). default window is Gaussian given by 49 | % window = fspecial('gaussian', 11, 1.5); 50 | % (7) L: dynamic range of the images. default: L = 1 (inputs are scaled by 1/256 inside this function) 51 | % 52 | %Output: (1) mqssim: the mean QSSIM index value between 2 images. 53 | % If one of the images being compared is regarded as 54 | % perfect quality, then mqssim can be considered as the 55 | % quality measure of the other image. 56 | % If img1 = img2, then mqssim = 1. 57 | % (2) qssim_map: the QSSIM index map of the test image. The map 58 | % has a smaller size than the input images. The actual size: 59 | % size(img1) - size(window) + 1. 60 | % 61 | %Default Color image Usage: 62 | % Given 2 test images img1 and img2, whose dynamic range is 0-255 63 | % 64 | % [mqssim qssim_map] = qssim_index(img1, img2); 65 | % 66 | %Default gray scale image Usage: 67 | % Given 2 test images img1 and img2, whose dynamic range is 0-255, and 68 | % are gray scale (m,n,1) images 69 | %if sqrt_3=1 then use: 70 | % img1_RGB(:,:,3)=img1./sqrt(3); 71 | % img2_RGB(:,:,3)=img2./sqrt(3); 72 | %else 73 | % img1_RGB(:,:,3)=img1; 74 | % img2_RGB(:,:,3)=img2; 75 | % img1_RGB(:,:,2)=img1_RGB(:,:,3); 76 | % img1_RGB(:,:,1)=img1_RGB(:,:,3); 77 | % img2_RGB(:,:,2)=img2_RGB(:,:,3); 78 | % img2_RGB(:,:,1)=img2_RGB(:,:,3); 79 | % 80 | % [mqssim qssim_map] = qssim_index(img1_RGB, img2_RGB); 81 | % 82 | %Advanced Usage: 83 | % User defined parameters.
For example 84 | % 85 | % K = [0.05 0.05]; 86 | % window = ones(8); 87 | % L = 100; 88 | % [mqssim qqssim_map] = qssim_index(img1, img2, K, window, L); 89 | % 90 | %See the results: 91 | % 92 | % mqssim %Gives the mqssim value 93 | % imshow(max(0, qqssim_map).^4) %Shows the QSSIM index map 94 | % 95 | %======================================================================== 96 | 97 | %% making sure the input is in double 98 | img1=double(img1)./256; 99 | 100 | img2=double(img2)./256; 101 | 102 | %% *******************checking function input settings**************** 103 | if (nargin < 2 || nargin > 7) 104 | mqssim = -Inf; 105 | qssim_map = -Inf; 106 | display('parameters are not adaquate'); 107 | return; 108 | end 109 | 110 | if (nargin == 2) 111 | ch=1; 112 | sqrt_3=1; 113 | window = fspecial('gaussian', 11, 1.5); % creating 11x11 gaussian window 114 | K(1) = 0.01; % default settings 115 | K(2) = 0.03; % 116 | L = 1; % 117 | end 118 | 119 | if (size(img1) ~= size(img2)) 120 | mqssim = -Inf; 121 | qssim_map = -Inf; 122 | display('images are different in size'); 123 | return; 124 | end 125 | 126 | [M N] = size(img1(:,:,1)); 127 | 128 | if (nargin == 4) 129 | if ((M < 11) || (N < 11)) 130 | mqssim = -Inf; 131 | qssim_map = -Inf; 132 | return 133 | end 134 | window = fspecial('gaussian', 11, 1.5); % creating 11x11 gaussian window 135 | K(1) = 0.01; % default settings 136 | K(2) = 0.03; % 137 | L = 1; % 138 | end 139 | 140 | if (nargin == 5) 141 | if ((M < 11) || (N < 11)) 142 | mqssim = -Inf; 143 | qssim_map = -Inf; 144 | return 145 | end 146 | window = fspecial('gaussian', 11, 1.5); 147 | L =1; 148 | if (length(K) == 2) 149 | if (K(1) < 0 || K(2) < 0) 150 | mqssim = -Inf; 151 | qssim_map = -Inf; 152 | return; 153 | end 154 | else 155 | mqssim = -Inf; 156 | qssim_map = -Inf; 157 | return; 158 | end 159 | end 160 | 161 | if (nargin == 6) 162 | [H W] = size(window); 163 | if ((H*W) < 4 || (H > M) || (W > N)) 164 | mqssim = -Inf; 165 | qssim_map = -Inf; 166 | return 167 | end 168 | L = 1; 169 | if (length(K) == 2) 170 | if (K(1) < 0 || K(2) < 0) 171 | mqssim = -Inf; 172 | qssim_map = -Inf; 173 | return; 174 | end 175 | else 176 | mqssim = -Inf; 177 | qssim_map = -Inf; 178 | return; 179 | end 180 | end 181 | 182 | if (nargin == 7) 183 | [H W] = size(window); 184 | if ((H*W) < 4 || (H > M) || (W > N)) 185 | mqssim = -Inf; 186 | qssim_map = -Inf; 187 | return 188 | end 189 | if (length(K) == 2) 190 | if (K(1) < 0 || K(2) < 0) 191 | mqssim = -Inf; 192 | qssim_map = -Inf; 193 | return; 194 | end 195 | else 196 | mqssim = -Inf; 197 | qssim_map = -Inf; 198 | return; 199 | end 200 | end 201 | %% ***********************beginning of the code*********************** 202 | %$$$$$$$$$$$$$$$$$$$$ Dilating and chrominance channel of both $$$$$$$ 203 | %%$$$$$$$$$$$$$$$$$$$$$$$$$$$ images by ch $$$$$$$$$$$$$$$$$$$$$$$$$$$$ 204 | img1_L(:,:,3)=img1(:,:,1)/3+img1(:,:,2)/3+img1(:,:,3)/3; 205 | img1_L(:,:,2)=img1_L(:,:,3); 206 | img1_L(:,:,1)=img1_L(:,:,3); 207 | img1_ch=img1-img1_L; 208 | img1=img1_ch.*ch+img1_L; 209 | % img1=img1_ch+img1_L./ch; 210 | 211 | img2_L(:,:,3)=img2(:,:,1)/3+img2(:,:,2)/3+img2(:,:,3)/3; 212 | img2_L(:,:,2)=img2_L(:,:,3); 213 | img2_L(:,:,1)=img2_L(:,:,3); 214 | img2_ch=img2-img2_L; 215 | img2=img2_ch.*ch+img2_L; 216 | % img2=img2_ch+img2_L./ch; 217 | 218 | %$$$$$$$$$$$$$$$$$$$$ Dilating and chrominance channel of both $$$$$$$ 219 | %%$$$$$$$$$$$$$$$$$$$$$$$$$$$ images by ch $$$$$$$$$$$$$$$$$$$$$$$$$$$$ 220 | 221 | % automatic downsampling 222 | f = max(1,round(min(M,N)/256)); 223 | 
%downsampling by f 224 | %use a simple low-pass filter 225 | if(f>1) 226 | lpf = ones(f,f); 227 | lpf = (1./(f*f))*lpf; 228 | img1 = imfilter(img1,lpf,'symmetric','same'); 229 | img2 = imfilter(img2,lpf,'symmetric','same'); 230 | 231 | img1 = img1(1:f:end,1:f:end,:); 232 | img2 = img2(1:f:end,1:f:end,:); 233 | end 234 | 235 | C1 = (K(1)*L)^2; 236 | C2 = (K(2)*L)^2; 237 | C1=quaternion(C1,0,0,0); 238 | C2=quaternion(C2,0,0,0); 239 | window = window/sum(sum(window)); %normalize to sum 1 240 | img1_Q=img_to_Q(img1,sqrt_3); 241 | img2_Q=img_to_Q(img2,sqrt_3); 242 | 243 | mu1 = filter2_RGB(img1,window);%gaussian average of all the colors in image 1 244 | mu2 = filter2_RGB(img2,window);%gaussian average of all the colors in image 2 245 | 246 | mu1_Q = img_to_Q(mu1,sqrt_3);%convert F image to quaternion 247 | mu2_Q = img_to_Q(mu2,sqrt_3);%convert G image to quaternion 248 | 249 | mu1_sq_Q=mu1_Q.*conj(mu1_Q);%compute mu1_squared in quaternions 250 | mu2_sq_Q=mu2_Q.*conj(mu2_Q);%compute mu2_squared in quaternions 251 | mu1_mu2_Q=mu1_Q.*conj(mu2_Q);%compute the correlation between mu1 and mu2 in quaternions 252 | 253 | 254 | img1_hue_sq_Q=img1_Q.*conj(img1_Q);%compute img1_hue_squared in quaternions 255 | img2_hue_sq_Q=img2_Q.*conj(img2_Q);%compute img2_hue_squared in quaternions 256 | img1_img2_hue_Q=img1_Q.*conj(img2_Q);%compute the correlation between img1_hue and img2_hue. 257 | 258 | 259 | sigma1_sq_Q = filter2_Q(img1_hue_sq_Q,window)-mu1_sq_Q;%average the hue squared with a gaussian 260 | sigma2_sq_Q = filter2_Q(img2_hue_sq_Q,window)-mu2_sq_Q ;%average the hue squared with a gaussian 261 | sigma12_Q = filter2_Q(img1_img2_hue_Q,window)-mu1_mu2_Q ;%average the hue squared with a gaussian 262 | 263 | if (abs(C1) > 0 && abs(C2) > 0) 264 | qssim_map_Q = ((2*mu1_mu2_Q + C1).*(2*conj(sigma12_Q) + C2))./((mu1_sq_Q + mu2_sq_Q + C1).*(conj(sigma1_sq_Q + sigma2_sq_Q) + C2)); 265 | qssim_map = abs(qssim_map_Q); 266 | else 267 | numerator1 = 2*mu1_mu2 + C1; 268 | numerator2 = 2*sigma12 + C2; 269 | denominator1 = mu1_sq + mu2_sq + C1; 270 | denominator2 = sigma1_sq + sigma2_sq + C2; 271 | qssim_map = ones(size(mu1)); 272 | index = (denominator1.*denominator2 > 0); 273 | qssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index)); 274 | index = (denominator1 ~= 0) & (denominator2 == 0); 275 | qssim_map(index) = numerator1(index)./denominator1(index); 276 | end 277 | mqssim = mean2(qssim_map); 278 | end 279 | function f_img=filter2_RGB(img,window) 280 | %this function performs a gaussian average on an RGB image 281 | f_img(:,:,1) = filter2(window, img(:,:,1), 'valid');%gaussian average of red color in image 1 282 | f_img(:,:,2) = filter2(window, img(:,:,2), 'valid');%gaussian average of green color in image 1 283 | f_img(:,:,3) = filter2(window, img(:,:,3), 'valid');%gaussian average of blue color in image 1 284 | end 285 | 286 | function result=filter2_Q(f_Q,window) 287 | % this function does a regular filter2 on every part of the quaternion 288 | % matrix seperatly 289 | temp(:,:,1) = filter2(window, s(f_Q), 'valid');%gaussian average of red color in image 1 290 | temp(:,:,2) = filter2(window, x(f_Q), 'valid');%gaussian average of green color in image 1 291 | temp(:,:,3) = filter2(window, y(f_Q), 'valid');%gaussian average of blue color in image 1 292 | temp(:,:,4) = filter2(window, z(f_Q), 'valid');%gaussian average of blue color in image 1 293 | result = convert(quaternion(temp(:,:,1), ... 294 | temp(:,:,2), ... 295 | temp(:,:,3),... 
296 | temp(:,:,4)), 'double'); 297 | 298 | end 299 | 300 | function img_Q=img_to_Q(img,sqrt_3) 301 | % this function convert RGB image to quaternion space 302 | %it converts the image to quaternion and normalizes it to 1 303 | if sqrt_3==1 304 | img_Q = convert(quaternion(img(:,:,1), ... 305 | img(:,:,2), ... 306 | img(:,:,3)), 'double') ./sqrt(3); 307 | else 308 | img_Q = convert(quaternion(img(:,:,1), ... 309 | img(:,:,2), ... 310 | img(:,:,3)), 'double') ; 311 | end 312 | end -------------------------------------------------------------------------------- /rnd_smp_patch.m: -------------------------------------------------------------------------------- 1 | function [Xh, Xl] = rnd_smp_patch(img_path, type, patch_size, num_patch, upscale) 2 | 3 | img_dir = dir(fullfile(img_path, type)); 4 | 5 | Xh = []; 6 | Xl = []; 7 | 8 | img_num = length(img_dir); 9 | nper_img = zeros(1, img_num); 10 | 11 | for ii = 1:length(img_dir), 12 | im = imread(fullfile(img_path, img_dir(ii).name)); 13 | nper_img(ii) = prod(size(im)); 14 | end 15 | 16 | nper_img = floor(nper_img*num_patch/sum(nper_img)); 17 | 18 | for ii = 1:img_num, 19 | patch_num = nper_img(ii); 20 | im = imread(fullfile(img_path, img_dir(ii).name)); 21 | [H, L] = sample_patches(im, patch_size, patch_num, upscale); 22 | Xh = [Xh, H]; 23 | Xl = [Xl, L]; 24 | end 25 | 26 | patch_path = ['Training/rnd_patches_' num2str(patch_size) '_' num2str(num_patch) '_s' num2str(upscale) '.mat']; 27 | save(patch_path, 'Xh', 'Xl'); -------------------------------------------------------------------------------- /sample_patches.m: -------------------------------------------------------------------------------- 1 | function [HP, LP] = sample_patches(im, patch_size, patch_num, upscale) 2 | 3 | if size(im, 3) == 3, 4 | hIm = rgb2gray(im); 5 | else 6 | hIm = im; 7 | end 8 | 9 | % generate low resolution counter parts 10 | lIm = imresize(hIm, 1/upscale, 'bicubic'); 11 | lIm = imresize(lIm, size(hIm), 'bicubic'); 12 | [nrow, ncol] = size(hIm); 13 | 14 | x = randperm(nrow-2*patch_size-1) + patch_size; 15 | y = randperm(ncol-2*patch_size-1) + patch_size; 16 | 17 | [X,Y] = meshgrid(x,y); 18 | 19 | xrow = X(:); 20 | ycol = Y(:); 21 | 22 | if patch_num < length(xrow), 23 | xrow = xrow(1:patch_num); 24 | ycol = ycol(1:patch_num); 25 | end 26 | 27 | patch_num = length(xrow); 28 | 29 | hIm = double(hIm); 30 | lIm = double(lIm); 31 | 32 | H = zeros(patch_size^2, length(xrow)); 33 | L = zeros(4*patch_size^2, length(xrow)); 34 | 35 | % compute the first and second order gradients 36 | hf1 = [-1,0,1]; 37 | vf1 = [-1,0,1]'; 38 | 39 | lImG11 = conv2(lIm, hf1,'same'); 40 | lImG12 = conv2(lIm, vf1,'same'); 41 | 42 | hf2 = [1,0,-2,0,1]; 43 | vf2 = [1,0,-2,0,1]'; 44 | 45 | lImG21 = conv2(lIm,hf2,'same'); 46 | lImG22 = conv2(lIm,vf2,'same'); 47 | 48 | for ii = 1:patch_num, 49 | row = xrow(ii); 50 | col = ycol(ii); 51 | 52 | Hpatch = hIm(row:row+patch_size-1,col:col+patch_size-1); 53 | 54 | Lpatch1 = lImG11(row:row+patch_size-1,col:col+patch_size-1); 55 | Lpatch2 = lImG12(row:row+patch_size-1,col:col+patch_size-1); 56 | Lpatch3 = lImG21(row:row+patch_size-1,col:col+patch_size-1); 57 | Lpatch4 = lImG22(row:row+patch_size-1,col:col+patch_size-1); 58 | 59 | Lpatch = [Lpatch1(:),Lpatch2(:),Lpatch3(:),Lpatch4(:)]; 60 | Lpatch = Lpatch(:); 61 | 62 | HP(:,ii) = Hpatch(:)-mean(Hpatch(:)); 63 | LP(:,ii) = Lpatch; 64 | end 65 | -------------------------------------------------------------------------------- /ssim_index.m: 
-------------------------------------------------------------------------------- 1 | function [mssim, ssim_map] = ssim_index(img1, img2, K, window, L) 2 | 3 | %======================================================================== 4 | %SSIM Index, Version 1.0 5 | %Copyright(c) 2003 Zhou Wang 6 | %All Rights Reserved. 7 | % 8 | %The author is with Howard Hughes Medical Institute, and Laboratory 9 | %for Computational Vision at Center for Neural Science and Courant 10 | %Institute of Mathematical Sciences, New York University. 11 | % 12 | %---------------------------------------------------------------------- 13 | %Permission to use, copy, or modify this software and its documentation 14 | %for educational and research purposes only and without fee is hereby 15 | %granted, provided that this copyright notice and the original authors' 16 | %names appear on all copies and supporting documentation. This program 17 | %shall not be used, rewritten, or adapted as the basis of a commercial 18 | %software or hardware product without first obtaining permission of the 19 | %authors. The authors make no representations about the suitability of 20 | %this software for any purpose. It is provided "as is" without express 21 | %or implied warranty. 22 | %---------------------------------------------------------------------- 23 | % 24 | %This is an implementation of the algorithm for calculating the 25 | %Structural SIMilarity (SSIM) index between two images. Please refer 26 | %to the following paper: 27 | % 28 | %Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image 29 | %quality assessment: From error visibility to structural similarity," 30 | %IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004. 31 | % 32 | %Kindly report any suggestions or corrections to zhouwang@ieee.org 33 | % 34 | %---------------------------------------------------------------------- 35 | % 36 | %Input : (1) img1: the first image being compared 37 | % (2) img2: the second image being compared 38 | % (3) K: constants in the SSIM index formula (see the above 39 | % reference). default value: K = [0.01 0.03] 40 | % (4) window: local window for statistics (see the above 41 | % reference). default window is Gaussian given by 42 | % window = fspecial('gaussian', 11, 1.5); 43 | % (5) L: dynamic range of the images. default: L = 255 44 | % 45 | %Output: (1) mssim: the mean SSIM index value between 2 images. 46 | % If one of the images being compared is regarded as 47 | % perfect quality, then mssim can be considered as the 48 | % quality measure of the other image. 49 | % If img1 = img2, then mssim = 1. 50 | % (2) ssim_map: the SSIM index map of the test image. The map 51 | % has a smaller size than the input images. The actual size: 52 | % size(img1) - size(window) + 1. 53 | % 54 | %Default Usage: 55 | % Given 2 test images img1 and img2, whose dynamic range is 0-255 56 | % 57 | % [mssim ssim_map] = ssim_index(img1, img2); 58 | % 59 | %Advanced Usage: 60 | % User defined parameters.
For example 61 | % 62 | % K = [0.05 0.05]; 63 | % window = ones(8); 64 | % L = 100; 65 | % [mssim ssim_map] = ssim_index(img1, img2, K, window, L); 66 | % 67 | %See the results: 68 | % 69 | % mssim %Gives the mssim value 70 | % imshow(max(0, ssim_map).^4) %Shows the SSIM index map 71 | % 72 | %======================================================================== 73 | 74 | 75 | if (nargin < 2 || nargin > 5) 76 | ssim_index = -Inf; 77 | ssim_map = -Inf; 78 | return; 79 | end 80 | 81 | if (size(img1) ~= size(img2)) 82 | ssim_index = -Inf; 83 | ssim_map = -Inf; 84 | return; 85 | end 86 | 87 | [M N] = size(img1); 88 | 89 | if (nargin == 2) 90 | if ((M < 11) || (N < 11)) 91 | ssim_index = -Inf; 92 | ssim_map = -Inf; 93 | return 94 | end 95 | window = fspecial('gaussian', 11, 1.5); % 96 | K(1) = 0.01; % default settings 97 | K(2) = 0.03; % 98 | L = 255; % 99 | end 100 | 101 | if (nargin == 3) 102 | if ((M < 11) || (N < 11)) 103 | ssim_index = -Inf; 104 | ssim_map = -Inf; 105 | return 106 | end 107 | window = fspecial('gaussian', 11, 1.5); 108 | L = 255; 109 | if (length(K) == 2) 110 | if (K(1) < 0 || K(2) < 0) 111 | ssim_index = -Inf; 112 | ssim_map = -Inf; 113 | return; 114 | end 115 | else 116 | ssim_index = -Inf; 117 | ssim_map = -Inf; 118 | return; 119 | end 120 | end 121 | 122 | if (nargin == 4) 123 | [H W] = size(window); 124 | if ((H*W) < 4 || (H > M) || (W > N)) 125 | ssim_index = -Inf; 126 | ssim_map = -Inf; 127 | return 128 | end 129 | L = 255; 130 | if (length(K) == 2) 131 | if (K(1) < 0 || K(2) < 0) 132 | ssim_index = -Inf; 133 | ssim_map = -Inf; 134 | return; 135 | end 136 | else 137 | ssim_index = -Inf; 138 | ssim_map = -Inf; 139 | return; 140 | end 141 | end 142 | 143 | if (nargin == 5) 144 | [H W] = size(window); 145 | if ((H*W) < 4 || (H > M) || (W > N)) 146 | ssim_index = -Inf; 147 | ssim_map = -Inf; 148 | return 149 | end 150 | if (length(K) == 2) 151 | if (K(1) < 0 || K(2) < 0) 152 | ssim_index = -Inf; 153 | ssim_map = -Inf; 154 | return; 155 | end 156 | else 157 | ssim_index = -Inf; 158 | ssim_map = -Inf; 159 | return; 160 | end 161 | end 162 | 163 | C1 = (K(1)*L)^2; 164 | C2 = (K(2)*L)^2; 165 | window = window/sum(sum(window)); 166 | img1 = double(img1); 167 | img2 = double(img2); 168 | 169 | mu1 = filter2(window, img1, 'valid'); 170 | mu2 = filter2(window, img2, 'valid'); 171 | mu1_sq = mu1.*mu1; 172 | mu2_sq = mu2.*mu2; 173 | mu1_mu2 = mu1.*mu2; 174 | sigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq; 175 | sigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq; 176 | sigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2; 177 | 178 | if (C1 > 0 & C2 > 0) 179 | ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2)); 180 | else 181 | numerator1 = 2*mu1_mu2 + C1; 182 | numerator2 = 2*sigma12 + C2; 183 | denominator1 = mu1_sq + mu2_sq + C1; 184 | denominator2 = sigma1_sq + sigma2_sq + C2; 185 | ssim_map = ones(size(mu1)); 186 | index = (denominator1.*denominator2 > 0); 187 | ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index)); 188 | index = (denominator1 ~= 0) & (denominator2 == 0); 189 | ssim_map(index) = numerator1(index)./denominator1(index); 190 | end 191 | 192 | mssim = mean2(ssim_map); 193 | 194 | return -------------------------------------------------------------------------------- /train_coupled_dict.m: -------------------------------------------------------------------------------- 1 | function [Dh, Dl] = train_coupled_dict(Xh, Xl, dict_size, lambda, 
upscale) 2 | 3 | addpath(genpath('RegularizedSC')); 4 | 5 | hDim = size(Xh, 1); 6 | lDim = size(Xl, 1); 7 | 8 | % should pre-normalize Xh and Xl ! 9 | hNorm = sqrt(sum(Xh.^2)); 10 | lNorm = sqrt(sum(Xl.^2)); 11 | Idx = find( hNorm & lNorm ); 12 | 13 | Xh = Xh(:, Idx); 14 | Xl = Xl(:, Idx); 15 | 16 | Xh = Xh./repmat(sqrt(sum(Xh.^2)), size(Xh, 1), 1); 17 | Xl = Xl./repmat(sqrt(sum(Xl.^2)), size(Xl, 1), 1); 18 | 19 | % joint learning of the dictionary 20 | X = [sqrt(hDim)*Xh; sqrt(lDim)*Xl]; 21 | Xnorm = sqrt(sum(X.^2, 1)); 22 | 23 | clear Xh Xl; 24 | 25 | X = X(:, Xnorm > 1e-5); 26 | X = X./repmat(sqrt(sum(X.^2, 1)), hDim+lDim, 1); 27 | 28 | idx = randperm(size(X, 2)); 29 | 30 | % dictionary training 31 | [D] = reg_sparse_coding(X, dict_size, [], 0, lambda, 40); 32 | 33 | Dh = D(1:hDim, :); 34 | Dl = D(hDim+1:end, :); 35 | 36 | % normalize the dictionary 37 | % Dh = Dh./repmat(sqrt(sum(Dh.^2, 1)), hDim, 1); 38 | % Dl = Dl./repmat(sqrt(sum(Dl.^2, 1)), lDim, 1); 39 | 40 | patch_size = sqrt(size(Dh, 1)); 41 | 42 | dict_path = ['Dictionary/D_' num2str(dict_size) '_' num2str(lambda) '_' num2str(patch_size) '_s' num2str(upscale) '.mat' ]; 43 | save(dict_path, 'Dh', 'Dl'); --------------------------------------------------------------------------------
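
The files listed above are the building blocks of the sparse-coding super-resolution (ScSR) pipeline; Demo_Dictionary_Training.m and Demo_SR.m are the repository's actual entry points. The following is a minimal usage sketch, not a repository file: it only illustrates how the functions dumped above fit together. Every concrete value and path in it is an assumption (up_scale = 2, lambda = 0.15, patch_size = 5, overlap = 4, dict_size = 512, a pruning threshold of 10, 20 back-projection iterations, and Data/Training, Data/Testing/input.bmp, Data/Testing/gnd.bmp as training folder, low-resolution input, and ground truth). The parameter values mirror the dictionary naming convention used by train_coupled_dict.m, not values confirmed by the demo scripts, and a color input image is assumed because ScSR.m operates on a single (luminance) channel.

% --- dictionary training: sample coupled HR/LR patches, prune flat ones,
% --- and learn the joint dictionary (rnd_smp_patch.m, patch_pruning.m, train_coupled_dict.m)
[Xh, Xl] = rnd_smp_patch('Data/Training', '*.bmp', 5, 100000, 2);
[Xh, Xl] = patch_pruning(Xh, Xl, 10);            % keep patches with variance above the threshold
[Dh, Dl] = train_coupled_dict(Xh, Xl, 512, 0.15, 2);

% --- super-resolution of a test image on the luminance channel (ScSR.m, backprojection.m)
im_l       = imread('Data/Testing/input.bmp');   % assumed low-resolution RGB input
im_l_ycbcr = rgb2ycbcr(im_l);
im_l_y     = double(im_l_ycbcr(:, :, 1));

im_h_y = ScSR(im_l_y, 2, Dh, Dl, 0.15, 4);       % patch-wise sparse recovery against Dl/Dh
im_h_y = backprojection(im_h_y, im_l_y, 20);     % enforce the global reconstruction constraint

% chrominance is simply upscaled with bicubic interpolation and recombined
im_h_cb = imresize(im_l_ycbcr(:, :, 2), 2, 'bicubic');
im_h_cr = imresize(im_l_ycbcr(:, :, 3), 2, 'bicubic');
im_h    = ycbcr2rgb(cat(3, uint8(im_h_y), im_h_cb, im_h_cr));

% --- evaluation against the assumed ground truth (compute_rmse.m, ssim_index.m)
im_gnd = imread('Data/Testing/gnd.bmp');
fprintf('RMSE: %f\n', compute_rmse(im_gnd, im_h));
fprintf('SSIM: %f\n', ssim_index(rgb2gray(im_gnd), rgb2gray(im_h)));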