├── PSF_generation ├── PSF_cell │ └── readme.md ├── PSF_data │ ├── readme.md │ └── PSF_info_fld_000 │ │ ├── readme.md │ │ └── PSF_info.xlsx ├── PSF_info │ ├── readme.md │ └── PSF_info_fld_000 │ │ ├── readme.md │ │ └── wav_040.mat ├── wav_response │ ├── readme.md │ ├── wav_dist_cell_avr_itvl_380_10nm_780.mat │ └── wav_dist_cell_spl_itvl_380_10nm_780.mat ├── illumination_info │ ├── readme.md │ └── illumination_info.xlsx ├── PSF.Q ├── PSF_data_transfer.m ├── load_wave_response.m ├── load_wave_response_illuminate.m ├── ray_tracing │ ├── difftrace │ │ ├── __init__.py │ │ ├── zernike.py │ │ └── conjugates.py │ ├── lens_file │ │ ├── surf.txt │ │ ├── hybrid.txt │ │ ├── cmos.txt │ │ ├── doubleGauss.txt │ │ └── doubleGauss.json │ ├── optimize.ipynb │ └── test_analysis.py ├── compute_field_info.m ├── readme.md ├── interp_relative_illumination.m ├── compute_delta_angle.m ├── compute_h_w_delta.m ├── pad_PSF.m ├── judge_main_wav.m └── PSF_coherent_superposition.m ├── fov_deformable_net ├── dcn │ ├── DCN.egg-info │ │ ├── dependency_links.txt │ │ ├── top_level.txt │ │ ├── PKG-INFO │ │ └── SOURCES.txt │ ├── make.sh │ ├── dist │ │ └── DCN-1.0-py3.7-linux-x86_64.egg │ ├── modules │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── deform_conv.cpython-37.pyc │ │ │ ├── deform_psroi_pooling.cpython-37.pyc │ │ │ └── modulated_deform_conv.cpython-37.pyc │ │ ├── __init__.py │ │ ├── deform_conv.py │ │ ├── deform_psroi_pooling.py │ │ └── modulated_deform_conv.py │ ├── build │ │ ├── lib.linux-x86_64-3.7 │ │ │ ├── DCN.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── deform_conv_func.py │ │ │ │ ├── modulated_deform_conv_func.py │ │ │ │ └── deform_psroi_pooling_func.py │ │ │ └── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── deform_conv.py │ │ │ │ ├── modulated_deform_conv.py │ │ │ │ └── deform_psroi_pooling.py │ │ └── temp.linux-x86_64-3.7 │ │ │ └── hdd8T_3 │ │ │ └── chensq │ │ │ └── dyconv │ │ │ └── dcn │ │ │ └── src │ │ │ ├── vision.o │ │ │ ├── cpu │ │ │ ├── deform_cpu.o │ │ │ ├── modulated_deform_cpu.o │ │ │ └── deform_psroi_pooling_cpu.o │ │ │ └── cuda │ │ │ ├── deform_conv_cuda.o │ │ │ ├── deform_psroi_pooling_cuda.o │ │ │ └── modulated_deform_conv_cuda.o │ ├── functions │ │ ├── __init__.py │ │ ├── deform_conv_func.py │ │ ├── modulated_deform_conv_func.py │ │ └── deform_psroi_pooling_func.py │ ├── src │ │ ├── vision.cpp │ │ ├── cpu │ │ │ ├── deform_conv_cpu.h │ │ │ ├── deform_psroi_pooling_cpu.h │ │ │ ├── modulated_deform_conv_cpu.h │ │ │ ├── deform_cpu.cpp │ │ │ ├── deform_psroi_pooling_cpu.cpp │ │ │ └── modulated_deform_cpu.cpp │ │ ├── cuda │ │ │ ├── deform_conv_cuda.h │ │ │ ├── deform_psroi_pooling_cuda.h │ │ │ └── modulated_deform_conv_cuda.h │ │ ├── deform_conv.h │ │ ├── modulated_deform_conv.h │ │ └── deform_psroi_pooling.h │ ├── setup.py │ └── README.md ├── requirements.txt ├── readme.md ├── dataloader.py ├── utils.py ├── option │ └── option.py ├── train.py ├── dataset_generator.py ├── test.py ├── test_real.py ├── loss.py └── deformable_unet.py ├── imaging_simulation ├── image_plane_image │ └── readme.md ├── readme.md ├── object_plane_image │ └── readme.md ├── apply_wb.m ├── apply_ccm.m ├── mosaicing.m ├── patch_conv.m └── imaging_simulation.m └── README.md /PSF_generation/PSF_cell/readme.md: -------------------------------------------------------------------------------- 1 | PSF cell in this folder 2 | -------------------------------------------------------------------------------- /PSF_generation/PSF_data/readme.md: 
-------------------------------------------------------------------------------- 1 | PSF data in this folder 2 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/DCN.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /PSF_generation/PSF_info/readme.md: -------------------------------------------------------------------------------- 1 | PSF information in this folder 2 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/make.sh: -------------------------------------------------------------------------------- 1 | python setup.py build install 2 | -------------------------------------------------------------------------------- /PSF_generation/PSF_data/PSF_info_fld_000/readme.md: -------------------------------------------------------------------------------- 1 | sample PSF data 2 | -------------------------------------------------------------------------------- /imaging_simulation/image_plane_image/readme.md: -------------------------------------------------------------------------------- 1 | # imaging result image 2 | -------------------------------------------------------------------------------- /imaging_simulation/readme.md: -------------------------------------------------------------------------------- 1 | # Imaging Simulation with Optical PSFs 2 | -------------------------------------------------------------------------------- /PSF_generation/PSF_info/PSF_info_fld_000/readme.md: -------------------------------------------------------------------------------- 1 | sample PSF information 2 | -------------------------------------------------------------------------------- /PSF_generation/wav_response/readme.md: -------------------------------------------------------------------------------- 1 | wave distribution response of the sensor 2 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/DCN.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | DCN 2 | functions 3 | modules 4 | -------------------------------------------------------------------------------- /PSF_generation/illumination_info/readme.md: -------------------------------------------------------------------------------- 1 | illumination information in this folder 2 | -------------------------------------------------------------------------------- /PSF_generation/PSF.Q: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/PSF.Q -------------------------------------------------------------------------------- /imaging_simulation/object_plane_image/readme.md: -------------------------------------------------------------------------------- 1 | # object plane image, which is the input image. 
2 | -------------------------------------------------------------------------------- /PSF_generation/PSF_data_transfer.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/PSF_data_transfer.m -------------------------------------------------------------------------------- /PSF_generation/load_wave_response.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/load_wave_response.m -------------------------------------------------------------------------------- /PSF_generation/load_wave_response_illuminate.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/load_wave_response_illuminate.m -------------------------------------------------------------------------------- /PSF_generation/PSF_info/PSF_info_fld_000/wav_040.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/PSF_info/PSF_info_fld_000/wav_040.mat -------------------------------------------------------------------------------- /PSF_generation/PSF_data/PSF_info_fld_000/PSF_info.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/PSF_data/PSF_info_fld_000/PSF_info.xlsx -------------------------------------------------------------------------------- /PSF_generation/illumination_info/illumination_info.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/illumination_info/illumination_info.xlsx -------------------------------------------------------------------------------- /fov_deformable_net/dcn/dist/DCN-1.0-py3.7-linux-x86_64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/dist/DCN-1.0-py3.7-linux-x86_64.egg -------------------------------------------------------------------------------- /PSF_generation/wav_response/wav_dist_cell_avr_itvl_380_10nm_780.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/wav_response/wav_dist_cell_avr_itvl_380_10nm_780.mat -------------------------------------------------------------------------------- /PSF_generation/wav_response/wav_dist_cell_spl_itvl_380_10nm_780.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/PSF_generation/wav_response/wav_dist_cell_spl_itvl_380_10nm_780.mat -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/modules/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/__pycache__/deform_conv.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/modules/__pycache__/deform_conv.cpython-37.pyc -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/difftrace/__init__.py: -------------------------------------------------------------------------------- 1 | from .surfaces import * 2 | from .optics import * 3 | from .utils import * 4 | from .zernike import * 5 | from .analysis import * 6 | from .optimize import * -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/__pycache__/deform_psroi_pooling.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/modules/__pycache__/deform_psroi_pooling.cpython-37.pyc -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/__pycache__/modulated_deform_conv.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/modules/__pycache__/modulated_deform_conv.cpython-37.pyc -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/DCN.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/DCN.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /fov_deformable_net/dcn/functions/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv_func import DeformConvFunction 2 | from .modulated_deform_conv_func import ModulatedDeformConvFunction 3 | from .deform_psroi_pooling_func import DeformRoIPoolingFunction -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/vision.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/vision.o -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_cpu.o -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/functions/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv_func import DeformConvFunction 2 | from .modulated_deform_conv_func import ModulatedDeformConvFunction 3 | from .deform_psroi_pooling_func import DeformRoIPoolingFunction -------------------------------------------------------------------------------- 
/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_conv_cuda.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_conv_cuda.o -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/modulated_deform_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/modulated_deform_cpu.o -------------------------------------------------------------------------------- /fov_deformable_net/requirements.txt: -------------------------------------------------------------------------------- 1 | cached-property==1.5.2 2 | certifi==2021.10.8 3 | h5py==3.6.0 4 | numpy==1.21.5 5 | opencv-python==4.5.5.62 6 | Pillow==6.2.2 7 | scipy==1.7.3 8 | six==1.16.0 9 | tifffile==2021.11.2 10 | torch==1.2.0 11 | torchvision==0.4.0 12 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_psroi_pooling_cpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_psroi_pooling_cpu.o -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_psroi_pooling_cuda.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_psroi_pooling_cuda.o -------------------------------------------------------------------------------- /imaging_simulation/apply_wb.m: -------------------------------------------------------------------------------- 1 | function img_out = apply_wb(img, wb, inverse_wb) 2 | % inverse or add white balance of image 3 | if inverse_wb 4 | wb = 1 ./ wb; 5 | end 6 | img_out = cat(3, img(:, :, 1) .* wb(1), img(:, :, 2) .* wb(2), img(:, :, 3) .* wb(3)); 7 | end 8 | 9 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/modulated_deform_conv_cuda.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TanGeeGo/ImagingSimulation/HEAD/fov_deformable_net/dcn/build/temp.linux-x86_64-3.7/hdd8T_3/chensq/dyconv/dcn/src/cuda/modulated_deform_conv_cuda.o -------------------------------------------------------------------------------- /fov_deformable_net/dcn/DCN.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: DCN 3 | Version: 1.0 4 | Summary: deformable convolutional networks 5 | Home-page: https://github.com/charlesshang/DCNv2 6 | Author: xvjiarui 7 | License: UNKNOWN 8 | Platform: UNKNOWN 9 | 10 | UNKNOWN 11 | 12 | 
-------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv import DeformConv, _DeformConv, DeformConvPack 2 | from .modulated_deform_conv import ModulatedDeformConv, _ModulatedDeformConv, ModulatedDeformConvPack 3 | from .deform_psroi_pooling import DeformRoIPooling, _DeformRoIPooling, DeformRoIPoolingPack -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv import DeformConv, _DeformConv, DeformConvPack 2 | from .modulated_deform_conv import ModulatedDeformConv, _ModulatedDeformConv, ModulatedDeformConvPack 3 | from .deform_psroi_pooling import DeformRoIPooling, _DeformRoIPooling, DeformRoIPoolingPack -------------------------------------------------------------------------------- /PSF_generation/compute_field_info.m: -------------------------------------------------------------------------------- 1 | function [fld_index,fld_index_int] = compute_field_info(fld_sample_dist,fld_sample_interval) 2 | div = round(fld_sample_dist / fld_sample_interval); 3 | % compute the nearest index 4 | index = div * fld_sample_interval; 5 | index_int = round(index * 100); 6 | % transfer to string 7 | index_int_str = num2str(index_int, '%03d'); 8 | % concatenate the string 9 | fld_index = strcat('PSF_info_fld_', index_int_str); 10 | fld_index_int = index; 11 | end 12 | 13 | -------------------------------------------------------------------------------- /imaging_simulation/apply_ccm.m: -------------------------------------------------------------------------------- 1 | function img_out = apply_ccm(img, ccm, inverse_ccm) 2 | % invert the CCM of the image 3 | if inverse_ccm 4 | % invert the color correction matrix 5 | ccm = inv(ccm); 6 | end 7 | img_r = ccm(1, 1) * img(:, :, 1) + ccm(1, 2) * img(:, :, 2) + ccm(1, 3) * img(:, :, 3); 8 | img_g = ccm(2, 1) * img(:, :, 1) + ccm(2, 2) * img(:, :, 2) + ccm(2, 3) * img(:, :, 3); 9 | img_b = ccm(3, 1) * img(:, :, 1) + ccm(3, 2) * img(:, :, 2) + ccm(3, 3) * img(:, :, 3); 10 | 11 | img_out = cat(3, img_r, img_g, img_b); 12 | end 13 | 14 | -------------------------------------------------------------------------------- /PSF_generation/readme.md: -------------------------------------------------------------------------------- 1 | # PSF generation by ray-tracing and coherent superposition 2 | 3 | 1. Compute the full-field PSF data and save it to the path ./PSF_data/PSF_info_fld_${field_value} 4 | 5 | **${field_value} is a variable field value; e.g., a field height of 1.23 maps to the folder suffix 123 (see compute_field_info.m)** 6 | 7 | 2. Transfer the PSF data from Excel to a MATLAB file 8 | ``` 9 | PSF_data_transfer.m 10 | ``` 11 | 12 | 3. Complete the coherent superposition of the PSFs at different wavelengths, and recombine the separate PSFs according to the wave distribution and lens shading 13 | ``` 14 | PSF_coherent_superposition.m 15 | ```
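 16 | 17 | For step 1, the PSF data can be generated with the Python ray tracer in ./ray_tracing, whose usage mirrors the top-level README. A minimal sketch, assuming the bundled doubleGauss sample lens and the on-axis field; the output path and the PSF_info key are illustrative only: 18 | ``` 19 | import numpy as np 20 | import scipy.io as sio 21 | import torch 22 | import difftrace as dg 23 | 24 | device, dtype = torch.device('cpu'), torch.float64 25 | lens = dg.System('ray_tracing/lens_file/doubleGauss.json', dtype=dtype, device=device) 26 | views = torch.tensor([0.], dtype=dtype, device=device) 27 | wavelengths = torch.tensor([dg.lambda_d], dtype=dtype, device=device) 28 | ana = dg.Analysis(lens, views, wavelengths, dtype=dtype, device=device) 29 | # Kirchhoff-diffraction PSF on a 101x101 image-plane grid 30 | psf = ana.psf_kirchoff(pupil_sampling=201, image_sampling=101, image_delta=0.0005) 31 | # save the PSF for the MATLAB stage (steps 2 and 3) 32 | sio.savemat('./PSF_data/PSF_info_fld_000/psf_wav_d.mat', {'PSF_info': np.asarray(psf)}) 33 | ``` 34 | 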
-------------------------------------------------------------------------------- /PSF_generation/interp_relative_illumination.m: -------------------------------------------------------------------------------- 1 | function relative_illumination_cell = interp_relative_illumination(illumination_path, wave_num) 2 | % initialize the relative illumination cell 3 | relative_illumination_cell = cell(wave_num, 1); 4 | for relative_wave_index = 1:wave_num 5 | % read out the illumination data 6 | [relative_illumination, ~, ~] = xlsread(illumination_path, relative_wave_index); 7 | % interpolate the relative illumination data 8 | relative_illumination_interp = griddedInterpolant(relative_illumination(:, 1), ... 9 | relative_illumination(:, 2), 'pchip'); 10 | % save the interpolant handle 11 | relative_illumination_cell{relative_wave_index} = relative_illumination_interp; 12 | end 13 | end 14 | 15 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/vision.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "deform_psroi_pooling.h" 3 | #include "deform_conv.h" 4 | #include "modulated_deform_conv.h" 5 | 6 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 7 | m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); 8 | m.def("deform_conv_backward", &deform_conv_backward, "deform_conv_backward"); 9 | m.def("modulated_deform_conv_forward", &modulated_deform_conv_forward, "modulated_deform_conv_forward"); 10 | m.def("modulated_deform_conv_backward", &modulated_deform_conv_backward, "modulated_deform_conv_backward"); 11 | m.def("deform_psroi_pooling_forward", &deform_psroi_pooling_forward, "deform_psroi_pooling_forward"); 12 | m.def("deform_psroi_pooling_backward", &deform_psroi_pooling_backward, "deform_psroi_pooling_backward"); 13 | } 14 | -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/lens_file/surf.txt: -------------------------------------------------------------------------------- 1 | surface zemax 2 | type roc distance material radius conic add—terms 3 | O 0.000000000000000E+000 0.000000000000000E+000 vacuum 0.000000000000000E+000 0.000000000000000E+000 4 | S 2.182945635593339 7.671820430862000E-001 SK2 1.6230 4.793112851938000E-002 -4.691280267917000E-003 7.645527682248000E-003 -9.899006584054000E-003 5.082426021028000E-003 -1.568718371190000E-003 3.078604445320000E-004 -4.601858801156000E-005 5 | S 5.415324616573791E+001 8.746657850000000E+000 SK2 2.933920437194000E+001 0.000000000000000E+000 6 | S 1.525219209401110E+002 5.000000000000000E-001 vacuum 2.933920437194000E+001 0.000000000000000E+000 7 | S 3.595062445048480E+001 1.400000000000000E+001 SK16 2.435458095155000E+001 0.000000000000000E+000 8 | S 0.000000000000000E+000 3.776965890000000E+000 F5 2.135822601177000E+001 0.000000000000000E+000 -------------------------------------------------------------------------------- /fov_deformable_net/dcn/DCN.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | README.md 2 | setup.py 3 | /hdd8T_3/chensq/dyconv/dcn/src/vision.cpp 4 | /hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_cpu.cpp 5 | /hdd8T_3/chensq/dyconv/dcn/src/cpu/deform_psroi_pooling_cpu.cpp 6 |
/hdd8T_3/chensq/dyconv/dcn/src/cpu/modulated_deform_cpu.cpp 7 | /hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_conv_cuda.cu 8 | /hdd8T_3/chensq/dyconv/dcn/src/cuda/deform_psroi_pooling_cuda.cu 9 | /hdd8T_3/chensq/dyconv/dcn/src/cuda/modulated_deform_conv_cuda.cu 10 | DCN.egg-info/PKG-INFO 11 | DCN.egg-info/SOURCES.txt 12 | DCN.egg-info/dependency_links.txt 13 | DCN.egg-info/top_level.txt 14 | functions/__init__.py 15 | functions/deform_conv_func.py 16 | functions/deform_psroi_pooling_func.py 17 | functions/modulated_deform_conv_func.py 18 | modules/__init__.py 19 | modules/deform_conv.py 20 | modules/deform_psroi_pooling.py 21 | modules/modulated_deform_conv.py -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/lens_file/hybrid.txt: -------------------------------------------------------------------------------- 1 | ours hybrid lens STOP: FACE1 F2 2 | type roc distance material radius add—terms 3 | O 0 0 vacuum 0 4 | S 6.0646318683537 1.46099937598857 1.656807/54.0966 2.17412550771287 5 | S 11.0451497015743 1.20520021707642 vacuum 2.27725870980071 6 | S -7.58489607235603 2. 1.755201/27.5795 2.34697169790166 7 | S 12.4712133387471 1.09382575731602 vacuum 2.90360909131386 8 | S -195.04576312087 2. 1.620410/60.3236 3.40003778830106 9 | S -7.01883645937378 0.0999999999999999 vacuum 3.75891860742661 10 | S 10.2232169936181 1.85519094383849 1.62299/58.2 4.2744755170082 11 | S -37.6592832626424 2.53056424628492 vacuum 4.24786765315633 12 | S 6.14617639448119 2. 1.62041/60.3236 3.70061206783638 13 | S 0. 0.3 vacuum 3.43155525734689 14 | S 0. 1.95395976288026 1.755201/27.5795 3.47590152042135 15 | S 5.47668666587998 3. vacuum 2.61377919675658 16 | I 0 0. vacuum 2.20160034971201 -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/lens_file/cmos.txt: -------------------------------------------------------------------------------- 1 | ours wide angle lens STOP: FACE11 F4 2 | type roc distance material radius add—terms 3 | O 0 0 vacuum 0 4 | S 19.364 1.5 1.62041/60.34 5.73800093693603 5 | S 15.68 0.47 vacuum 4.80582634066469 6 | S 13.092 1. 1.62041/60.34 4.35291989831639 7 | S 4.207 3.465 vacuum 3.1768685247433 8 | S -85 0.8 1.60886/57.90 2.24110941713105 9 | S 3.59 2.719 vacuum 1.88287511882901 10 | S 13.092 1.931 1.7495/34.99 1.73261487588318 11 | S -52.893 1.39 vacuum 1.56331052583034 12 | S 7.379 3 1.62041/60.34 1.27333384567774 13 | S -9.433 0.168 vacuum 0.903355877317607 14 | S 0. 0.173 vacuum 0.877766469333814 15 | S 9.7 1.73 1.62041/60.34 0.862736976780007 16 | S -4.406 0.2 vacuum 0.890007411571933 17 | S -3.323 0.6 1.71736/29.50 0.901184762474856 18 | S 5.058 0.202 vacuum 1.03043682261129 19 | S 6.516 1.652 1.62041/60.34 1.12230224013961 20 | S -5.42 4.60652637048134 vacuum 1.38915226442061 21 | I 0 0. 
vacuum 2.11816943389221 -------------------------------------------------------------------------------- /PSF_generation/compute_delta_angle.m: -------------------------------------------------------------------------------- 1 | function fld_sample_delta_angle = compute_delta_angle(fld_sample_delta,rotat_index) 2 | switch rotat_index 3 | case 2 4 | fld_sample_delta(2) = -fld_sample_delta(2); 5 | case 3 6 | fld_sample_delta(1) = -fld_sample_delta(1); 7 | case 4 8 | fld_sample_delta(1) = -fld_sample_delta(1); 9 | fld_sample_delta(2) = -fld_sample_delta(2); 10 | end 11 | % compute the rotate angle by delta h 12 | if fld_sample_delta(1) >= 0 13 | % rotate angle in [-90, 90] 14 | angle_tangent = fld_sample_delta(2) / fld_sample_delta(1); 15 | fld_sample_delta_angle = atand(angle_tangent); 16 | elseif fld_sample_delta(1) < 0 17 | % rotate angle in [-180, -90] and [90, 180] 18 | angle_tangent = fld_sample_delta(2) / - fld_sample_delta(1); 19 | if fld_sample_delta(2) >= 0 20 | % rotate angle in [90, 180] 21 | fld_sample_delta_angle = 180 - atand(angle_tangent); 22 | elseif fld_sample_delta(2) < 0 23 | % rotate angle in [-180, -90] 24 | fld_sample_delta_angle = -180 - atand(angle_tangent); 25 | end 26 | end 27 | end 28 | 29 | -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/lens_file/doubleGauss.txt: -------------------------------------------------------------------------------- 1 | doublegauss zemax 2 | type roc distance material radius conic add—terms 3 | O 0.000000000000000E+000 0.000000000000000E+000 vacuum 0.000000000000000E+000 0.000000000000000E+000 4 | S 5.415324616573791E+001 8.746657850000000E+000 SK2 2.933920437194000E+001 0.000000000000000E+000 5 | S 1.525219209401110E+002 5.000000000000000E-001 vacuum 2.826658482256000E+001 0.000000000000000E+000 6 | S 3.595062445048480E+001 1.400000000000000E+001 SK16 2.435458095155000E+001 0.000000000000000E+000 7 | S 0.000000000000000E+000 3.776965890000000E+000 F5 2.135822601177000E+001 0.000000000000000E+000 8 | S 2.226992461800949E+001 1.425305930000000E+001 vacuum 1.493283368295000E+001 0.000000000000000E+000 9 | A 0.000000000000000E+000 1.242812910000000E+001 vacuum 1.000000000000000E+001 0.000000000000000E+000 10 | S -2.568503303046087E+001 3.776965890000000E+000 F5 1.330133689386000E+001 0.000000000000000E+000 11 | S 0 1.083392850000000E+001 SK16 1.662503180280000E+001 0.000000000000000E+000 12 | S -3.698022072863240E+001 5.000000000000000E-001 vacuum 1.906148230444000E+001 0.000000000000000E+000 13 | S 1.964173340965081E+002 6.858174910000000E+000 SK16 2.147382942899000E+001 0.000000000000000E+000 14 | S -6.714755002373626E+001 5.731453790500000E+001 vacuum 2.179718377169000E+001 0.000000000000000E+000 15 | I 0.000000000000000E+000 0.000000000000000E+000 vacuum 2.456914634886920E+001 0.000000000000000E+000 -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/deform_conv_cpu.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor 5 | deform_conv_cpu_forward(const at::Tensor &input, 6 | const at::Tensor &weight, 7 | const at::Tensor &bias, 8 | const at::Tensor &offset, 9 | const int kernel_h, 10 | const int kernel_w, 11 | const int stride_h, 12 | const int stride_w, 13 | const int pad_h, 14 | const int pad_w, 15 | const int dilation_h, 16 | const int dilation_w, 17 | const int group, 18 | const int deformable_group, 19 | const int im2col_step); 20 | 
21 | std::vector 22 | deform_conv_cpu_backward(const at::Tensor &input, 23 | const at::Tensor &weight, 24 | const at::Tensor &bias, 25 | const at::Tensor &offset, 26 | const at::Tensor &grad_output, 27 | const int kernel_h, 28 | const int kernel_w, 29 | const int stride_h, 30 | const int stride_w, 31 | const int pad_h, 32 | const int pad_w, 33 | const int dilation_h, 34 | const int dilation_w, 35 | const int group, 36 | const int deformable_group, 37 | const int im2col_step); 38 | 39 | 40 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cuda/deform_conv_cuda.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor 5 | deform_conv_cuda_forward(const at::Tensor &input, 6 | const at::Tensor &weight, 7 | const at::Tensor &bias, 8 | const at::Tensor &offset, 9 | const int kernel_h, 10 | const int kernel_w, 11 | const int stride_h, 12 | const int stride_w, 13 | const int pad_h, 14 | const int pad_w, 15 | const int dilation_h, 16 | const int dilation_w, 17 | const int group, 18 | const int deformable_group, 19 | const int im2col_step); 20 | 21 | std::vector 22 | deform_conv_cuda_backward(const at::Tensor &input, 23 | const at::Tensor &weight, 24 | const at::Tensor &bias, 25 | const at::Tensor &offset, 26 | const at::Tensor &grad_output, 27 | const int kernel_h, 28 | const int kernel_w, 29 | const int stride_h, 30 | const int stride_w, 31 | const int pad_h, 32 | const int pad_w, 33 | const int dilation_h, 34 | const int dilation_w, 35 | const int group, 36 | const int deformable_group, 37 | const int im2col_step); 38 | 39 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/deform_psroi_pooling_cpu.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | 5 | std::tuple 6 | deform_psroi_pooling_cpu_forward(const at::Tensor &input, 7 | const at::Tensor &bbox, 8 | const at::Tensor &trans, 9 | const int no_trans, 10 | const float spatial_scale, 11 | const int output_dim, 12 | const int group_size, 13 | const int pooled_size, 14 | const int part_size, 15 | const int sample_per_part, 16 | const float trans_std); 17 | 18 | std::tuple 19 | deform_psroi_pooling_cpu_backward(const at::Tensor &out_grad, 20 | const at::Tensor &input, 21 | const at::Tensor &bbox, 22 | const at::Tensor &trans, 23 | const at::Tensor &top_count, 24 | const int no_trans, 25 | const float spatial_scale, 26 | const int output_dim, 27 | const int group_size, 28 | const int pooled_size, 29 | const int part_size, 30 | const int sample_per_part, 31 | const float trans_std); -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cuda/deform_psroi_pooling_cuda.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | std::tuple 5 | deform_psroi_pooling_cuda_forward(const at::Tensor &input, 6 | const at::Tensor &bbox, 7 | const at::Tensor &trans, 8 | const int no_trans, 9 | const float spatial_scale, 10 | const int output_dim, 11 | const int group_size, 12 | const int pooled_size, 13 | const int part_size, 14 | const int sample_per_part, 15 | const float trans_std); 16 | 17 | std::tuple 18 | deform_psroi_pooling_cuda_backward(const at::Tensor &out_grad, 19 | const at::Tensor &input, 20 | const at::Tensor &bbox, 21 | const at::Tensor &trans, 22 | const at::Tensor 
&top_count, 23 | const int no_trans, 24 | const float spatial_scale, 25 | const int output_dim, 26 | const int group_size, 27 | const int pooled_size, 28 | const int part_size, 29 | const int sample_per_part, 30 | const float trans_std); -------------------------------------------------------------------------------- /PSF_generation/compute_h_w_delta.m: -------------------------------------------------------------------------------- 1 | function PSF_center_delta_pixel = compute_h_w_delta(PSF_center_delta_mm,fld_sample_delta_angle,pixel_length) 2 | if (-180 < fld_sample_delta_angle)&&(fld_sample_delta_angle <= -90) 3 | PSF_center_delta_mm_h = PSF_center_delta_mm * ... 4 | cosd(fld_sample_delta_angle + 180); 5 | PSF_center_delta_mm_w = PSF_center_delta_mm * ... 6 | sind(fld_sample_delta_angle + 180); 7 | elseif (-90 < fld_sample_delta_angle)&&(fld_sample_delta_angle <= 0) 8 | PSF_center_delta_mm_h = -PSF_center_delta_mm * ... 9 | sind(fld_sample_delta_angle + 90); 10 | PSF_center_delta_mm_w = PSF_center_delta_mm * ... 11 | cosd(fld_sample_delta_angle + 90); 12 | elseif (0 < fld_sample_delta_angle)&&(fld_sample_delta_angle <= 90) 13 | PSF_center_delta_mm_h = -PSF_center_delta_mm * ... 14 | sind(90 - fld_sample_delta_angle); 15 | PSF_center_delta_mm_w = -PSF_center_delta_mm * ... 16 | cosd(90 - fld_sample_delta_angle); 17 | elseif (90 < fld_sample_delta_angle)&&(fld_sample_delta_angle <= 180) 18 | PSF_center_delta_mm_h = PSF_center_delta_mm * ... 19 | cosd(180 - fld_sample_delta_angle); 20 | PSF_center_delta_mm_w = -PSF_center_delta_mm * ... 21 | sind(180 - fld_sample_delta_angle); 22 | end 23 | PSF_center_delta_pixel_h = (PSF_center_delta_mm_h * 1000) / pixel_length; 24 | PSF_center_delta_pixel_w = (PSF_center_delta_mm_w * 1000) / pixel_length; 25 | PSF_center_delta_pixel = [PSF_center_delta_pixel_h, PSF_center_delta_pixel_w]; 26 | end 27 | 28 | -------------------------------------------------------------------------------- /imaging_simulation/mosaicing.m: -------------------------------------------------------------------------------- 1 | function img_mosaiced = mosaicing(img_aberration, bayer_pattern) 2 | [H, W, ~] = size(img_aberration); 3 | img_mosaiced = zeros(H, W); 4 | bayer_pattern = upper(bayer_pattern); 5 | if strcmp(bayer_pattern, 'BGGR') 6 | img_mosaiced(1:2:end, 1:2:end) = img_aberration(1:2:end, 1:2:end, 3); 7 | img_mosaiced(2:2:end, 1:2:end) = img_aberration(2:2:end, 1:2:end, 2); 8 | img_mosaiced(1:2:end, 2:2:end) = img_aberration(1:2:end, 2:2:end, 2); 9 | img_mosaiced(2:2:end, 2:2:end) = img_aberration(2:2:end, 2:2:end, 1); 10 | elseif strcmp(bayer_pattern, 'GBRG') 11 | img_mosaiced(1:2:end, 1:2:end) = img_aberration(1:2:end, 1:2:end, 2); 12 | img_mosaiced(2:2:end, 1:2:end) = img_aberration(2:2:end, 1:2:end, 1); 13 | img_mosaiced(1:2:end, 2:2:end) = img_aberration(1:2:end, 2:2:end, 3); 14 | img_mosaiced(2:2:end, 2:2:end) = img_aberration(2:2:end, 2:2:end, 2); 15 | elseif strcmp(bayer_pattern, 'GRBG') 16 | img_mosaiced(1:2:end, 1:2:end) = img_aberration(1:2:end, 1:2:end, 2); 17 | img_mosaiced(2:2:end, 1:2:end) = img_aberration(2:2:end, 1:2:end, 3); 18 | img_mosaiced(1:2:end, 2:2:end) = img_aberration(1:2:end, 2:2:end, 1); 19 | img_mosaiced(2:2:end, 2:2:end) = img_aberration(2:2:end, 2:2:end, 2); 20 | elseif strcmp(bayer_pattern, 'RGGB') 21 | img_mosaiced(1:2:end, 1:2:end) = img_aberration(1:2:end, 1:2:end, 1); 22 | img_mosaiced(2:2:end, 1:2:end) = img_aberration(2:2:end, 1:2:end, 2); 23 | img_mosaiced(1:2:end, 2:2:end) = img_aberration(1:2:end, 2:2:end, 2); 24 | 
img_mosaiced(2:2:end, 2:2:end) = img_aberration(2:2:end, 2:2:end, 3); 25 | else 26 | error('Unknown Bayer Pattern of %s!', bayer_pattern); 27 | end 28 | end 29 | 30 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/modulated_deform_conv_cpu.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor 5 | modulated_deform_conv_cpu_forward(const at::Tensor &input, 6 | const at::Tensor &weight, 7 | const at::Tensor &bias, 8 | const at::Tensor &offset, 9 | const at::Tensor &mask, 10 | const int kernel_h, 11 | const int kernel_w, 12 | const int stride_h, 13 | const int stride_w, 14 | const int pad_h, 15 | const int pad_w, 16 | const int dilation_h, 17 | const int dilation_w, 18 | const int group, 19 | const int deformable_group, 20 | const int im2col_step); 21 | 22 | std::vector 23 | modulated_deform_conv_cpu_backward(const at::Tensor &input, 24 | const at::Tensor &weight, 25 | const at::Tensor &bias, 26 | const at::Tensor &offset, 27 | const at::Tensor &mask, 28 | const at::Tensor &grad_output, 29 | const int kernel_h, 30 | const int kernel_w, 31 | const int stride_h, 32 | const int stride_w, 33 | const int pad_h, 34 | const int pad_w, 35 | const int dilation_h, 36 | const int dilation_w, 37 | const int group, 38 | const int deformable_group, 39 | const int im2col_step); 40 | 41 | 42 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cuda/modulated_deform_conv_cuda.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor 5 | modulated_deform_conv_cuda_forward(const at::Tensor &input, 6 | const at::Tensor &weight, 7 | const at::Tensor &bias, 8 | const at::Tensor &offset, 9 | const at::Tensor &mask, 10 | const int kernel_h, 11 | const int kernel_w, 12 | const int stride_h, 13 | const int stride_w, 14 | const int pad_h, 15 | const int pad_w, 16 | const int dilation_h, 17 | const int dilation_w, 18 | const int group, 19 | const int deformable_group, 20 | const int im2col_step); 21 | 22 | std::vector 23 | modulated_deform_conv_cuda_backward(const at::Tensor &input, 24 | const at::Tensor &weight, 25 | const at::Tensor &bias, 26 | const at::Tensor &offset, 27 | const at::Tensor &mask, 28 | const at::Tensor &grad_output, 29 | const int kernel_h, 30 | const int kernel_w, 31 | const int stride_h, 32 | const int stride_w, 33 | const int pad_h, 34 | const int pad_w, 35 | const int dilation_h, 36 | const int dilation_w, 37 | const int group, 38 | const int deformable_group, 39 | const int im2col_step); 40 | 41 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/deform_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | 7 | at::Tensor 8 | deform_conv_cpu_forward(const at::Tensor &input, 9 | const at::Tensor &weight, 10 | const at::Tensor &bias, 11 | const at::Tensor &offset, 12 | const int kernel_h, 13 | const int kernel_w, 14 | const int stride_h, 15 | const int stride_w, 16 | const int pad_h, 17 | const int pad_w, 18 | const int dilation_h, 19 | const int dilation_w, 20 | const int group, 21 | const int deformable_group, 22 | const int im2col_step) 23 | { 24 | AT_ERROR("Not implement on cpu"); 25 | } 26 | 27 | std::vector 28 | deform_conv_cpu_backward(const at::Tensor &input, 29 | const 
at::Tensor &weight, 30 | const at::Tensor &bias, 31 | const at::Tensor &offset, 32 | const at::Tensor &grad_output, 33 | const int kernel_h, 34 | const int kernel_w, 35 | const int stride_h, 36 | const int stride_w, 37 | const int pad_h, 38 | const int pad_w, 39 | const int dilation_h, 40 | const int dilation_w, 41 | const int group, 42 | const int deformable_group, 43 | const int im2col_step) 44 | { 45 | AT_ERROR("Not implement on cpu"); 46 | } 47 | 48 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/deform_psroi_pooling_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | 7 | std::tuple 8 | deform_psroi_pooling_cpu_forward(const at::Tensor &input, 9 | const at::Tensor &bbox, 10 | const at::Tensor &trans, 11 | const int no_trans, 12 | const float spatial_scale, 13 | const int output_dim, 14 | const int group_size, 15 | const int pooled_size, 16 | const int part_size, 17 | const int sample_per_part, 18 | const float trans_std) 19 | { 20 | AT_ERROR("Not implement on cpu"); 21 | } 22 | 23 | std::tuple 24 | deform_psroi_pooling_cpu_backward(const at::Tensor &out_grad, 25 | const at::Tensor &input, 26 | const at::Tensor &bbox, 27 | const at::Tensor &trans, 28 | const at::Tensor &top_count, 29 | const int no_trans, 30 | const float spatial_scale, 31 | const int output_dim, 32 | const int group_size, 33 | const int pooled_size, 34 | const int part_size, 35 | const int sample_per_part, 36 | const float trans_std) 37 | { 38 | AT_ERROR("Not implement on cpu"); 39 | } -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/cpu/modulated_deform_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | 7 | at::Tensor 8 | modulated_deform_conv_cpu_forward(const at::Tensor &input, 9 | const at::Tensor &weight, 10 | const at::Tensor &bias, 11 | const at::Tensor &offset, 12 | const at::Tensor &mask, 13 | const int kernel_h, 14 | const int kernel_w, 15 | const int stride_h, 16 | const int stride_w, 17 | const int pad_h, 18 | const int pad_w, 19 | const int dilation_h, 20 | const int dilation_w, 21 | const int group, 22 | const int deformable_group, 23 | const int im2col_step) 24 | { 25 | AT_ERROR("Not implement on cpu"); 26 | } 27 | 28 | std::vector 29 | modulated_deform_conv_cpu_backward(const at::Tensor &input, 30 | const at::Tensor &weight, 31 | const at::Tensor &bias, 32 | const at::Tensor &offset, 33 | const at::Tensor &mask, 34 | const at::Tensor &grad_output, 35 | const int kernel_h, 36 | const int kernel_w, 37 | const int stride_h, 38 | const int stride_w, 39 | const int pad_h, 40 | const int pad_w, 41 | const int dilation_h, 42 | const int dilation_w, 43 | const int group, 44 | const int deformable_group, 45 | const int im2col_step) 46 | { 47 | AT_ERROR("Not implement on cpu"); 48 | } 49 | 50 | -------------------------------------------------------------------------------- /fov_deformable_net/readme.md: -------------------------------------------------------------------------------- 1 | 2 | ## This is the official PyTorch implementation of the FoV deformable network. 3 | 4 | ### Prerequisite 5 | 6 | * Python 3.7 7 | * MATLAB 8 | * Other Python packages are installed as follows: 9 | ```python 10 | pip install -r requirements.txt 11 | ``` 12 | *Note*: Please make sure your machine has a GPU and that its driver version is compatible with the CUDA version! This will reduce the problems when installing the DCNv2 module later. 13 | 14 | * #### The Deformable ConvNets V2 (DCNv2) module in our code adopts [Xujiarui's Implementation](https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0). We recommend you recompile the code according to your machine and Python environment as follows: 15 | 16 | ```python 17 | cd ~/dcn 18 | python setup.py develop 19 | ``` 20 | 21 | This may cause many issues; please open an issue and ask me if you have any problems! 22 | 23 | ### 1. Prepare the dataset of your camera by: 24 | 25 | ```python 26 | python dataset_generator.py 27 | ``` 28 | 29 | Note that the path information in this file needs to be updated to the paths on your machine (a sketch of loading the generated dataset is given at the end of this readme): 30 | 31 | ```python 32 | date_ind = "2022xxxx" # date information for h5py file 33 | dataset_type = "valid" # type of dataset "train" or "valid" 34 | camera_idx = "camera0x" # index of camera "camera01" to "camera05" 35 | base_path = "./synthetic_datasets" # system path 36 | input_dir = "input_rgb_2022xxxx" # input data dir 37 | label_dir = "label_rgb" # label data dir 38 | if_mask = False # whether add mask 39 | ``` 40 | 41 | ### 2. Check the option file information 42 | 43 | * #### Check the data path and other hyper-parameters for training 44 | 45 | Note: The training information and the test information are in the same option.py file! 46 | 47 | ### 3. Training the FoV deformable network 48 | 49 | ```python 50 | python train.py 51 | ``` 52 | 53 | ### 4. Test on the actual photographs of your camera 54 | 55 | ```python 56 | python test_real.py 57 | ```
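 58 | 59 | *For reference, the h5 file produced by `dataset_generator.py` can be loaded with the `Dataset_from_h5` class in `dataloader.py`. A minimal sketch; the file name, batch size, and worker count below are illustrative:* 60 | ```python 61 | import torch 62 | from dataloader import Dataset_from_h5 63 | 64 | train_set = Dataset_from_h5('./synthetic_datasets/train_2022xxxx.h5', recrop_patch_size=128, sigma=5, train=True) 65 | train_loader = torch.utils.data.DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4) 66 | inp, label = next(iter(train_loader))  # inp: (B, 5, 128, 128), RGB plus 2-channel FoV info; label: (B, 3, 128, 128) 67 | ``` 68 | 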
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Optical Aberration Correction in Postprocessing using Imaging Simulation (TOG 2021, PostRec. SIGGRAPH 2022) 2 | ## The code release has finally been approved by our funding agency. Thanks for waiting! 3 | 4 | by Shiqi Chen, Huajun Feng, Dexin Pan, Zhihai Xu, Qi Li, and Yueting Chen 5 | 6 | This is the official PyTorch implementation of "**Optical Aberration Correction in Postprocessing using Imaging Simulation**" [[Paper]](https://dl.acm.org/doi/abs/10.1145/3474088) 7 | 8 | 🚩 **Updates (New Features)** 9 | - ✅ Oct. 19, 2023. Added the illustration of PSF calculation and the analysis-module script in ray tracing. 10 | 11 | ## First, let me introduce how to calculate the PSF of a given optical system 12 | 13 | ### State the lens and start the analysis 14 | ```python 15 | import torch 16 | import difftrace as dg 17 | # load the lens 18 | device = torch.device('cpu') 19 | dtype = torch.float64 20 | lens = dg.System('lens_file/doubleGauss.json', torch.float64, torch.device('cpu')) 21 | # define analysis 22 | views = torch.tensor([0., 10., 14.], dtype=dtype, device=device) 23 | wavelengths = torch.tensor([dg.lambda_F, dg.lambda_d, dg.lambda_C], dtype=dtype, device=device) 24 | ana = dg.Analysis(lens, views, wavelengths, dtype=dtype, device=device) 25 | ``` 26 | ### Calculate the PSFs 27 | ```python 28 | import matplotlib.pyplot as plt 29 | pupil_sampling = 201 30 | image_sampling = 101 31 | image_delta = 0.0005 32 | sample_distribution = 'hexapolar' 33 | psf_kirchoff = ana.psf_kirchoff(pupil_sampling=pupil_sampling, 34 | image_sampling=image_sampling, 35 | image_delta=image_delta) 36 | plt.imshow(psf_kirchoff, cmap='jet') 37 | ``` 38 | 39 | *here we optimize the precalculation of the entrance pupil and the ray sampling, so feel free to use this method directly*
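 40 | 41 | A quick sanity check on the result; this is a sketch that assumes `psf_kirchoff` behaves like a 2-D array and that `image_delta` is the image-plane sample pitch in mm (our reading, not guaranteed by the API): 42 | ```python 43 | import numpy as np 44 | psf = np.asarray(psf_kirchoff, dtype=np.float64) 45 | psf /= psf.sum()  # normalize to unit energy before using the PSF as a convolution kernel 46 | print(psf.shape)  # expected (image_sampling, image_sampling) 47 | print(image_sampling * image_delta)  # sampled window: 101 * 0.0005 = 0.0505 mm 48 | ``` 49 | 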
50 | ### We also provide many other analyses such as `spot diagram`, `mtf`, `wavefront map`, ... Please check the ./PSF_generation/ray_tracing/analysis.ipynb for more information 51 | 52 | ### This repo is still being updated. -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/optimize.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# import the packages\n", 10 | "import numpy as np\n", 11 | "import torch\n", 12 | "import difftrace as dg\n", 13 | "# load the lens\n", 14 | "device = torch.device('cpu')\n", 15 | "dtype = torch.float64\n", 16 | "wavelengths = torch.Tensor([dg.lambda_F, dg.lambda_d, dg.lambda_C]).to(device)\n", 17 | "views = torch.tensor(np.array([0., 10., 14.]), dtype=dtype, device=device)\n", 18 | "lens = dg.System('lens_file/doubleGauss.json', dtype=dtype, device=device)\n", 19 | "ana = dg.Analysis(lens, views=views, wavelengths=wavelengths, dtype=dtype, device=device)\n", 20 | "merit_dict = {\"spot_diagram\": {\n", 21 | " \"weight\": 1.0,\n", 22 | " \"target\": [0.0, 0.0, 0.0], # the target must align with the output shape of the merit function\n", 23 | " \"views\": views,\n", 24 | " \"wavelengths\": wavelengths,\n", 25 | " \"M\": 6,\n", 26 | " \"R\": None,\n", 27 | " \"sampling\": \"grid\",\n", 28 | " \"entrance_pupil\": True,\n", 29 | " \"show\": False\n", 30 | " }}" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# this optimization part is under construction, will be updated later\n", 40 | "opt = dg.Optimize(lens, views, wavelengths, merit_dict, False, dtype, device)\n", 41 | "opt.optimize_wz_trf()" 42 | ] 43 | } 44 | ], 45 | "metadata": { 46 | "kernelspec": { 47 | "display_name": "pytorch201", 48 | "language": "python", 49 | "name": "python3" 50 | }, 51 | "language_info": { 52 | "codemirror_mode": { 53 | "name": "ipython", 54 | "version": 3 55 | }, 56 | "file_extension": ".py", 57 | "mimetype": "text/x-python", 58 | "name": "python", 59 | "nbconvert_exporter": "python", 60 | "pygments_lexer": "ipython3", 61 | "version": "3.9.6" 62 | } 63 | }, 64 | "nbformat": 4, 65 | "nbformat_minor": 2 66 | 
} 67 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import glob 5 | 6 | import torch 7 | 8 | from torch.utils.cpp_extension import CUDA_HOME 9 | from torch.utils.cpp_extension import CppExtension 10 | from torch.utils.cpp_extension import CUDAExtension 11 | 12 | from setuptools import find_packages 13 | from setuptools import setup 14 | 15 | requirements = ["torch", "torchvision"] 16 | 17 | def get_extensions(): 18 | this_dir = os.path.dirname(os.path.abspath(__file__)) 19 | extensions_dir = os.path.join(this_dir, "src") 20 | 21 | main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) 22 | source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) 23 | source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) 24 | 25 | sources = main_file + source_cpu 26 | extension = CppExtension 27 | extra_compile_args = {"cxx": []} 28 | define_macros = [] 29 | 30 | if torch.cuda.is_available() and CUDA_HOME is not None: 31 | extension = CUDAExtension 32 | sources += source_cuda 33 | define_macros += [("WITH_CUDA", None)] 34 | extra_compile_args["nvcc"] = [ 35 | "-DCUDA_HAS_FP16=1", 36 | "-D__CUDA_NO_HALF_OPERATORS__", 37 | "-D__CUDA_NO_HALF_CONVERSIONS__", 38 | "-D__CUDA_NO_HALF2_OPERATORS__", 39 | ] 40 | else: 41 | raise NotImplementedError('Cuda is not availabel') 42 | 43 | sources = [os.path.join(extensions_dir, s) for s in sources] 44 | include_dirs = [extensions_dir] 45 | ext_modules = [ 46 | extension( 47 | "DCN", 48 | sources, 49 | include_dirs=include_dirs, 50 | define_macros=define_macros, 51 | extra_compile_args=extra_compile_args, 52 | ) 53 | ] 54 | return ext_modules 55 | 56 | setup( 57 | name="DCN", 58 | version="1.0", 59 | author="xvjiarui", 60 | url="https://github.com/charlesshang/DCNv2", 61 | description="deformable convolutional networks", 62 | packages=find_packages(exclude=("configs", "tests",)), 63 | # install_requires=requirements, 64 | ext_modules=get_extensions(), 65 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, 66 | ) -------------------------------------------------------------------------------- /fov_deformable_net/dataloader.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import numpy as np 3 | import torch 4 | import random 5 | import h5py 6 | import time 7 | import torch.utils.data as data 8 | 9 | class Dataset_from_h5(data.Dataset): 10 | 11 | def __init__(self, src_path, recrop_patch_size=128, sigma=5, train=True): 12 | self.path = src_path 13 | self.recrop_size = recrop_patch_size 14 | self.sigma = sigma 15 | self.if_train = train 16 | h5f = h5py.File(self.path, 'r') 17 | self.keys = list(h5f.keys()) 18 | if self.if_train: 19 | random.shuffle(self.keys) 20 | h5f.close() 21 | 22 | def __getitem__(self, index): 23 | h5f = h5py.File(self.path, 'r') 24 | key = self.keys[index] 25 | data = np.array(h5f[key]).reshape(h5f[key].shape) 26 | h5f.close() 27 | 28 | if self.if_train: 29 | (H, W, C) = data.shape 30 | rnd_h = random.randint(0, max(0, H - self.recrop_size)) 31 | rnd_w = random.randint(0, max(0, W - self.recrop_size)) 32 | patch = data[rnd_h:rnd_h + self.recrop_size, rnd_w:rnd_w + self.recrop_size, :] 33 | 34 | p = 0.5 35 | if random.random() > p: #RandomRot90 36 | patch = patch.transpose(1, 0, 2) 37 | if random.random() > p: #RandomHorizontalFlip 38 | patch = patch[:, 
::-1, :] 39 | if random.random() > p: #RandomVerticalFlip 40 | patch = patch[::-1, :, :] 41 | else: 42 | patch = data 43 | 44 | input = patch[:, :, 0:3] 45 | if self.sigma: 46 | noise = np.random.normal(loc=0, scale=self.sigma/255.0, size=input.shape) 47 | input = input + noise 48 | input = np.clip(input, 0.0, 1.0) 49 | 50 | label = patch[:, :, 3:6] 51 | fld_info = patch[:, :, 6:8] 52 | 53 | input_wz_fld = np.concatenate([input, fld_info], 2) 54 | 55 | input = torch.from_numpy(np.ascontiguousarray(np.transpose(input_wz_fld, (2, 0, 1)))).float() 56 | label = torch.from_numpy(np.ascontiguousarray(np.transpose(label, (2, 0, 1)))).float() 57 | 58 | return input, label 59 | 60 | def __len__(self): 61 | return len(self.keys) 62 | -------------------------------------------------------------------------------- /PSF_generation/pad_PSF.m: -------------------------------------------------------------------------------- 1 | function [wav_PSF_pad, ori_sample_pos] = pad_PSF(wav_PSF, ... 2 | PSF_center_delta_pixel, ... 3 | data_cell_length) 4 | [PSF_h, PSF_w] = size(wav_PSF); 5 | if (PSF_center_delta_pixel(2) < 0)&&(PSF_center_delta_pixel(1) <= 0) 6 | % pad right bottom 7 | pad_h = abs(floor(PSF_center_delta_pixel(1))) * data_cell_length; 8 | pad_w = abs(floor(PSF_center_delta_pixel(2))) * data_cell_length; 9 | wav_PSF_pad = zeros(PSF_h + pad_h, PSF_w + pad_w); 10 | wav_PSF_pad(1:PSF_h, 1:PSF_w) = wav_PSF; 11 | ori_sample_pos = round([-PSF_center_delta_pixel(1), ... 12 | -PSF_center_delta_pixel(2)] * data_cell_length); 13 | elseif (PSF_center_delta_pixel(2) >= 0)&&(PSF_center_delta_pixel(1) < 0) 14 | % pad left bottom 15 | pad_h = abs(floor(PSF_center_delta_pixel(1))) * data_cell_length; 16 | pad_w = abs(ceil(PSF_center_delta_pixel(2))) * data_cell_length; 17 | wav_PSF_pad = zeros(PSF_h + pad_h, PSF_w + pad_w); 18 | wav_PSF_pad(1:PSF_h, pad_w + 1:pad_w + PSF_w) = wav_PSF; 19 | ori_sample_pos = round([-PSF_center_delta_pixel(1)*data_cell_length, ... 20 | pad_w - PSF_center_delta_pixel(2)*data_cell_length]); 21 | elseif (PSF_center_delta_pixel(2) > 0)&&(PSF_center_delta_pixel(1) >= 0) 22 | % pad left top 23 | pad_h = abs(ceil(PSF_center_delta_pixel(1))) * data_cell_length; 24 | pad_w = abs(ceil(PSF_center_delta_pixel(2))) * data_cell_length; 25 | wav_PSF_pad = zeros(PSF_h + pad_h, PSF_w + pad_w); 26 | wav_PSF_pad(pad_h + 1:PSF_h + pad_h, pad_w + 1:pad_w + PSF_w) = wav_PSF; 27 | ori_sample_pos = round([pad_h - PSF_center_delta_pixel(1)*data_cell_length, ... 28 | pad_w - PSF_center_delta_pixel(2)*data_cell_length]); 29 | elseif (PSF_center_delta_pixel(2) <= 0)&&(PSF_center_delta_pixel(1) >= 0) 30 | % pad right top 31 | pad_h = abs(ceil(PSF_center_delta_pixel(1))) * data_cell_length; 32 | pad_w = abs(floor(PSF_center_delta_pixel(2))) * data_cell_length; 33 | wav_PSF_pad = zeros(PSF_h + pad_h, PSF_w + pad_w); 34 | wav_PSF_pad(pad_h + 1:PSF_h + pad_h, 1:PSF_w) = wav_PSF; 35 | ori_sample_pos = round([pad_h - PSF_center_delta_pixel(1)*data_cell_length, ... 36 | -PSF_center_delta_pixel(2)*data_cell_length]); 37 | end 38 | end -------------------------------------------------------------------------------- /imaging_simulation/patch_conv.m: -------------------------------------------------------------------------------- 1 | function conved_img = patch_conv(img, PSF_cell_path, PSF_h_num, ... 
2 | PSF_w_num, patch_length, PSF_uniform_size) 3 | % initializing 4 | conved_img = zeros(size(img)); 5 | % pad the input image with the PSF_uniform_size 6 | % which is the largest size of PSF 7 | img_pad = padarray(img, [PSF_uniform_size, PSF_uniform_size], 'replicate', 'both'); 8 | for PSF_index_h = 1:PSF_h_num 9 | for PSF_index_w = 1:PSF_w_num 10 | pad_img_h_range = (PSF_index_h-1)*patch_length+1 : ... 11 | (PSF_index_h)*patch_length+2*PSF_uniform_size; 12 | pad_img_w_range = (PSF_index_w-1)*patch_length+1 : ... 13 | (PSF_index_w)*patch_length+2*PSF_uniform_size; 14 | img_pad_patch = img_pad(pad_img_h_range, pad_img_w_range, :); 15 | % acquire the corresponding PSF 16 | PSF_path = strcat(PSF_cell_path, 'PSF_cell_', ... 17 | num2str(PSF_index_h, '%03d'), '_', ... 18 | num2str(PSF_index_w, '%03d'), '.mat'); 19 | PSF = load(PSF_path); 20 | PSF = PSF.PSF_info; 21 | % convolution 22 | conved_img_pad_patch_r = imfilter(img_pad_patch(:, :, 1), PSF(:, :, 1),... 23 | 'conv', 'same', 'replicate'); 24 | conved_img_pad_patch_g = imfilter(img_pad_patch(:, :, 2), PSF(:, :, 2),... 25 | 'conv', 'same', 'replicate'); 26 | conved_img_pad_patch_b = imfilter(img_pad_patch(:, :, 3), PSF(:, :, 3),... 27 | 'conv', 'same', 'replicate'); 28 | conved_img_pad_patch = cat(3, conved_img_pad_patch_r, ... 29 | conved_img_pad_patch_g, ... 30 | conved_img_pad_patch_b); 31 | % patch back to the conved_img 32 | img_h_range = (PSF_index_h-1)*patch_length+1 : ... 33 | (PSF_index_h)*patch_length; 34 | img_w_range = (PSF_index_w-1)*patch_length+1 : ... 35 | (PSF_index_w)*patch_length; 36 | conved_img(img_h_range, img_w_range, :) = ... 37 | conved_img_pad_patch(PSF_uniform_size+1 : PSF_uniform_size+patch_length, ... 38 | PSF_uniform_size+1 : PSF_uniform_size+patch_length, :); 39 | end 40 | end 41 | end 42 | 43 | -------------------------------------------------------------------------------- /PSF_generation/judge_main_wav.m: -------------------------------------------------------------------------------- 1 | function [main_center_h, PSF_fld_tmp, PSF_wav_tmp] = judge_main_wav(fld_sample_pre, fld_index_int) 2 | % define the wavelength of main light 3 | % notice!! this data is different between optical designs!! 4 | % please re-calibrate the result for a new optical design!! 
5 | if (0.00 <= fld_index_int)&&(fld_index_int < 0.03) 6 | % wavelength of main light is 730nm 7 | fld_sample_mat = strcat(fld_sample_pre, '\wav_073.mat'); 8 | elseif (0.04 <= fld_index_int)&&(fld_index_int < 1.23) 9 | % wavelength of main light is 400nm 10 | fld_sample_mat = strcat(fld_sample_pre, '\wav_040.mat'); 11 | elseif (1.24 <= fld_index_int)&&(fld_index_int < 2.51) 12 | % wavelength of main light is 730nm 13 | fld_sample_mat = strcat(fld_sample_pre, '\wav_073.mat'); 14 | elseif (2.52 <= fld_index_int)&&(fld_index_int < 2.71) 15 | % wavelength of main light is 400nm 16 | fld_sample_mat = strcat(fld_sample_pre, '\wav_040.mat'); 17 | elseif (2.72 <= fld_index_int)&&(fld_index_int < 3.71) 18 | % wavelength of main light is 730nm 19 | fld_sample_mat = strcat(fld_sample_pre, '\wav_073.mat'); 20 | elseif (3.71 <= fld_index_int)&&(fld_index_int < 3.73) 21 | % wavelength of main light is 460nm 22 | fld_sample_mat = strcat(fld_sample_pre, '\wav_046.mat'); 23 | elseif (3.73 <= fld_index_int)&&(fld_index_int < 3.77) 24 | % wavelength of main light is 450nm 25 | fld_sample_mat = strcat(fld_sample_pre, '\wav_045.mat'); 26 | elseif (3.77 <= fld_index_int)&&(fld_index_int < 3.80) 27 | % wavelength of main light is 440nm 28 | fld_sample_mat = strcat(fld_sample_pre, '\wav_044.mat'); 29 | elseif (3.80 <= fld_index_int)&&(fld_index_int < 3.82) 30 | % wavelength of main light is 430nm 31 | fld_sample_mat = strcat(fld_sample_pre, '\wav_043.mat'); 32 | elseif (3.82 <= fld_index_int)&&(fld_index_int <= 4.00) 33 | % wavelength of main light is 400nm 34 | fld_sample_mat = strcat(fld_sample_pre, '\wav_040.mat'); 35 | else 36 | error('Invalid field value!'); 37 | end 38 | % read out the simulation information 39 | wav_txt = load(fld_sample_mat); 40 | wav_txt = wav_txt.wav_txt; 41 | PSF_wav = wav_txt{9}; 42 | PSF_center = wav_txt{16}; 43 | PSF_wav_tmp = split(PSF_wav, ' '); 44 | PSF_center_tmp = split(PSF_center, ' '); 45 | % the position, field, wavelength of main light 46 | main_center_h = str2double(PSF_center_tmp{8}); 47 | PSF_fld_tmp = str2double(PSF_wav_tmp{5}); 48 | PSF_wav_tmp = str2double(PSF_wav_tmp{1}); 49 | end -------------------------------------------------------------------------------- /fov_deformable_net/dcn/functions/deform_conv_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class DeformConvFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, offset, weight, bias, 18 | stride, padding, dilation, group, deformable_groups, im2col_step): 19 | ctx.stride = _pair(stride) 20 | ctx.padding = _pair(padding) 21 | ctx.dilation = _pair(dilation) 22 | ctx.kernel_size = _pair(weight.shape[2:4]) 23 | ctx.group = group 24 | ctx.deformable_groups = deformable_groups 25 | ctx.im2col_step = im2col_step 26 | output = DCN.deform_conv_forward(input, weight, bias, 27 | offset, 28 | ctx.kernel_size[0], ctx.kernel_size[1], 29 | ctx.stride[0], ctx.stride[1], 30 | ctx.padding[0], ctx.padding[1], 31 | ctx.dilation[0], ctx.dilation[1], 32 | ctx.group, 33 | ctx.deformable_groups, 34 | ctx.im2col_step) 35 | ctx.save_for_backward(input, offset, weight, bias) 36 | return output 37 | 38 
| @staticmethod 39 | @once_differentiable 40 | def backward(ctx, grad_output): 41 | input, offset, weight, bias = ctx.saved_tensors 42 | grad_input, grad_offset, grad_weight, grad_bias = \ 43 | DCN.deform_conv_backward(input, weight, 44 | bias, 45 | offset, 46 | grad_output, 47 | ctx.kernel_size[0], ctx.kernel_size[1], 48 | ctx.stride[0], ctx.stride[1], 49 | ctx.padding[0], ctx.padding[1], 50 | ctx.dilation[0], ctx.dilation[1], 51 | ctx.group, 52 | ctx.deformable_groups, 53 | ctx.im2col_step) 54 | 55 | return grad_input, grad_offset, grad_weight, grad_bias,\ 56 | None, None, None, None, None, None 57 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/functions/deform_conv_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class DeformConvFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, offset, weight, bias, 18 | stride, padding, dilation, group, deformable_groups, im2col_step): 19 | ctx.stride = _pair(stride) 20 | ctx.padding = _pair(padding) 21 | ctx.dilation = _pair(dilation) 22 | ctx.kernel_size = _pair(weight.shape[2:4]) 23 | ctx.group = group 24 | ctx.deformable_groups = deformable_groups 25 | ctx.im2col_step = im2col_step 26 | output = DCN.deform_conv_forward(input, weight, bias, 27 | offset, 28 | ctx.kernel_size[0], ctx.kernel_size[1], 29 | ctx.stride[0], ctx.stride[1], 30 | ctx.padding[0], ctx.padding[1], 31 | ctx.dilation[0], ctx.dilation[1], 32 | ctx.group, 33 | ctx.deformable_groups, 34 | ctx.im2col_step) 35 | ctx.save_for_backward(input, offset, weight, bias) 36 | return output 37 | 38 | @staticmethod 39 | @once_differentiable 40 | def backward(ctx, grad_output): 41 | input, offset, weight, bias = ctx.saved_tensors 42 | grad_input, grad_offset, grad_weight, grad_bias = \ 43 | DCN.deform_conv_backward(input, weight, 44 | bias, 45 | offset, 46 | grad_output, 47 | ctx.kernel_size[0], ctx.kernel_size[1], 48 | ctx.stride[0], ctx.stride[1], 49 | ctx.padding[0], ctx.padding[1], 50 | ctx.dilation[0], ctx.dilation[1], 51 | ctx.group, 52 | ctx.deformable_groups, 53 | ctx.im2col_step) 54 | 55 | return grad_input, grad_offset, grad_weight, grad_bias,\ 56 | None, None, None, None, None, None 57 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/functions/modulated_deform_conv_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class ModulatedDeformConvFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, offset, mask, weight, bias, 18 | stride, padding, dilation, groups, deformable_groups, im2col_step): 19 | ctx.stride = _pair(stride) 20 | ctx.padding = _pair(padding) 21 | ctx.dilation = 
_pair(dilation) 22 | ctx.kernel_size = _pair(weight.shape[2:4]) 23 | ctx.groups = groups 24 | ctx.deformable_groups = deformable_groups 25 | ctx.im2col_step = im2col_step 26 | output = DCN.modulated_deform_conv_forward(input, weight, bias, 27 | offset, mask, 28 | ctx.kernel_size[0], ctx.kernel_size[1], 29 | ctx.stride[0], ctx.stride[1], 30 | ctx.padding[0], ctx.padding[1], 31 | ctx.dilation[0], ctx.dilation[1], 32 | ctx.groups, 33 | ctx.deformable_groups, 34 | ctx.im2col_step) 35 | ctx.save_for_backward(input, offset, mask, weight, bias) 36 | return output 37 | 38 | @staticmethod 39 | @once_differentiable 40 | def backward(ctx, grad_output): 41 | input, offset, mask, weight, bias = ctx.saved_tensors 42 | grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \ 43 | DCN.modulated_deform_conv_backward(input, weight, 44 | bias, 45 | offset, mask, 46 | grad_output, 47 | ctx.kernel_size[0], ctx.kernel_size[1], 48 | ctx.stride[0], ctx.stride[1], 49 | ctx.padding[0], ctx.padding[1], 50 | ctx.dilation[0], ctx.dilation[1], 51 | ctx.groups, 52 | ctx.deformable_groups, 53 | ctx.im2col_step) 54 | 55 | return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\ 56 | None, None, None, None, None, None 57 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/functions/modulated_deform_conv_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class ModulatedDeformConvFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, offset, mask, weight, bias, 18 | stride, padding, dilation, groups, deformable_groups, im2col_step): 19 | ctx.stride = _pair(stride) 20 | ctx.padding = _pair(padding) 21 | ctx.dilation = _pair(dilation) 22 | ctx.kernel_size = _pair(weight.shape[2:4]) 23 | ctx.groups = groups 24 | ctx.deformable_groups = deformable_groups 25 | ctx.im2col_step = im2col_step 26 | output = DCN.modulated_deform_conv_forward(input, weight, bias, 27 | offset, mask, 28 | ctx.kernel_size[0], ctx.kernel_size[1], 29 | ctx.stride[0], ctx.stride[1], 30 | ctx.padding[0], ctx.padding[1], 31 | ctx.dilation[0], ctx.dilation[1], 32 | ctx.groups, 33 | ctx.deformable_groups, 34 | ctx.im2col_step) 35 | ctx.save_for_backward(input, offset, mask, weight, bias) 36 | return output 37 | 38 | @staticmethod 39 | @once_differentiable 40 | def backward(ctx, grad_output): 41 | input, offset, mask, weight, bias = ctx.saved_tensors 42 | grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \ 43 | DCN.modulated_deform_conv_backward(input, weight, 44 | bias, 45 | offset, mask, 46 | grad_output, 47 | ctx.kernel_size[0], ctx.kernel_size[1], 48 | ctx.stride[0], ctx.stride[1], 49 | ctx.padding[0], ctx.padding[1], 50 | ctx.dilation[0], ctx.dilation[1], 51 | ctx.groups, 52 | ctx.deformable_groups, 53 | ctx.im2col_step) 54 | 55 | return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\ 56 | None, None, None, None, None, None 57 | -------------------------------------------------------------------------------- /imaging_simulation/imaging_simulation.m: 
-------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % variable declaration 3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 4 | % dataset image path. the images in the object image path have the same 5 | % resolution as the camera sensor 6 | object_img_path = './dataset/object_plane_image/'; 7 | img_path_dir = dir(object_img_path); 8 | img_file_num = length(img_path_dir) - 2; 9 | image_img_path = './dataset/image_plane_image/'; 10 | % PSF cell saving path 11 | PSF_cell_path = '../PSF_generation/PSF_cell/'; 12 | % white balance information path (the variable names stored in the two 13 | % .mat files below are assumed here; adapt them to your calibration data) 14 | wb_path = './white_balance_info.mat'; 15 | wb_info = load(wb_path); wb_table = wb_info.wb; 16 | % color correction matrix path 17 | ccm_path = './color_correction_info.mat'; 18 | ccm_info = load(ccm_path); ccm = ccm_info.ccm; 19 | % bayer pattern of sensor 20 | bayer_pattern = 'BGGR'; 21 | % image size which equals the sensor resolution 22 | H = 3000; W = 4000; 23 | % patch length between adjacent PSFs 24 | patch_length = 10; 25 | % number of PSFs along the height and width directions 26 | PSF_h_num = H / patch_length; 27 | PSF_w_num = W / patch_length; 28 | % PSF uniform size which is the largest size of PSF 29 | PSF_uniform_size = 10; 30 | 31 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 32 | % imaging simulation 33 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 34 | for img_index = 1:img_file_num 35 | img_file_path = strcat(object_img_path, img_path_dir(img_index+2).name); 36 | img = im2double(imread(img_file_path)); 37 | % inverse gamma compression 38 | img_inv_gamma = imadjust(img, [0 1], [0 1], 2.2); 39 | % inverse color correction matrix 40 | img_inv_ccm = apply_ccm(img_inv_gamma, ccm, 1); 41 | % randomly choose a color temperature (randi avoids the zero index 42 | % that round(12 * rand) could return) 43 | color_temperature_index = randi(12); 44 | wb = wb_table(color_temperature_index, :); 45 | % inverse white balance 46 | img_inv_wb = apply_wb(img_inv_ccm, wb, 1); 47 | % optical PSF convolution 48 | img_aberration = patch_conv(img_inv_wb, PSF_cell_path, PSF_h_num, ... 49 | PSF_w_num, patch_length, PSF_uniform_size); 50 | % mosaicing 51 | img_mosaiced = mosaicing(img_aberration, bayer_pattern); 52 | % demosaicing (demosaic requires an integer image) 53 | img_demosaiced = im2double(demosaic(im2uint16(img_mosaiced), bayer_pattern)); 54 | % white balance 55 | img_wz_wb = apply_wb(img_demosaiced, wb, 0); 56 | % color correction matrix 57 | img_wz_ccm = apply_ccm(img_wz_wb, ccm, 0); 58 | % gamma compression (on the processed image, not the raw input) 59 | img_wz_gamma = imadjust(img_wz_ccm, [0 1], [0 1], 1/2.2); 60 | % convert to uint8 61 | img_uint8 = im2uint8(img_wz_gamma); 62 | % saving 63 | image_img_file_path = strcat(image_img_path, img_path_dir(img_index+2).name); 64 | imwrite(img_uint8, image_img_file_path); 65 | end -------------------------------------------------------------------------------- /fov_deformable_net/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import time 4 | import math 5 | import torch 6 | import torchvision.transforms as transforms 7 | import numpy as np 8 | 9 | 10 | def create_dir(path): 11 | if not os.path.exists(path): 12 | os.makedirs(path) 13 | 14 | def RandomRot(img, angle=90, p=0.5): 15 | if random.random() > p: 16 | return transforms.functional.rotate(img, angle) 17 | return img 18 | 19 | def step_lr_adjust(optimizer, epoch, init_lr=1e-4, step_size=20, gamma=0.1): 20 | lr = init_lr * gamma ** (epoch // step_size) 21 | for param_group in optimizer.param_groups: 22 | param_group['lr'] = lr 23 | 24 | def cycle_lr_adjust(optimizer, epoch, base_lr=1e-5, max_lr=1e-4, step_size=10, gamma=1): 25 | cycle = np.floor(1 + epoch/(2 * step_size)) 26 | x = np.abs(epoch/step_size - 2 * cycle + 1) 27 | scale = gamma ** (epoch // (2 * step_size)) 28 | lr = base_lr + (max_lr - base_lr) * np.maximum(0, (1-x)) * scale 29 | for param_group in optimizer.param_groups: 30 | param_group['lr'] = lr 31 | 32 | def compare_psnr(img1, img2): 33 | img1 = np.float64(img1) 34 | img2 = np.float64(img2) 35 | mse = np.mean((img1 - img2) ** 2) 36 | if mse == 0: 37 | return 100 38 | 39 | PIXEL_MAX = 1.0 40 | return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) 41 | 42 | # using ImageNet values 43 | def normalize_tensor_transform(output, label): 44 | output_norm = torch.zeros_like(output) 45 | output_norm[:, 0, ...] = (output[:, 0, ...] - 0.485) / 0.229 46 | output_norm[:, 1, ...] = (output[:, 1, ...] - 0.456) / 0.224 47 | output_norm[:, 2, ...] = (output[:, 2, ...] - 0.406) / 0.225 48 | 49 | label_norm = torch.zeros_like(label) 50 | label_norm[:, 0, ...] = (label[:, 0, ...] - 0.485) / 0.229 51 | label_norm[:, 1, ...] = (label[:, 1, ...] - 0.456) / 0.224 52 | label_norm[:, 2, ...] = (label[:, 2, ...] - 0.406) / 0.225 53 | 54 | return output_norm, label_norm 55 | 56 | def process(img, ccm): 57 | # apply gamma 58 | img_out = torch.pow((img+1e-8), 0.454) 59 | # apply ccm 60 | img_out = torch.einsum('ikjl, mk -> imjl', [img_out, ccm]) 61 | return img_out 62 | 63 | def apply_cmatrix(img, ccm): 64 | if not img.shape[1] == 3: 65 | raise ValueError('Incorrect channel dimension!') 66 | 67 | img_out = torch.zeros_like(img) 68 | # each output channel is a linear mix of the *input* channels 69 | img_out[:, 0, :, :] = ccm[0, 0] * img[:, 0, :, :] + ccm[0, 1] * img[:, 1, :, :] + ccm[0, 2] * img[:, 2, :, :] 70 | img_out[:, 1, :, :] = ccm[1, 0] * img[:, 0, :, :] + ccm[1, 1] * img[:, 1, :, :] + ccm[1, 2] * img[:, 2, :, :] 71 | img_out[:, 2, :, :] = ccm[2, 0] * img[:, 0, :, :] + ccm[2, 1] * img[:, 1, :, :] + ccm[2, 2] * img[:, 2, :, :] 72 | return img_out -------------------------------------------------------------------------------- /fov_deformable_net/dcn/functions/deform_psroi_pooling_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class DeformRoIPoolingFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, rois, offset, 18 | spatial_scale, 19 | pooled_size, 20 | output_dim, 21 | no_trans, 22 | group_size=1, 23 | part_size=None, 24 | sample_per_part=4, 25 | trans_std=.0): 26 | ctx.spatial_scale = spatial_scale 27 | ctx.no_trans = int(no_trans) 28 | ctx.output_dim = output_dim 29 | ctx.group_size = group_size 30 | ctx.pooled_size = pooled_size 31 | ctx.part_size = pooled_size if part_size is None else part_size 32 | ctx.sample_per_part = sample_per_part 33 | ctx.trans_std = trans_std 34 | 35 | output, output_count = \ 36 | DCN.deform_psroi_pooling_forward(input, rois, offset, 37 | ctx.no_trans, ctx.spatial_scale, 38 | ctx.output_dim, ctx.group_size, 39 | ctx.pooled_size, ctx.part_size, 40 | ctx.sample_per_part, ctx.trans_std) 41 | ctx.save_for_backward(input, rois, offset, output_count) 42 | return output 43 | 44 | @staticmethod 45 | @once_differentiable 46 | def backward(ctx, grad_output): 47 | input, rois, offset, output_count = ctx.saved_tensors 48 | grad_input, grad_offset = \ 49 | DCN.deform_psroi_pooling_backward(grad_output, 50 | input, 51 | rois, 52 | offset, 53 | output_count, 54 | ctx.no_trans, 55 | ctx.spatial_scale, 56 | ctx.output_dim, 57 | ctx.group_size, 58 | ctx.pooled_size, 59 | ctx.part_size, 60 | ctx.sample_per_part, 61 | ctx.trans_std) 62 | 63 | return grad_input, None, grad_offset, \ 64 | None, None, None, None, None, None, None, None 65 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/functions/deform_psroi_pooling_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import math 7 | import torch 8 | from torch import nn 9 | from torch.autograd import Function 10 | from torch.nn.modules.utils import _pair 11 | from torch.autograd.function import once_differentiable 12 | 13 | import DCN 14 | 15 | class 
DeformRoIPoolingFunction(Function): 16 | @staticmethod 17 | def forward(ctx, input, rois, offset, 18 | spatial_scale, 19 | pooled_size, 20 | output_dim, 21 | no_trans, 22 | group_size=1, 23 | part_size=None, 24 | sample_per_part=4, 25 | trans_std=.0): 26 | ctx.spatial_scale = spatial_scale 27 | ctx.no_trans = int(no_trans) 28 | ctx.output_dim = output_dim 29 | ctx.group_size = group_size 30 | ctx.pooled_size = pooled_size 31 | ctx.part_size = pooled_size if part_size is None else part_size 32 | ctx.sample_per_part = sample_per_part 33 | ctx.trans_std = trans_std 34 | 35 | output, output_count = \ 36 | DCN.deform_psroi_pooling_forward(input, rois, offset, 37 | ctx.no_trans, ctx.spatial_scale, 38 | ctx.output_dim, ctx.group_size, 39 | ctx.pooled_size, ctx.part_size, 40 | ctx.sample_per_part, ctx.trans_std) 41 | ctx.save_for_backward(input, rois, offset, output_count) 42 | return output 43 | 44 | @staticmethod 45 | @once_differentiable 46 | def backward(ctx, grad_output): 47 | input, rois, offset, output_count = ctx.saved_tensors 48 | grad_input, grad_offset = \ 49 | DCN.deform_psroi_pooling_backward(grad_output, 50 | input, 51 | rois, 52 | offset, 53 | output_count, 54 | ctx.no_trans, 55 | ctx.spatial_scale, 56 | ctx.output_dim, 57 | ctx.group_size, 58 | ctx.pooled_size, 59 | ctx.part_size, 60 | ctx.sample_per_part, 61 | ctx.trans_std) 62 | 63 | return grad_input, None, grad_offset, \ 64 | None, None, None, None, None, None, None, None 65 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/README.md: -------------------------------------------------------------------------------- 1 | # Deformable-ConvNets-V2 in PyTorch 2 | 3 | This repo is an implementation of [Deformable Convolution V2](https://arxiv.org/abs/1811.11168). 4 | Ported from the original [MXNet implementation](https://github.com/msracver/Deformable-ConvNets/tree/master/DCNv2_op). 5 | 6 | Refer to [mmdetection branch](https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/mmdetection) in this repo for a complete framework. Results of DCNv2 based on the mmdetection code base can be found at [model zoo](https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/MODEL_ZOO.md#deformable-conv-v2). Many thanks to [mmdetection](https://github.com/open-mmlab/mmdetection) for their strong and clean framework. 7 | 8 | ## Build 9 | 10 | ``` 11 | sh make.sh 12 | ``` 13 | 14 | See `test.py` and `test_modulated.py` for example usage. 15 | 16 | ## Notice 17 | 18 | This repo provides the deformable conv layer which can reproduce the results in the Deformable ConvNets v2 paper. The major changes are as follows: 19 | 20 | * To better handle cases where sampling locations fall outside of the image boundary. 21 | 22 | In the previous operator, if the sampling location is outside of the feature map boundary, its sampled value would be zero. Thus, the gradient with respect to the learnable offset would be zero. We found such a scheme may deteriorate the performance in ImageNet classification (perhaps because the feature maps are of low resolution). For object detection on COCO, both the previous and the updated operators deliver the same results. 23 | 24 | In the new operator, if the sampling location is within one pixel outside of the feature map boundary, bilinear sampling would also be applied. And the gradient with respect to the learnable offset can be non-zero for such locations. 
This is implemented by padding zeros (by one row/column) outside of the boundaries of feature maps, and performing bilinear sampling on the padded feature maps. 25 | 26 | 27 | * The efficiency of processing multiple images in a mini-batch is considerably improved. 28 | 29 | Both the previous and the updated operators share the following computation pipeline (illustrated by a 3x3 deformable convolution with input data of NxCxHxW and output data of NxC'xHxW): 30 | 31 | for i in range(N/S): 32 | step 1 (slicing): slicing the input data at the batch dimension from i*S to (i+1)*S, input (NxCxHxW) -> sliced input (SxCxHxW) 33 | step 2 (deformable im2col): sliced input (SxCxHxW)+sliced offset (Sx18xHxW) -> column (Cx9xSxHxW) 34 | step 3 (MatMul&reshape): weight matrix (C'x 9C) * column (9CxSHW) -> temp sliced output (C'xSxHxW) -> sliced output (SxC'xHxW) 35 | step 4 (Merge): merge sliced output to form the whole output data (NxC'xHxW) 36 | end 37 | 38 | In the previous operator, S is fixed as 1. In the updated operator, S can be set by the *im2col_step* parameter, whose default value is min(N, 64). The updated operator is significantly faster than the existing one when the image batch size is large. 39 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/deform_conv.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "cpu/deform_conv_cpu.h" 4 | 5 | #ifdef WITH_CUDA 6 | #include "cuda/deform_conv_cuda.h" 7 | #endif 8 | 9 | 10 | at::Tensor 11 | deform_conv_forward(const at::Tensor &input, 12 | const at::Tensor &weight, 13 | const at::Tensor &bias, 14 | const at::Tensor &offset, 15 | const int kernel_h, 16 | const int kernel_w, 17 | const int stride_h, 18 | const int stride_w, 19 | const int pad_h, 20 | const int pad_w, 21 | const int dilation_h, 22 | const int dilation_w, 23 | const int group, 24 | const int deformable_group, 25 | const int im2col_step) 26 | { 27 | if (input.type().is_cuda()) 28 | { 29 | #ifdef WITH_CUDA 30 | return deform_conv_cuda_forward(input, weight, bias, offset, 31 | kernel_h, kernel_w, 32 | stride_h, stride_w, 33 | pad_h, pad_w, 34 | dilation_h, dilation_w, 35 | group, 36 | deformable_group, 37 | im2col_step); 38 | #else 39 | AT_ERROR("Not compiled with GPU support"); 40 | #endif 41 | } 42 | AT_ERROR("Not implemented on the CPU"); 43 | } 44 | 45 | std::vector<at::Tensor> 46 | deform_conv_backward(const at::Tensor &input, 47 | const at::Tensor &weight, 48 | const at::Tensor &bias, 49 | const at::Tensor &offset, 50 | const at::Tensor &grad_output, 51 | const int kernel_h, 52 | const int kernel_w, 53 | const int stride_h, 54 | const int stride_w, 55 | const int pad_h, 56 | const int pad_w, 57 | const int dilation_h, 58 | const int dilation_w, 59 | const int group, 60 | const int deformable_group, 61 | const int im2col_step) 62 | { 63 | if (input.type().is_cuda()) 64 | { 65 | #ifdef WITH_CUDA 66 | return deform_conv_cuda_backward(input, 67 | weight, 68 | bias, 69 | offset, 70 | grad_output, 71 | kernel_h, kernel_w, 72 | stride_h, stride_w, 73 | pad_h, pad_w, 74 | dilation_h, dilation_w, 75 | group, 76 | deformable_group, 77 | im2col_step); 78 | #else 79 | AT_ERROR("Not compiled with GPU support"); 80 | #endif 81 | } 82 | AT_ERROR("Not implemented on the CPU"); 83 | } 84 | 85 | -------------------------------------------------------------------------------- /PSF_generation/ray_tracing/test_analysis.py: 
-------------------------------------------------------------------------------- 1 | # %% 2 | # predefinition 3 | import time 4 | import numpy as np 5 | import torch 6 | import difftrace as dg 7 | # load lens 8 | device = torch.device('cpu') 9 | dtype = torch.float64 10 | lens = dg.System('lens_file/doubleGauss.json', torch.float64, torch.device('cpu')) 11 | # define analysis 12 | views = torch.tensor([0., 10., 14.], dtype=dtype, device=device) 13 | wavelengths = torch.tensor([450.00e-6, 500.00e-6, 550.00e-6, 600.00e-6, 650.00e-6], dtype=dtype, device=device) 14 | ana = dg.Analysis(lens, views, wavelengths, dtype=dtype, device=device) 15 | # %% 16 | # test plot setup 2d 17 | ana.plot_setup_2d() 18 | # %% 19 | # test plot setup 2d with the ray tracing 20 | ana.plot_setup_2d_with_trace() 21 | # %% 22 | # test spot diagram 23 | ana.spot_diagram() 24 | # %% 25 | # report single ray tracing results 26 | ana.single_ray_trace() 27 | # %% 28 | # wavefront map of the system 29 | _ = ana.wavefront_map() 30 | # %% 31 | # different psf calculation method 32 | import matplotlib.pyplot as plt 33 | pupil_sampling = 201 34 | image_sampling = 101 35 | image_delta = 0.0002 36 | sample_distribution = 'hexapolar' 37 | psf_spot = ana.psf_spot(pupil_sampling=pupil_sampling, 38 | image_sampling=image_sampling, 39 | image_delta=image_delta) 40 | plt.imshow(psf_spot, cmap='jet') 41 | # %% 42 | psf_coherent = ana.psf_coherent(pupil_sampling=pupil_sampling, 43 | image_sampling=image_sampling, 44 | image_delta=image_delta) 45 | plt.imshow(psf_coherent, cmap='jet') 46 | # %% 47 | psf_huygens = ana.psf_huygens(pupil_sampling=pupil_sampling, 48 | image_sampling=image_sampling, 49 | image_delta=image_delta) 50 | plt.imshow(psf_huygens, cmap='jet') 51 | # %% 52 | psf_kirchoff = ana.psf_kirchoff(pupil_sampling=pupil_sampling, 53 | image_sampling=image_sampling, 54 | image_delta=image_delta) 55 | plt.imshow(psf_kirchoff, cmap='jet') 56 | # %% 57 | # different mtf calculation method 58 | _, _, _ = ana.mtf(pupil_sampling, image_sampling, image_delta, method='coherent', show=True) 59 | # %% 60 | _, _, _ = ana.mtf(pupil_sampling, image_sampling, image_delta, method='kirchoff', show=True) 61 | # %% 62 | # mtf through focus 63 | MTF_T_through_focus, MTF_S_through_focus = ana.mtf_huygens_through_focus(pupil_sampling=100, 64 | image_sampling=100, 65 | image_delta=0.0002, 66 | frequency=25, 67 | delta_focus=0.01, 68 | steps=11) 69 | plt.plot(MTF_T_through_focus) 70 | plt.plot(MTF_S_through_focus) 71 | # %% 72 | # relative illumination 73 | field_density = 11 74 | ri_list = ana.relative_illumination(field_density) 75 | plt.plot(np.asarray(ri_list)) 76 | # %% 77 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/modulated_deform_conv.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "cpu/modulated_deform_conv_cpu.h" 4 | 5 | #ifdef WITH_CUDA 6 | #include "cuda/modulated_deform_conv_cuda.h" 7 | #endif 8 | 9 | 10 | at::Tensor 11 | modulated_deform_conv_forward(const at::Tensor &input, 12 | const at::Tensor &weight, 13 | const at::Tensor &bias, 14 | const at::Tensor &offset, 15 | const at::Tensor &mask, 16 | const int kernel_h, 17 | const int kernel_w, 18 | const int stride_h, 19 | const int stride_w, 20 | const int pad_h, 21 | const int pad_w, 22 | const int dilation_h, 23 | const int dilation_w, 24 | const int group, 25 | const int deformable_group, 26 | const int im2col_step) 27 | { 28 | if 
(input.type().is_cuda()) 29 | { 30 | #ifdef WITH_CUDA 31 | return modulated_deform_conv_cuda_forward(input, weight, bias, offset, mask, 32 | kernel_h, kernel_w, 33 | stride_h, stride_w, 34 | pad_h, pad_w, 35 | dilation_h, dilation_w, 36 | group, 37 | deformable_group, 38 | im2col_step); 39 | #else 40 | AT_ERROR("Not compiled with GPU support"); 41 | #endif 42 | } 43 | AT_ERROR("Not implemented on the CPU"); 44 | } 45 | 46 | std::vector<at::Tensor> 47 | modulated_deform_conv_backward(const at::Tensor &input, 48 | const at::Tensor &weight, 49 | const at::Tensor &bias, 50 | const at::Tensor &offset, 51 | const at::Tensor &mask, 52 | const at::Tensor &grad_output, 53 | const int kernel_h, 54 | const int kernel_w, 55 | const int stride_h, 56 | const int stride_w, 57 | const int pad_h, 58 | const int pad_w, 59 | const int dilation_h, 60 | const int dilation_w, 61 | const int group, 62 | const int deformable_group, 63 | const int im2col_step) 64 | { 65 | if (input.type().is_cuda()) 66 | { 67 | #ifdef WITH_CUDA 68 | return modulated_deform_conv_cuda_backward(input, 69 | weight, 70 | bias, 71 | offset, 72 | mask, 73 | grad_output, 74 | kernel_h, kernel_w, 75 | stride_h, stride_w, 76 | pad_h, pad_w, 77 | dilation_h, dilation_w, 78 | group, 79 | deformable_group, 80 | im2col_step); 81 | #else 82 | AT_ERROR("Not compiled with GPU support"); 83 | #endif 84 | } 85 | AT_ERROR("Not implemented on the CPU"); 86 | } 87 | 88 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/src/deform_psroi_pooling.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "cpu/deform_psroi_pooling_cpu.h" 4 | 5 | #ifdef WITH_CUDA 6 | #include "cuda/deform_psroi_pooling_cuda.h" 7 | #endif 8 | 9 | 10 | std::tuple<at::Tensor, at::Tensor> 11 | deform_psroi_pooling_forward(const at::Tensor &input, 12 | const at::Tensor &bbox, 13 | const at::Tensor &trans, 14 | const int no_trans, 15 | const float spatial_scale, 16 | const int output_dim, 17 | const int group_size, 18 | const int pooled_size, 19 | const int part_size, 20 | const int sample_per_part, 21 | const float trans_std) 22 | { 23 | if (input.type().is_cuda()) 24 | { 25 | #ifdef WITH_CUDA 26 | return deform_psroi_pooling_cuda_forward(input, 27 | bbox, 28 | trans, 29 | no_trans, 30 | spatial_scale, 31 | output_dim, 32 | group_size, 33 | pooled_size, 34 | part_size, 35 | sample_per_part, 36 | trans_std); 37 | #else 38 | AT_ERROR("Not compiled with GPU support"); 39 | #endif 40 | } 41 | AT_ERROR("Not implemented on the CPU"); 42 | } 43 | 44 | std::tuple<at::Tensor, at::Tensor> 45 | deform_psroi_pooling_backward(const at::Tensor &out_grad, 46 | const at::Tensor &input, 47 | const at::Tensor &bbox, 48 | const at::Tensor &trans, 49 | const at::Tensor &top_count, 50 | const int no_trans, 51 | const float spatial_scale, 52 | const int output_dim, 53 | const int group_size, 54 | const int pooled_size, 55 | const int part_size, 56 | const int sample_per_part, 57 | const float trans_std) 58 | { 59 | if (input.type().is_cuda()) 60 | { 61 | #ifdef WITH_CUDA 62 | return deform_psroi_pooling_cuda_backward(out_grad, 63 | input, 64 | bbox, 65 | trans, 66 | top_count, 67 | no_trans, 68 | spatial_scale, 69 | output_dim, 70 | group_size, 71 | pooled_size, 72 | part_size, 73 | sample_per_part, 74 | trans_std); 75 | #else 76 | AT_ERROR("Not compiled with GPU support"); 77 | #endif 78 | } 79 | AT_ERROR("Not implemented on the CPU"); 80 | } -------------------------------------------------------------------------------- 
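The batched pipeline described in the dcn README above can be made concrete with ordinary convolution. The sketch below is not part of the repository: `torch.nn.functional.unfold` stands in for the deformable im2col CUDA kernel (so this computes a regular convolution), and only the slice/im2col/matmul/merge structure matches the README's pseudocode.

```python
import torch
import torch.nn.functional as F

def conv_via_batched_im2col(x, weight, im2col_step=64):
    # Steps follow the README pipeline; F.unfold replaces the deformable
    # sampling, so the result equals a plain convolution.
    N, C, H, W = x.shape
    c_out, _, kh, kw = weight.shape
    s = min(N, im2col_step)
    assert N % s == 0, "im2col_step must divide the batch size"
    chunks = []
    for i in range(N // s):
        xs = x[i * s:(i + 1) * s]                                 # step 1: slice the batch
        col = F.unfold(xs, (kh, kw), padding=(kh // 2, kw // 2))  # step 2: im2col -> (s, C*kh*kw, H*W)
        out = weight.view(c_out, -1) @ col                        # step 3: matmul -> (s, c_out, H*W)
        chunks.append(out.view(s, c_out, H, W))
    return torch.cat(chunks, dim=0)                               # step 4: merge

x = torch.randn(4, 8, 16, 16)
w = torch.randn(12, 8, 3, 3)
assert torch.allclose(conv_via_batched_im2col(x, w, im2col_step=2),
                      F.conv2d(x, w, padding=1), atol=1e-4)
```

Larger `im2col_step` trades memory for fewer kernel launches, which is why the updated operator is faster at large batch sizes.
--------------------------------------------------------------------------------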
/fov_deformable_net/dcn/modules/deform_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn import init 10 | from torch.nn.modules.utils import _pair 11 | 12 | from functions.deform_conv_func import DeformConvFunction 13 | 14 | class DeformConv(nn.Module): 15 | 16 | def __init__(self, in_channels, out_channels, 17 | kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True): 18 | super(DeformConv, self).__init__() 19 | 20 | if in_channels % groups != 0: 21 | raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups)) 22 | if out_channels % groups != 0: 23 | raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups)) 24 | 25 | self.in_channels = in_channels 26 | self.out_channels = out_channels 27 | self.kernel_size = _pair(kernel_size) 28 | self.stride = _pair(stride) 29 | self.padding = _pair(padding) 30 | self.dilation = _pair(dilation) 31 | self.groups = groups 32 | self.deformable_groups = deformable_groups 33 | self.im2col_step = im2col_step 34 | self.use_bias = bias 35 | 36 | self.weight = nn.Parameter(torch.Tensor( 37 | out_channels, in_channels//groups, *self.kernel_size)) 38 | self.bias = nn.Parameter(torch.Tensor(out_channels)) 39 | self.reset_parameters() 40 | if not self.use_bias: 41 | self.bias.requires_grad = False 42 | 43 | def reset_parameters(self): 44 | n = self.in_channels 45 | init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 46 | if self.bias is not None: 47 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) 48 | bound = 1 / math.sqrt(fan_in) 49 | init.uniform_(self.bias, -bound, bound) 50 | 51 | def forward(self, input, offset): 52 | assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 53 | offset.shape[1] 54 | return DeformConvFunction.apply(input, offset, 55 | self.weight, 56 | self.bias, 57 | self.stride, 58 | self.padding, 59 | self.dilation, 60 | self.groups, 61 | self.deformable_groups, 62 | self.im2col_step) 63 | 64 | _DeformConv = DeformConvFunction.apply 65 | 66 | class DeformConvPack(DeformConv): 67 | 68 | def __init__(self, in_channels, out_channels, 69 | kernel_size, stride, padding, 70 | dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1): 71 | super(DeformConvPack, self).__init__(in_channels, out_channels, 72 | kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias) 73 | 74 | out_channels = self.deformable_groups * 2 * self.kernel_size[0] * self.kernel_size[1] 75 | self.conv_offset = nn.Conv2d(self.in_channels, 76 | out_channels, 77 | kernel_size=self.kernel_size, 78 | stride=self.stride, 79 | padding=self.padding, 80 | bias=True) 81 | self.conv_offset.lr_mult = lr_mult 82 | self.init_offset() 83 | 84 | def init_offset(self): 85 | self.conv_offset.weight.data.zero_() 86 | self.conv_offset.bias.data.zero_() 87 | 88 | def forward(self, input): 89 | offset = self.conv_offset(input) 90 | return DeformConvFunction.apply(input, offset, 91 | self.weight, 92 | self.bias, 93 | self.stride, 94 | self.padding, 95 | self.dilation, 96 | self.groups, 97 | self.deformable_groups, 98 | self.im2col_step) 99 | 100 | -------------------------------------------------------------------------------- 
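A minimal usage sketch for the two layers defined in `modules/deform_conv.py` above, assuming the DCN extension has been built with `sh make.sh` and a CUDA device is available (the op raises on CPU). The shapes below are illustrative, not from the repository.

```python
import torch
from modules.deform_conv import DeformConv, DeformConvPack

x = torch.randn(2, 32, 64, 64).cuda()

# DeformConvPack predicts its own offsets from the input; the offset branch
# is zero-initialized, so the layer starts out as a plain 3x3 convolution.
pack = DeformConvPack(32, 32, kernel_size=3, stride=1, padding=1).cuda()
y_pack = pack(x)                 # (2, 32, 64, 64)

# DeformConv takes an explicit offset map with
# 2 * deformable_groups * kh * kw = 2 * 1 * 3 * 3 = 18 channels.
conv = DeformConv(32, 32, kernel_size=3, stride=1, padding=1).cuda()
offset = torch.zeros(2, 18, 64, 64).cuda()
y = conv(x, offset)              # all-zero offsets reproduce a regular conv
```
--------------------------------------------------------------------------------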
/PSF_generation/ray_tracing/lens_file/doubleGauss.json: -------------------------------------------------------------------------------- 1 | { 2 | "Description" : "DoubleGauss Zemax", 3 | "OBJECT": { 4 | "index" : 0, 5 | "roc" : 0.000000000000000E+000, 6 | "distance": 0.000000000000000E+000, 7 | "material": "vacuum", 8 | "radius" : 0.000000000000000E+000, 9 | "conic" : 0.000000000000000E+000, 10 | "ai-terms": null, 11 | "variable": null 12 | }, 13 | "Standard1": { 14 | "index" : 1, 15 | "roc" : 5.415324616573791E+001, 16 | "distance": 8.746657850000000E+000, 17 | "material": "SK2", 18 | "radius" : 2.933920437194000E+001, 19 | "conic" : 0.000000000000000E+000, 20 | "ai-terms": null, 21 | "variable": ["c"], 22 | "bounds" : [[0, 1.0]] 23 | }, 24 | "Standard2": { 25 | "index" : 2, 26 | "roc" : 1.525219209401110E+002, 27 | "distance": 5.000000000000000E-001, 28 | "material": "vacuum", 29 | "radius" : 2.826658482256000E+001, 30 | "conic" : 0.000000000000000E+000, 31 | "ai-terms": null, 32 | "variable": ["c", "d"], 33 | "bounds" : [[0, 1.0], [0, 1E16]] 34 | }, 35 | "Standard3": { 36 | "index" : 3, 37 | "roc" : 3.595062445048480E+001, 38 | "distance": 1.400000000000000E+001, 39 | "material": "SK16", 40 | "radius" : 2.435458095155000E+001, 41 | "conic" : 0.000000000000000E+000, 42 | "ai-terms": null, 43 | "variable": null 44 | }, 45 | "Standard4": { 46 | "index" : 4, 47 | "roc" : 0.000000000000000E+000, 48 | "distance": 3.776965890000000E+000, 49 | "material": "F5", 50 | "radius" : 2.135822601177000E+001, 51 | "conic" : 0.000000000000000E+000, 52 | "ai-terms": null, 53 | "variable": null 54 | }, 55 | "Standard5": { 56 | "index" : 5, 57 | "roc" : 2.226992461800949E+001, 58 | "distance": 1.425305930000000E+001, 59 | "material": "vacuum", 60 | "radius" : 1.493283368295000E+001, 61 | "conic" : 0.000000000000000E+000, 62 | "ai-terms": null, 63 | "variable": null 64 | }, 65 | "STOP": { 66 | "index" : 6, 67 | "roc" : 0.000000000000000E+000, 68 | "distance": 1.242812910000000E+001, 69 | "material": "vacuum", 70 | "radius" : 1.000000000000000E+001, 71 | "conic" : 0.000000000000000E+000, 72 | "ai-terms": null, 73 | "variable": null 74 | }, 75 | "Standard7": { 76 | "index" : 7, 77 | "roc" : -2.568503303046087E+001, 78 | "distance": 3.776965890000000E+000, 79 | "material": "F5", 80 | "radius" : 1.330133689386000E+001, 81 | "conic" : 0.000000000000000E+000, 82 | "ai-terms": null, 83 | "variable": null 84 | }, 85 | "Standard8": { 86 | "index" : 8, 87 | "roc" : 0.000000000000000E+000, 88 | "distance": 1.083392850000000E+001, 89 | "material": "SK16", 90 | "radius" : 1.662503180280000E+001, 91 | "conic" : 0.000000000000000E+000, 92 | "ai-terms": null, 93 | "variable": null 94 | }, 95 | "Standard9": { 96 | "index" : 9, 97 | "roc" : -3.698022072863240E+001, 98 | "distance": 5.000000000000000E-001, 99 | "material": "vacuum", 100 | "radius" : 1.906148230444000E+001, 101 | "conic" : 0.000000000000000E+000, 102 | "ai-terms": null, 103 | "variable": null 104 | }, 105 | "Standard10": { 106 | "index" : 10, 107 | "roc" : 1.964173340965081E+002, 108 | "distance": 6.858174910000000E+000, 109 | "material": "SK16", 110 | "radius" : 2.147382942899000E+001, 111 | "conic" : 0.000000000000000E+000, 112 | "ai-terms": null, 113 | "variable": null 114 | }, 115 | "Standard11": { 116 | "index" : 11, 117 | "roc" : -6.714755002373626E+001, 118 | "distance": 5.731453790500000E+001, 119 | "material": "vacuum", 120 | "radius" : 2.179718377169000E+001, 121 | "conic" : 0.000000000000000E+000, 122 | "ai-terms": null, 123 | "variable": null 124 | 
}, 125 | "IMAGE": { 126 | "index" : 12, 127 | "roc" : 0.000000000000000E+000, 128 | "distance": 0.000000000000000E+000, 129 | "material": "vacuum", 130 | "radius" : 3.456914634886920E+001, 131 | "conic" : 0.000000000000000E+000, 132 | "ai-terms": null, 133 | "variable": null 134 | } 135 | } -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/modules/deform_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn import init 10 | from torch.nn.modules.utils import _pair 11 | 12 | from functions.deform_conv_func import DeformConvFunction 13 | 14 | class DeformConv(nn.Module): 15 | 16 | def __init__(self, in_channels, out_channels, 17 | kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True): 18 | super(DeformConv, self).__init__() 19 | 20 | if in_channels % groups != 0: 21 | raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups)) 22 | if out_channels % groups != 0: 23 | raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups)) 24 | 25 | self.in_channels = in_channels 26 | self.out_channels = out_channels 27 | self.kernel_size = _pair(kernel_size) 28 | self.stride = _pair(stride) 29 | self.padding = _pair(padding) 30 | self.dilation = _pair(dilation) 31 | self.groups = groups 32 | self.deformable_groups = deformable_groups 33 | self.im2col_step = im2col_step 34 | self.use_bias = bias 35 | 36 | self.weight = nn.Parameter(torch.Tensor( 37 | out_channels, in_channels//groups, *self.kernel_size)) 38 | self.bias = nn.Parameter(torch.Tensor(out_channels)) 39 | self.reset_parameters() 40 | if not self.use_bias: 41 | self.bias.requires_grad = False 42 | 43 | def reset_parameters(self): 44 | n = self.in_channels 45 | init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 46 | if self.bias is not None: 47 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) 48 | bound = 1 / math.sqrt(fan_in) 49 | init.uniform_(self.bias, -bound, bound) 50 | 51 | def forward(self, input, offset): 52 | assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 53 | offset.shape[1] 54 | return DeformConvFunction.apply(input, offset, 55 | self.weight, 56 | self.bias, 57 | self.stride, 58 | self.padding, 59 | self.dilation, 60 | self.groups, 61 | self.deformable_groups, 62 | self.im2col_step) 63 | 64 | _DeformConv = DeformConvFunction.apply 65 | 66 | class DeformConvPack(DeformConv): 67 | 68 | def __init__(self, in_channels, out_channels, 69 | kernel_size, stride, padding, 70 | dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1): 71 | super(DeformConvPack, self).__init__(in_channels, out_channels, 72 | kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias) 73 | 74 | out_channels = self.deformable_groups * 2 * self.kernel_size[0] * self.kernel_size[1] 75 | self.conv_offset = nn.Conv2d(self.in_channels, 76 | out_channels, 77 | kernel_size=self.kernel_size, 78 | stride=self.stride, 79 | padding=self.padding, 80 | bias=True) 81 | self.conv_offset.lr_mult = lr_mult 82 | self.init_offset() 83 | 84 | def init_offset(self): 85 | self.conv_offset.weight.data.zero_() 86 | 
self.conv_offset.bias.data.zero_() 87 | 88 | def forward(self, input): 89 | offset = self.conv_offset(input) 90 | return DeformConvFunction.apply(input, offset, 91 | self.weight, 92 | self.bias, 93 | self.stride, 94 | self.padding, 95 | self.dilation, 96 | self.groups, 97 | self.deformable_groups, 98 | self.im2col_step) 99 | 100 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/modules/modulated_deform_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn import init 10 | from torch.nn.modules.utils import _pair 11 | 12 | from functions.modulated_deform_conv_func import ModulatedDeformConvFunction 13 | 14 | class ModulatedDeformConv(nn.Module): 15 | 16 | def __init__(self, in_channels, out_channels, 17 | kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True): 18 | super(ModulatedDeformConv, self).__init__() 19 | 20 | if in_channels % groups != 0: 21 | raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups)) 22 | if out_channels % groups != 0: 23 | raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups)) 24 | 25 | self.in_channels = in_channels 26 | self.out_channels = out_channels 27 | self.kernel_size = _pair(kernel_size) 28 | self.stride = _pair(stride) 29 | self.padding = _pair(padding) 30 | self.dilation = _pair(dilation) 31 | self.groups = groups 32 | self.deformable_groups = deformable_groups 33 | self.im2col_step = im2col_step 34 | self.use_bias = bias 35 | 36 | self.weight = nn.Parameter(torch.Tensor( 37 | out_channels, in_channels//groups, *self.kernel_size)) 38 | self.bias = nn.Parameter(torch.Tensor(out_channels)) 39 | self.reset_parameters() 40 | if not self.use_bias: 41 | self.bias.requires_grad = False 42 | 43 | def reset_parameters(self): 44 | n = self.in_channels 45 | init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 46 | if self.bias is not None: 47 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) 48 | bound = 1 / math.sqrt(fan_in) 49 | init.uniform_(self.bias, -bound, bound) 50 | 51 | def forward(self, input, offset, mask): 52 | assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 53 | offset.shape[1] 54 | assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 55 | mask.shape[1] 56 | return ModulatedDeformConvFunction.apply(input, offset, mask, 57 | self.weight, 58 | self.bias, 59 | self.stride, 60 | self.padding, 61 | self.dilation, 62 | self.groups, 63 | self.deformable_groups, 64 | self.im2col_step) 65 | 66 | _ModulatedDeformConv = ModulatedDeformConvFunction.apply 67 | 68 | class ModulatedDeformConvPack(ModulatedDeformConv): 69 | 70 | def __init__(self, in_channels, out_channels, 71 | kernel_size, stride, padding, 72 | dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1): 73 | super(ModulatedDeformConvPack, self).__init__(in_channels, out_channels, 74 | kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias) 75 | 76 | out_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1] 77 | self.conv_offset_mask = nn.Conv2d(self.in_channels, 78 | out_channels, 79 | 
kernel_size=self.kernel_size, 80 | stride=self.stride, 81 | padding=self.padding, 82 | bias=True) 83 | self.conv_offset_mask.lr_mult = lr_mult 84 | self.init_offset() 85 | 86 | def init_offset(self): 87 | self.conv_offset_mask.weight.data.zero_() 88 | self.conv_offset_mask.bias.data.zero_() 89 | 90 | def forward(self, input): 91 | out = self.conv_offset_mask(input) 92 | o1, o2, mask = torch.chunk(out, 3, dim=1) 93 | offset = torch.cat((o1, o2), dim=1) 94 | mask = torch.sigmoid(mask) 95 | return ModulatedDeformConvFunction.apply(input, offset, mask, 96 | self.weight, 97 | self.bias, 98 | self.stride, 99 | self.padding, 100 | self.dilation, 101 | self.groups, 102 | self.deformable_groups, 103 | self.im2col_step) 104 | 105 | -------------------------------------------------------------------------------- /fov_deformable_net/option/option.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | 4 | parser = argparse.ArgumentParser(description='SADNET') 5 | # File paths 6 | parser.add_argument('--src_path', type=str, 7 | default="./dataset_train.h5", 8 | help='training dataset path') 9 | parser.add_argument('--val_path', type=str, 10 | default="./dataset_valid.h5", 11 | help='validation dataset path; set None if unused') 12 | parser.add_argument('--ckpt_dir', type=str, 13 | default="./ckpt_dir", 14 | help='model directory') 15 | parser.add_argument('--log_dir', type=str, 16 | default="./log_dir", 17 | help='log directory') 18 | # Hardware specifications 19 | parser.add_argument('--gpu', type=str, default="2", 20 | help='GPUs') 21 | 22 | # Training parameters 23 | parser.add_argument('--batch_size', type=int, default=32, 24 | help='training batch size') 25 | parser.add_argument('--val_batch_size', type=int, default=4, 26 | help='validation batch size') 27 | parser.add_argument('--patch_size', type=int, default=128, 28 | help='training patch size') 29 | parser.add_argument('--sigma', type=int, default=2, 30 | help='noise sigma') 31 | parser.add_argument('--n_epoch', type=int, default=30, 32 | help='the number of training epochs') 33 | parser.add_argument('--lr', type=float, default=1e-4, 34 | help='learning rate') 35 | parser.add_argument('--milestone', type=int, default=5, 36 | help='the epoch interval for learning rate decay') 37 | parser.add_argument('--val_epoch', type=int, default=1, 38 | help='run validation every N epochs') 39 | parser.add_argument('--save_val_img', type=bool, default=True, 40 | help='save the last validated image for comparison') 41 | parser.add_argument('--val_patch_size', type=int, default=512, 42 | help='patch size in validation dataset') 43 | parser.add_argument('--save_epoch', type=int, default=1, 44 | help='save the model every N epochs') 45 | parser.add_argument('--gamma', type=float, default=0.5, 46 | help='learning rate decay factor for every milestone') 47 | parser.add_argument('--finetune', type=bool, default=False, 48 | help='set True to finetune the model') 49 | parser.add_argument('--init_epoch', type=int, default=0, 50 | help='the initial epoch when finetuning') 51 | parser.add_argument('--wz_process', type=bool, default=False, 52 | help='calculate loss in RGB domain') 53 | parser.add_argument('--ccm', type=np.array, 54 | default=np.array([[ 1.93994141, -0.73925781, -0.20068359], 55 | [-0.28857422, 1.59741211, -0.30883789], 56 | [-0.0078125 , -0.45654297, 1.46435547]]), 57 | help='ccm') 58 | 59 | # loss 60 | parser.add_argument('--t_loss', type=str, default='L2', 61 | help='training loss: L2, 
L1, L2_wz_TV, L2_wz_Perceptual, L2_wz_SSIM') 62 | parser.add_argument('--tv_weight', type=float, default=4e-8, 63 | help='TV loss weight') 64 | parser.add_argument('--style_weight', type=float, default=1, 65 | help='style weight of perceptual loss') 66 | parser.add_argument('--content_weight', type=float, default=1, 67 | help='content weight of perceptual loss') 68 | parser.add_argument('--ssim_weight', type=float, default=2e-1, 69 | help='SSIM weight of the SSIM loss') 70 | 71 | 72 | # model 73 | parser.add_argument('--NetName', default='DFUNet', 74 | help='model name') 75 | parser.add_argument('--n_channel', type=int, default=32, 76 | help='number of convolutional channels') 77 | parser.add_argument('--offset_channel', type=int, default=32, 78 | help='number of offset channels') 79 | parser.add_argument('--fov_att', type=bool, default=False, 80 | help='whether to use the FOV attention block') 81 | parser.add_argument('--kernel_size', type=list, default=[5], 82 | help='kernel size to be estimated') 83 | parser.add_argument('--color', type=bool, default=True, 84 | help='color image or gray image') 85 | 86 | # test 87 | # File paths 88 | parser.add_argument('--gt_src_path', type=str, default="./test_datasets/label", 89 | help='testing clean image path; set None if unavailable') 90 | parser.add_argument('--blur_src_path', type=str, default="./test_datasets/input_20201228", 91 | help='testing blurred image path') 92 | parser.add_argument('--result_png_path', type=str, default="./test_result", 93 | help='result directory') 94 | parser.add_argument('--ckpt_dir_test', type=str, default="./ckpt_dir", 95 | help='model directory') 96 | parser.add_argument('--epoch_test', type=int, default=30, 97 | help='the epoch for testing') 98 | 99 | # test_real 100 | # File paths 101 | parser.add_argument('--real_blur_src_path', type=str, 102 | default="./camera04", 103 | help='testing real aberration image path; set None if unavailable') 104 | parser.add_argument('--real_dst_tiff_path', type=str, 105 | default="./real_test_result", 106 | help='result directory') 107 | parser.add_argument('--ckpt_dir_test_real', type=str, 108 | default="./ckpt_dir", 109 | help='model directory') 110 | parser.add_argument('--epoch_test_real', type=int, default=30, 111 | help='the epoch for testing real images') 112 | 113 | args = parser.parse_args() 114 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/deform_psroi_pooling.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn.modules.utils import _pair 10 | 11 | from functions.deform_psroi_pooling_func import DeformRoIPoolingFunction 12 | 13 | class DeformRoIPooling(nn.Module): 14 | 15 | def __init__(self, 16 | spatial_scale, 17 | pooled_size, 18 | output_dim, 19 | no_trans, 20 | group_size=1, 21 | part_size=None, 22 | sample_per_part=4, 23 | trans_std=.0): 24 | super(DeformRoIPooling, self).__init__() 25 | self.spatial_scale = spatial_scale 26 | self.pooled_size = pooled_size 27 | self.output_dim = output_dim 28 | self.no_trans = no_trans 29 | self.group_size = group_size 30 | self.part_size = pooled_size if part_size is None else part_size 31 | self.sample_per_part = sample_per_part 32 | self.trans_std = trans_std 33 | 34 | def forward(self, input, rois, offset): 35 | assert 
input.shape[1] == self.output_dim 36 | if self.no_trans: 37 | offset = input.new() 38 | return DeformRoIPoolingFunction.apply(input, rois, offset, 39 | self.spatial_scale, 40 | self.pooled_size, 41 | self.output_dim, 42 | self.no_trans, 43 | self.group_size, 44 | self.part_size, 45 | self.sample_per_part, 46 | self.trans_std) 47 | 48 | _DeformRoIPooling = DeformRoIPoolingFunction.apply 49 | 50 | class DeformRoIPoolingPack(DeformRoIPooling): 51 | 52 | def __init__(self, 53 | spatial_scale, 54 | pooled_size, 55 | output_dim, 56 | no_trans, 57 | group_size=1, 58 | part_size=None, 59 | sample_per_part=4, 60 | trans_std=.0, 61 | deform_fc_dim=1024): 62 | super(DeformRoIPoolingPack, self).__init__(spatial_scale, 63 | pooled_size, 64 | output_dim, 65 | no_trans, 66 | group_size, 67 | part_size, 68 | sample_per_part, 69 | trans_std) 70 | 71 | self.deform_fc_dim = deform_fc_dim 72 | 73 | if not no_trans: 74 | self.offset_mask_fc = nn.Sequential( 75 | nn.Linear(self.pooled_size * self.pooled_size * 76 | self.output_dim, self.deform_fc_dim), 77 | nn.ReLU(inplace=True), 78 | nn.Linear(self.deform_fc_dim, self.deform_fc_dim), 79 | nn.ReLU(inplace=True), 80 | nn.Linear(self.deform_fc_dim, self.pooled_size * 81 | self.pooled_size * 3) 82 | ) 83 | self.offset_mask_fc[4].weight.data.zero_() 84 | self.offset_mask_fc[4].bias.data.zero_() 85 | 86 | def forward(self, input, rois): 87 | offset = input.new() 88 | 89 | if not self.no_trans: 90 | 91 | # do roi_align first 92 | n = rois.shape[0] 93 | roi = DeformRoIPoolingFunction.apply(input, rois, offset, 94 | self.spatial_scale, 95 | self.pooled_size, 96 | self.output_dim, 97 | True, # no trans 98 | self.group_size, 99 | self.part_size, 100 | self.sample_per_part, 101 | self.trans_std) 102 | 103 | # build mask and offset 104 | offset_mask = self.offset_mask_fc(roi.view(n, -1)) 105 | offset_mask = offset_mask.view( 106 | n, 3, self.pooled_size, self.pooled_size) 107 | o1, o2, mask = torch.chunk(offset_mask, 3, dim=1) 108 | offset = torch.cat((o1, o2), dim=1) 109 | mask = torch.sigmoid(mask) 110 | 111 | # do pooling with offset and mask 112 | return DeformRoIPoolingFunction.apply(input, rois, offset, 113 | self.spatial_scale, 114 | self.pooled_size, 115 | self.output_dim, 116 | self.no_trans, 117 | self.group_size, 118 | self.part_size, 119 | self.sample_per_part, 120 | self.trans_std) * mask 121 | # only roi_align 122 | return DeformRoIPoolingFunction.apply(input, rois, offset, 123 | self.spatial_scale, 124 | self.pooled_size, 125 | self.output_dim, 126 | self.no_trans, 127 | self.group_size, 128 | self.part_size, 129 | self.sample_per_part, 130 | self.trans_std) 131 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/build/lib.linux-x86_64-3.7/modules/deform_psroi_pooling.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn.modules.utils import _pair 10 | 11 | from functions.deform_psroi_pooling_func import DeformRoIPoolingFunction 12 | 13 | class DeformRoIPooling(nn.Module): 14 | 15 | def __init__(self, 16 | spatial_scale, 17 | pooled_size, 18 | output_dim, 19 | no_trans, 20 | group_size=1, 21 | part_size=None, 22 | sample_per_part=4, 23 | trans_std=.0): 24 | super(DeformRoIPooling, self).__init__() 25 | self.spatial_scale = spatial_scale 26 | 
-------------------------------------------------------------------------------- /PSF_generation/ray_tracing/difftrace/zernike.py: --------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | 
4 | 
5 | class Zernike_polynomial:
6 |     """https://en.wikipedia.org/wiki/Zernike_polynomials"""
7 |     def __init__(self, coefficient=None, device=torch.device('cpu')):
8 |         if coefficient is not None:
9 |             self.c = coefficient
10 |        else:
11 |            self.c = torch.zeros(15)
12 |        self.device = device
13 |     def get_coefficient(self):
14 |         return self.c
15 | 
16 |     def evaluate(self, R, P, require_matrix=False):
17 |         # evaluate each basis term; the coefficients c are applied outside
18 |         matrix = []
19 |         # value = []
20 |         if not torch.is_tensor(R):
21 |             R = torch.tensor(R)
22 |         if not torch.is_tensor(P):
23 |             P = torch.tensor(P)
24 |         for r, p in zip(R, P):
25 |             zernike_terms = [torch.tensor(1),
26 |                              2 * r * torch.sin(p),
27 |                              2 * r * torch.cos(p),
28 |                              np.sqrt(6) * torch.pow(r, 2) * torch.sin(2 * p),
29 |                              np.sqrt(3) * (2 * torch.pow(r, 2) - 1),
30 |                              np.sqrt(6) * torch.pow(r, 2) * torch.cos(2 * p),
31 |                              np.sqrt(8) * torch.pow(r, 3) * torch.sin(3 * p),
32 |                              np.sqrt(8) * (3 * torch.pow(r, 3) - 2 * r) * torch.sin(p),
33 |                              np.sqrt(8) * (3 * torch.pow(r, 3) - 2 * r) * torch.cos(p),
34 |                              np.sqrt(8) * torch.pow(r, 3) * torch.cos(3 * p),
35 |                              np.sqrt(10) * torch.pow(r, 4) * torch.sin(4 * p),
36 |                              np.sqrt(10) * (4 * torch.pow(r, 4) - 3 * torch.pow(r, 2)) * torch.sin(2 * p),
37 |                              np.sqrt(5) * (6 * torch.pow(r, 4) - 6 * torch.pow(r, 2) + 1),
38 |                              np.sqrt(10) * (4 * torch.pow(r, 4) - 3 * torch.pow(r, 2)) * torch.cos(2 * p),
39 |                              np.sqrt(10) * torch.pow(r, 4) * torch.cos(4 * p)]
40 |             matrix.append(zernike_terms)
41 |             # value.append(sum(map(lambda x,y:x*y,zernike_terms, self.c)))
42 |         if require_matrix:
43 |             return torch.tensor(matrix).to(self.device)
44 |         else:
45 |             return torch.einsum('ij,j->i', (torch.tensor(matrix).to(self.device), self.c))
46 | 
47 |     def polynomial_fit(self, R, P, V):
48 |         A = self.evaluate(R, P, require_matrix=True)
49 |         B = V.reshape(-1)
50 |         X, residuals, rank, singular_values = torch.linalg.lstsq(A, B, driver='gels')
51 |         print("fitted coefficients:{}".format(X))
52 |         print("lstsq residuals:{}".format(residuals))
53 |         self.c = X
54 | 
55 |     def diff(self, R, P):
56 |         """return dz/dx,dz/dy"""
57 |         dzdx = []
58 |         dzdy = []
59 |         if not torch.is_tensor(R):
60 |             R = torch.tensor(R)
61 |         if not torch.is_tensor(P):
62 |             P = torch.tensor(P)
63 |         for r, p in zip(R, P):
64 |             zernike_diff_p = [torch.tensor(0),
65 |                               2 * r * torch.cos(p),
66 |                               2 * r * (-1) * torch.sin(p),
67 |                               np.sqrt(6) * torch.pow(r, 2) * 2 * torch.cos(2 * p),
68 |                               torch.tensor(0),
69 |                               np.sqrt(6) * torch.pow(r, 2) * (-2) * torch.sin(2 * p),
70 |                               np.sqrt(8) * torch.pow(r, 3) * 3 * torch.cos(3 * p),
71 |                               np.sqrt(8) * (3 * torch.pow(r, 3) - 2 * r) * torch.cos(p),
72 |                               np.sqrt(8) * (3 * torch.pow(r, 3) - 2 * r) * (-1) * torch.sin(p),
73 |                               np.sqrt(8) * torch.pow(r, 3) * (-3) * torch.sin(3 * p),
74 |                               np.sqrt(10) * torch.pow(r, 4) * 4 * torch.cos(4 * p),
75 |                               np.sqrt(10) * (4 * torch.pow(r, 4) - 3 * torch.pow(r, 2)) * 2 * torch.cos(2 * p),
76 |                               torch.tensor(0),
77 |                               np.sqrt(10) * (4 * torch.pow(r, 4) - 3 * torch.pow(r, 2)) * (-2) * torch.sin(2 * p),
78 |                               np.sqrt(10) * torch.pow(r, 4) * (-4) * torch.sin(4 * p)]
79 |             dzdp = torch.einsum('i,i', torch.tensor(zernike_diff_p).to(self.device), self.c)
80 |             zernike_diff_r = [torch.tensor(0),
81 |                               2 * torch.sin(p),
82 |                               2 * torch.cos(p),
83 |                               np.sqrt(6) * 2 * r * torch.sin(2 * p),
84 |                               np.sqrt(3) * (4 * r),
85 |                               np.sqrt(6) * 2 * r * torch.cos(2 * p),
86 |                               np.sqrt(8) * 3 * torch.pow(r, 2) * torch.sin(3 * p),
87 |                               np.sqrt(8) * (9 * torch.pow(r, 2) - 2) * torch.sin(p),
88 |                               np.sqrt(8) * (9 * torch.pow(r, 2) - 2) * torch.cos(p),
89 |                               np.sqrt(8) * 3 * torch.pow(r, 2) * torch.cos(3 * p),
90 |                               np.sqrt(10) * 4 * torch.pow(r, 3) * torch.sin(4 * p),
91 |                               np.sqrt(10) * (16 * torch.pow(r, 3) - 6 * r) * torch.sin(2 * p),
92 |                               np.sqrt(5) * (24 * torch.pow(r, 3) - 12 * r),
93 |                               np.sqrt(10) * (16 * torch.pow(r, 3) - 6 * r) * torch.cos(2 * p),
94 |                               np.sqrt(10) * 4 * torch.pow(r, 3) * torch.cos(4 * p)]
95 |             dzdr = torch.einsum('i,i', torch.tensor(zernike_diff_r).to(self.device), self.c)
96 |             # jacobian
97 |             if r == 0:
98 |                 # dividing by zero here would produce NaN gradients
99 |                 dzdx.append(torch.tensor(0))
100 |                dzdy.append(torch.tensor(0))
101 |            else:
102 |                dzdx.append((r * torch.cos(p) * dzdr - torch.sin(p) * dzdp) / r)
103 |                dzdy.append((r * torch.sin(p) * dzdr + torch.cos(p) * dzdp) / r)
104 |        return torch.tensor(dzdx).to(self.device), torch.tensor(dzdy).to(self.device)
105 | 
106 | 
107 | 
108 | 
109 | if __name__ == "__main__":
110 |    rho = torch.linspace(-1,1,20)
111 |    phi = torch.linspace(-2,2,20)
112 |    # value = torch.rand(20)
113 |    # test_poly = Zernike_polynomial()
114 |    # test_poly.polynomial_fit(rho, phi, value)
115 |    # test_poly.evaluate([1,0.5],[0,1],require_value=True)
116 |    # print(test_poly.evaluate([1,0.5],[0,1]))
117 |    def cartesian2polar(X, Y):
118 |        """coordinate conversion: building a complex tensor and taking abs()/angle() is simpler than converting by hand"""
119 |        # r = torch.tensor(list(map(lambda x, y: torch.sqrt(x**2+y**2), X, Y)))
120 |        # p = torch.tensor(list(map(lambda x, y: torch.atan2(y, x), X, Y)))
121 |        # return torch.polar(r, p)
122 |        return torch.complex(X, Y)
123 |    print(cartesian2polar(rho,phi))
124 |    print(rho.dtype)
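A short sketch of how this class can be driven (values are illustrative; the import path assumes the difftrace package layout shown in the tree): fit the 15 coefficients to sampled surface values, then query slopes for ray tracing.

import math
import torch
from difftrace.zernike import Zernike_polynomial

rho = torch.rand(200)                         # normalized radii in [0, 1]
phi = torch.rand(200) * 2 * math.pi           # azimuth angles
c_true = torch.zeros(15); c_true[4] = 0.3     # a single defocus-like term
surface = Zernike_polynomial(c_true).evaluate(rho, phi)
zern = Zernike_polynomial()
zern.polynomial_fit(rho, phi, surface)        # least-squares fit should recover c_true
dzdx, dzdy = zern.diff(rho, phi)              # Cartesian slopes dz/dx, dz/dy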
-------------------------------------------------------------------------------- /fov_deformable_net/train.py: --------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import time
4 | import torch
5 | import numpy as np
6 | from torch.utils.data import DataLoader
7 | from torch.autograd import Variable
8 | 
9 | from option.option_20220330 import args
10 | from loss import *
11 | from utils import *
12 | from deformable_unet import DFUNet
13 | from dataloader import Dataset_from_h5
14 | 
15 | def lr_adjust(optimizer, epoch, init_lr=1e-4, step_size=20, gamma=0.5):
16 |     lr = init_lr * gamma ** (epoch // step_size)
17 |     for param_group in optimizer.param_groups:
18 |         param_group['lr'] = lr
19 | 
20 | def train():
21 | 
22 |     # Load dataset
23 |     dataset = Dataset_from_h5(src_path=args.src_path, recrop_patch_size=args.patch_size, sigma=args.sigma, train=True)
24 |     dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, drop_last=True)
25 |     dataset_val = Dataset_from_h5(src_path=args.val_path, recrop_patch_size=args.val_patch_size, sigma=args.sigma, train=False)
26 |     dataloader_val = DataLoader(dataset=dataset_val, batch_size=args.val_batch_size, shuffle=False, num_workers=8, drop_last=True)
27 |     print('Training path: {:s}\nValidation path: {:s}'.format(args.src_path, args.val_path))
28 |     # Build model
29 |     input_channel, output_channel = 5, 3
30 |     model = DFUNet(input_channel, output_channel, args.n_channel, args.offset_channel)
31 |     model.initialize_weights()
32 | 
33 |     if args.finetune:
34 |         model_dict = torch.load(os.path.join(args.ckpt_dir, 'model_%04d_dict.pth' % args.init_epoch))
35 |         model.load_state_dict(model_dict)
36 | 
37 |     if args.t_loss == 'L2':
38 |         criterion = torch.nn.MSELoss()
39 |         print('Training with L2Loss!')
40 |     elif args.t_loss == 'L1':
41 |         criterion = torch.nn.L1Loss()
42 |         print('Training with L1Loss!')
43 |     elif args.t_loss == 'L2_wz_TV':
44 |         criterion = L2_wz_TV(args)
45 |         print('Training with L2 and TV Loss!')
46 |     elif args.t_loss == 'L2_wz_Perceptual':
47 |         criterion = L2_wz_Perceptual(args)
48 |         print('Training with L2 and Perceptual Loss!')
49 | 
50 |     if torch.cuda.is_available():
51 |         print('Using {} GPU(s); visible device id(s): {:s}'.format(torch.cuda.device_count(), args.gpu))
52 |         if torch.cuda.device_count() > 1:
53 |             #model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
54 |             model = torch.nn.DataParallel(model).cuda()
55 |         else:
56 |             model = model.cuda()
57 | 
58 |         criterion = criterion.cuda()
59 | 
60 |     optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
61 |     # writer = SummaryWriter(args.log_dir)
62 |     ccm = torch.from_numpy(np.ascontiguousarray(args.ccm)).float().cuda()
63 | 
64 |     for epoch in range(args.init_epoch, args.n_epoch):
65 |         loss_sum = 0
66 |         lr_adjust(optimizer, epoch, init_lr=args.lr, step_size=args.milestone, gamma=args.gamma)
67 |         print('Epoch {}, lr {}'.format(epoch+1, optimizer.param_groups[0]['lr']))
68 |         start_time = time.time()
69 |         for i, data in enumerate(dataloader):
70 |             input, label = data
71 |             if torch.cuda.is_available():
72 |                 input, label = input.cuda(), label.cuda()
73 |             input, label = Variable(input), Variable(label)
74 | 
75 |             model.train()
76 |             model.zero_grad()
77 |             optimizer.zero_grad()
78 | 
79 |             output = model(input)
80 | 
81 |             # whether with post processing
82 |             if args.wz_process:
83 |                 print('Calculating loss after post processing')
84 |                 label = process(label, ccm)
85 |                 output = process(output, ccm)
86 | 
87 |             # calculate loss
88 |             loss = criterion(output, label)
89 |             loss.backward()
90 |             optimizer.step()
91 |             loss_sum += loss.item()
92 | 
93 |             if (i % 100 == 0) and (i != 0):
94 |                 loss_avg = loss_sum / 100
95 |                 loss_sum = 0.0
96 |                 print("Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.8f} Time: {:4.4f}s".format(
97 |                     epoch + 1, args.n_epoch, i + 1, len(dataloader), loss_avg, time.time()-start_time))
98 |                 start_time = time.time()
99 |                 # Record train loss
100 |                # writer.add_scalars('Loss_group', {'train_loss': loss_avg}, epoch)
101 |                # # Record learning rate
102 |                # writer.add_scalar('learning rate', optimizer.param_groups[0]['lr'], epoch)
103 |        # save model
104 |        if epoch % args.save_epoch == 0:
105 |            if torch.cuda.device_count() > 1:
106 |                torch.save(model.module.state_dict(), os.path.join(args.ckpt_dir, 'model_%04d_dict.pth' % (epoch+1)))
107 |            else:
108 |                torch.save(model.state_dict(), os.path.join(args.ckpt_dir, 'model_%04d_dict.pth' % (epoch+1)))
109 | 
110 |        # validation
111 |        if epoch % args.val_epoch == 0:
112 |            psnr = 0
113 |            loss_val = 0
114 |            model.eval()
115 |            for i, data in enumerate(dataloader_val):
116 |                input, label = data
117 |                if torch.cuda.is_available():
118 |                    input, label = input.cuda(), label.cuda()
119 |                input, label = Variable(input), Variable(label)
120 | 
121 |                test_out = model(input)
122 |                test_out.detach_()
123 | 
124 |                # compute loss
125 |                loss_val += criterion(test_out, label).item()
126 |                rgb_out = test_out.cpu().numpy().transpose((0,2,3,1))
127 |                clean = label.cpu().numpy().transpose((0,2,3,1))
128 |                for num in range(rgb_out.shape[0]):
129 |                    deblurred = np.clip(rgb_out[num], 0, 1)
130 |                    psnr += compare_psnr(clean[num], deblurred)
131 |            img_nums = rgb_out.shape[0] * len(dataloader_val)
132 |            psnr = psnr / img_nums
133 |            loss_val = loss_val / len(dataloader_val)
134 |            print('Validating: {:0>3} , loss: {:.8f}, PSNR: {:4.4f}'.format(img_nums, loss_val, psnr))
135 |            # writer.add_scalars('Loss_group', {'valid_loss': loss_val}, epoch)
136 |            # writer.add_scalar('valid_psnr', psnr, epoch)
137 |            if args.save_val_img:
138 |                cv2.imwrite(args.ckpt_dir+'_img/%04d_deblurred.png' % epoch, np.uint8(deblurred[..., ::-1]*255))
139 | 
140 |    # writer.close()
141 | 
142 | if __name__ == "__main__":
143 |    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
144 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu 145 | 146 | create_dir(args.log_dir) 147 | create_dir(args.ckpt_dir) 148 | if args.save_val_img: 149 | create_dir(args.ckpt_dir+'_img/') 150 | train() 151 | -------------------------------------------------------------------------------- /fov_deformable_net/dcn/modules/modulated_deform_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import torch 7 | import math 8 | from torch import nn 9 | from torch.nn import init 10 | from torch.nn.modules.utils import _pair 11 | 12 | from functions.modulated_deform_conv_func import ModulatedDeformConvFunction 13 | 14 | class ModulatedDeformConv(nn.Module): 15 | 16 | def __init__(self, in_channels, out_channels, 17 | kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True): 18 | super(ModulatedDeformConv, self).__init__() 19 | 20 | if in_channels % groups != 0: 21 | raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups)) 22 | if out_channels % groups != 0: 23 | raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups)) 24 | 25 | self.in_channels = in_channels 26 | self.out_channels = out_channels 27 | self.kernel_size = _pair(kernel_size) 28 | self.stride = _pair(stride) 29 | self.padding = _pair(padding) 30 | self.dilation = _pair(dilation) 31 | self.groups = groups 32 | self.deformable_groups = deformable_groups 33 | self.im2col_step = im2col_step 34 | self.use_bias = bias 35 | 36 | self.weight = nn.Parameter(torch.Tensor( 37 | out_channels, in_channels//groups, *self.kernel_size)) 38 | self.bias = nn.Parameter(torch.Tensor(out_channels)) 39 | self.reset_parameters() 40 | if not self.use_bias: 41 | self.bias.requires_grad = False 42 | 43 | def reset_parameters(self): 44 | n = self.in_channels 45 | init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 46 | if self.bias is not None: 47 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) 48 | bound = 1 / math.sqrt(fan_in) 49 | init.uniform_(self.bias, -bound, bound) 50 | 51 | def forward(self, input, offset, mask): 52 | assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 53 | offset.shape[1] 54 | assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ 55 | mask.shape[1] 56 | return ModulatedDeformConvFunction.apply(input, offset, mask, 57 | self.weight, 58 | self.bias, 59 | self.stride, 60 | self.padding, 61 | self.dilation, 62 | self.groups, 63 | self.deformable_groups, 64 | self.im2col_step) 65 | 66 | _ModulatedDeformConv = ModulatedDeformConvFunction.apply 67 | 68 | class ModulatedDeformConvPack(ModulatedDeformConv): 69 | 70 | def __init__(self, in_channels, out_channels, 71 | kernel_size, stride, padding, 72 | dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1): 73 | super(ModulatedDeformConvPack, self).__init__(in_channels, out_channels, 74 | kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias) 75 | 76 | out_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1] 77 | self.conv_offset_mask = nn.Conv2d(self.in_channels, 78 | out_channels, 79 | kernel_size=self.kernel_size, 80 | stride=self.stride, 81 | padding=self.padding, 82 | bias=True) 83 | self.conv_offset_mask.lr_mult = lr_mult 84 | 
self.init_offset() 85 | 86 | def init_offset(self): 87 | self.conv_offset_mask.weight.data.zero_() 88 | self.conv_offset_mask.bias.data.zero_() 89 | 90 | def forward(self, input): 91 | out = self.conv_offset_mask(input) 92 | o1, o2, mask = torch.chunk(out, 3, dim=1) 93 | offset = torch.cat((o1, o2), dim=1) 94 | mask = torch.sigmoid(mask) 95 | return ModulatedDeformConvFunction.apply(input, offset, mask, 96 | self.weight, 97 | self.bias, 98 | self.stride, 99 | self.padding, 100 | self.dilation, 101 | self.groups, 102 | self.deformable_groups, 103 | self.im2col_step) 104 | #==============================================================================# 105 | #==============================================================================# 106 | class ModulatedDeformConvPack2(ModulatedDeformConv): 107 | 108 | def __init__(self, in_channels, out_channels, 109 | kernel_size, stride, padding, 110 | dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1, extra_offset_mask=False, offset_in_channel=32): 111 | super(ModulatedDeformConvPack2, self).__init__(in_channels, out_channels, 112 | kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias) 113 | self.extra_offset_mask = extra_offset_mask 114 | out_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1] 115 | self.conv_offset_mask = nn.Conv2d(offset_in_channel, 116 | out_channels, 117 | kernel_size=self.kernel_size, 118 | stride=self.stride, 119 | padding=self.padding, 120 | bias=True) 121 | self.conv_offset_mask.lr_mult = lr_mult 122 | self.init_offset() 123 | 124 | def init_offset(self): 125 | self.conv_offset_mask.weight.data.zero_() 126 | self.conv_offset_mask.bias.data.zero_() 127 | 128 | def forward(self, input): 129 | if self.extra_offset_mask: 130 | out = self.conv_offset_mask(input[1]) 131 | input = input[0] 132 | else: 133 | out = self.conv_offset_mask(input) 134 | o1, o2, mask = torch.chunk(out, 3, dim=1) 135 | offset = torch.cat((o1, o2), dim=1) 136 | mask = torch.sigmoid(mask) 137 | return ModulatedDeformConvFunction.apply(input, offset, mask, 138 | self.weight, 139 | self.bias, 140 | self.stride, 141 | self.padding, 142 | self.dilation, 143 | self.groups, 144 | self.deformable_groups, 145 | self.im2col_step) -------------------------------------------------------------------------------- /fov_deformable_net/dataset_generator.py: -------------------------------------------------------------------------------- 1 | from random import uniform 2 | import h5py 3 | import cv2 4 | import os 5 | import glob 6 | import time 7 | import tifffile 8 | import numpy as np 9 | 10 | from utils import create_dir 11 | 12 | def crop_patch(img, half_patch_size, stride, random_crop): 13 | """ 14 | crop image into patches 15 | input args: 16 | img: input image array, np.array 17 | half_patch_size: half of patch size, int 18 | stride: stride of neighbor patch, int 19 | random_crop: if random crop the input image, bool 20 | """ 21 | patch_list = [] 22 | [h, w, c] = img.shape 23 | ###################################################################################### 24 | # calculate the fov information 25 | h_range = np.arange(0, h, 1) 26 | w_range = np.arange(0, w, 1) 27 | img_fld_w, img_fld_h = np.meshgrid(w_range, h_range) 28 | img_fld_h = ((img_fld_h - (h-1)/2) / ((h-1)/2)).astype(np.float32) 29 | img_fld_w = ((img_fld_w - (w-1)/2) / ((w-1)/2)).astype(np.float32) 30 | img_fld_h = np.expand_dims(img_fld_h, -1) 31 | img_fld_w = np.expand_dims(img_fld_w, -1) 32 | img_wz_fld = 
np.concatenate([img, img_fld_h, img_fld_w], 2)
33 |     ######################################################################################
34 |     if random_crop:
35 |         crop_num = 100
36 |         pos = [(np.random.randint(half_patch_size, h - half_patch_size), \
37 |                 np.random.randint(half_patch_size, w - half_patch_size)) \
38 |                for i in range(crop_num)]
39 |     else:
40 |         pos = [(ht, wt) for ht in range(half_patch_size, h, stride) \
41 |                for wt in range(half_patch_size, w, stride)]
42 | 
43 |     for (ht, wt) in pos:
44 |         cropped_img = img_wz_fld[ht - half_patch_size:ht + half_patch_size, wt - half_patch_size:wt + half_patch_size, :]
45 |         patch_list.append(cropped_img)
46 | 
47 |     return patch_list
48 | 
49 | def mask_img(img_wz_fld, fov, fov_interval):
50 |     """
51 |     zero out the image pixels whose fov lies outside the given interval
52 |     """
53 |     mask = np.where(np.logical_and(fov_interval[0] <= fov, fov <= fov_interval[1]), 1, 0)
54 |     # zero the out-of-range pixels of the image (first 6 channels: input + label)
55 |     img_wz_fld[..., 0:6][mask == 0] = 0
56 |     return img_wz_fld
57 | 
58 | def crop_patch_wzfov(img, half_patch_size, stride, random_crop, splited_fov, if_mask):
59 |     """
60 |     crop image into patches
61 |     input args:
62 |         img: input image array, np.array
63 |         half_patch_size: half of patch size, int
64 |         stride: stride of neighbor patch, int
65 |         random_crop: if random crop the input image, bool; splited_fov: [low, high] normalized fov interval, list; if_mask: whether to mask pixels outside the interval, bool
66 |     """
67 |     patch_list = []
68 |     [h, w, c] = img.shape
69 |     ######################################################################################
70 |     # calculate the fov information
71 |     h_range = np.arange(0, h, 1)
72 |     w_range = np.arange(0, w, 1)
73 |     img_fld_w, img_fld_h = np.meshgrid(w_range, h_range)
74 |     img_fld_h = ((img_fld_h - (h-1)/2) / ((h-1)/2)).astype(np.float32)
75 |     img_fld_w = ((img_fld_w - (w-1)/2) / ((w-1)/2)).astype(np.float32)
76 |     img_fld_h = np.expand_dims(img_fld_h, -1)
77 |     img_fld_w = np.expand_dims(img_fld_w, -1)
78 |     img_wz_fld = np.concatenate([img, img_fld_h, img_fld_w], 2)
79 |     ######################################################################################
80 |     if random_crop:
81 |         crop_num = 100
82 |         pos = [(np.random.randint(half_patch_size, h - half_patch_size), \
83 |                 np.random.randint(half_patch_size, w - half_patch_size)) \
84 |                for i in range(crop_num)]
85 |     else:
86 |         pos = [(ht, wt) for ht in range(half_patch_size, h, stride) \
87 |                for wt in range(half_patch_size, w, stride)]
88 | 
89 |     for (ht, wt) in pos:
90 |         cropped_img = img_wz_fld[ht - half_patch_size:ht + half_patch_size, wt - half_patch_size:wt + half_patch_size, :]
91 |         # judge whether this cropped image is in the interval of fov
92 |         cropped_fov = cropped_img[:, :, -2:] # fov information
93 |         normalized_fov = np.sqrt(np.sum(np.power(cropped_fov, 2), 2)) / np.sqrt(1.0 + 1.0)
94 | 
95 |         if (splited_fov[0] <= np.max(normalized_fov)) and (np.min(normalized_fov) <= splited_fov[1]):
96 |             if if_mask:
97 |                 cropped_img = mask_img(cropped_img, normalized_fov, splited_fov)
98 | 
99 |             patch_list.append(cropped_img) # include the image in the fov interval
100 | 
101 |    return patch_list
102 | 
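To make the interval test above concrete, here is a standalone illustration (shapes assumed: a 200x200 patch whose last two channels hold the normalized (h, w) field coordinates, as built by the functions above):

import numpy as np

patch = np.random.rand(200, 200, 8)        # [input | label | fld_h | fld_w]
fov = patch[:, :, -2:]                     # normalized field coordinates
normalized_fov = np.sqrt(np.sum(np.power(fov, 2), 2)) / np.sqrt(1.0 + 1.0)  # radial field in [0, 1]
lo, hi = 0.4, 0.6
keep = (lo <= np.max(normalized_fov)) and (np.min(normalized_fov) <= hi)    # same overlap test as above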
103 | def gen_dataset(src_input_files, src_label_files, dst_path, date_index, splited_fov, if_mask):
104 |     """
105 |     generating datasets:
106 |     input args:
107 |         src_input_files: input image files list, list[]
108 |         src_label_files: label image files list, list[]
109 |         dst_path: path for saving h5py file, str; date_index: date tag in the file name, str; splited_fov: normalized fov interval, list; if_mask: whether to mask out-of-interval pixels, bool
110 |     """
111 |     # h5py file pathname, record the fov information
112 |     h5py_path = dst_path + "/dataset_" + date_index + "_fov_" + \
113 |                 str(int(splited_fov[0] * 10)) + "_" + str(int(splited_fov[1] * 10)) + ".h5"
114 |     h5f = h5py.File(h5py_path, 'w')
115 | 
116 |     for img_idx in range(len(src_input_files)):
117 |         print("Now processing img pairs of %s" % os.path.basename(src_input_files[img_idx]))
118 |         img_input = tifffile.imread(src_input_files[img_idx])
119 |         img_label = tifffile.imread(src_label_files[img_idx])
120 | 
121 |         # normalize the input and the label
122 |         img_input = np.asarray(img_input / 65535, np.float32)
123 |         img_label = np.asarray(img_label / 65535, np.float32)
124 | 
125 |         # concatenate input and label together
126 |         img_pair = np.concatenate([img_input, img_label], 2)
127 | 
128 |         # crop the patch
129 |         if splited_fov == [0.0, 1.0]:
130 |             patch_list = crop_patch(img_pair, 100, 100, False)
131 |         else:
132 |             patch_list = crop_patch_wzfov(img_pair, 100, 100, False, splited_fov, if_mask)
133 | 
134 |         # save the patches into h5py file
135 |         for patch_idx in range(len(patch_list)):
136 |             data = patch_list[patch_idx].copy()
137 |             h5f.create_dataset(str(img_idx)+'_'+str(patch_idx), shape=(200,200,8), data=data)
138 | 
139 |     h5f.close()
140 | 
141 | 
142 | if __name__ == "__main__":
143 |     # generating train/valid/test datasets
144 |     date_ind = "2022xxxx"                # date information for h5py file
145 |     dataset_type = "valid"               # type of dataset "train" or "valid"
146 |     camera_idx = "camera0x"              # index of camera "camera01" to "camera05"
147 |     base_path = "./synthetic_datasets"   # system path
148 |     input_dir = "input_rgb_2022xxxx"     # input data dir
149 |     label_dir = "label_rgb"              # label data dir
150 |     if_mask = False                      # whether add mask
151 | 
152 |     src_input_path = os.path.join(base_path, camera_idx, dataset_type + "_datasets", input_dir)
153 |     src_label_path = os.path.join(base_path, camera_idx, dataset_type + "_datasets", label_dir)
154 |     dst_path = os.path.join(base_path, camera_idx, dataset_type + "_datasets", "h5py_file")
155 |     create_dir(dst_path)
156 | 
157 |     src_input_files = sorted(glob.glob(src_input_path + "/*.tiff"))
158 |     src_label_files = sorted(glob.glob(src_label_path + "/*.tiff"))
159 | 
160 |     splited_fov = [0.0, 1.0]
161 |     print("start dataset generation!")
162 |     # generate one dataset in one step; within one image, split the fov by the interval
163 |     for interval_idx in range(len(splited_fov)-1):
164 |         gen_dataset(src_input_files, src_label_files, dst_path, date_ind, \
165 |                     [splited_fov[interval_idx], splited_fov[interval_idx+1]], if_mask)
-------------------------------------------------------------------------------- /fov_deformable_net/test.py: --------------------------------------------------------------------------------
1 | import os, time, glob, cv2
2 | import numpy as np
3 | import matplotlib.image as mpimg
4 | from skimage.measure import compare_psnr, compare_ssim
5 | import torch
6 | import torchvision.transforms as transforms
7 | 
8 | from utils import *
9 | from option.option_20201230 import args
10 | from model.__init__ import make_model
11 | 
12 | def compute_fld_info(img):
13 |     [h, w, c] = img.shape
14 |     h_range = np.arange(0, h, 1)
15 |     w_range = np.arange(0, w, 1)
16 |     img_fld_w, img_fld_h = np.meshgrid(w_range, h_range)
17 |     img_fld_h = ((img_fld_h - (h-1)/2) / ((h-1)/2)).astype(np.float32)
18 |     img_fld_w = ((img_fld_w - (w-1)/2) / ((w-1)/2)).astype(np.float32)
19 |     img_fld_h = np.expand_dims(img_fld_h, -1)
20 |     img_fld_w = np.expand_dims(img_fld_w, -1)
21 |     img_wz_fld = np.concatenate([img, img_fld_h, img_fld_w], 2)
22 |     return img_wz_fld
23 | 
24 | def crop_patch(padded_in_img_wz_fld, patch_size=500, pad_size=100):
25 |     patch_list = []
26 |     [H_img, W_img, C] = 
padded_in_img_wz_fld.shape 27 | H_num = int((H_img-pad_size) / patch_size) 28 | W_num = int((W_img-pad_size) / patch_size) 29 | for h_index in range(H_num): 30 | for w_index in range(W_num): 31 | patch = padded_in_img_wz_fld[patch_size*h_index : patch_size*(h_index+1)+pad_size, 32 | patch_size*w_index : patch_size*(w_index+1)+pad_size, :] 33 | patch_list.append(patch) 34 | 35 | return patch_list 36 | 37 | def sew_up_img(out_patch_list, patch_size=500, pad_size=100, img_size=[3000, 4000]): 38 | rgb = np.zeros((img_size[0], img_size[1], 3)) 39 | for patch_index in range(len(out_patch_list)): 40 | # w seq first, h seq second 41 | h_index = patch_index // 8 42 | w_index = patch_index - h_index*8 43 | patch_data = out_patch_list[patch_index].copy() 44 | 45 | patch_data = patch_data[int(pad_size/2) : int(patch_size+pad_size/2), int(pad_size/2) : int(patch_size+pad_size/2), :] 46 | rgb[h_index*patch_size : (h_index+1)*patch_size, w_index*patch_size : (w_index+1)*patch_size] = patch_data 47 | 48 | return rgb 49 | 50 | def test_info_generator(gt_src_file_list, psnr, ssim, test_time, test_time_avr, result_txt_path): 51 | ''' 52 | record the psnr, ssim and the time consuming of each test 53 | gt_src_file_list: file list, list[] 54 | psnr: testing psnr recorder, list[] 55 | ssim: testing ssim recorder, list[] 56 | test_time: testing time recorder, list[] 57 | test_time_avr: average testing time, float 58 | result_txt_path: path to save the test info, str 59 | ''' 60 | f_info_head = open(result_txt_path, 'w') 61 | for i in range(len(gt_src_file_list)): 62 | f_info_head.write('src_file: %s: psnr: %f, ssim: %f, time: %f \n' %(os.path.basename(gt_src_file_list[i]), psnr[i], ssim[i], test_time[i])) 63 | 64 | f_info_head.write('average time: %f' %(test_time_avr)) 65 | f_info_head.close() 66 | return 0 67 | 68 | 69 | 70 | def evaluate_net(): 71 | create_dir(args.result_png_path) 72 | print('Testing path is %s' % args.blur_src_path) 73 | blurred_src_file_list = sorted(glob.glob(args.blur_src_path + '/*.png' )) 74 | gt_src_file_list = sorted(glob.glob(args.gt_src_path + '/*.png')) 75 | 76 | if args.gt_src_path: 77 | psnr = np.zeros(len(gt_src_file_list)) 78 | ssim = np.zeros(len(gt_src_file_list)) 79 | test_time = np.zeros(len(gt_src_file_list) * 96) 80 | 81 | # Build model 82 | input_channel, output_channel = 5, 3 83 | 84 | model = make_model(input_channel, output_channel, args) 85 | 86 | if torch.cuda.is_available(): 87 | model_dict = torch.load(args.ckpt_dir_test + '/model_%04d_dict.pth' % args.epoch_test) 88 | model.load_state_dict(model_dict) 89 | model = model.cuda() 90 | print('Finish loading the model of the %dth epoch' % args.epoch_test) 91 | else: 92 | print('There are not available cuda devices !') 93 | 94 | model.eval() 95 | 96 | #=================# 97 | for index in range(len(gt_src_file_list)): 98 | out_patch_list = [] 99 | img_name = os.path.split(gt_src_file_list[index])[-1].split('.')[0] 100 | 101 | # read the image 102 | gt_img = cv2.imread(gt_src_file_list[index]) 103 | gt_img = gt_img[..., ::-1] 104 | gt_img = np.asarray(gt_img / 255, np.float64) 105 | in_img = cv2.imread(blurred_src_file_list[index]) 106 | in_img = in_img[..., ::-1] 107 | in_img = np.asarray(in_img / 255, np.float64) 108 | 109 | # add noise 110 | if args.sigma: 111 | noise = np.random.normal(loc=0, scale=args.sigma/255.0, size=in_img.shape) 112 | in_img = in_img + noise 113 | in_img = np.clip(in_img, 0.0, 1.0) 114 | 115 | # compute field 116 | in_img_wz_fld = compute_fld_info(in_img) 117 | [h, w, c] = in_img_wz_fld.shape 
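    # (annotation) The 50-pixel 'edge' padding added below, combined with the
    # 100-pixel overlap of the 600x600 crops from crop_patch(), lets
    # sew_up_img() later discard a 50-pixel rim from every 500x500 output
    # tile, hiding patch-boundary artifacts before the tiles are stitched
    # back into the full 3000x4000 frame.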
118 | padded_in_img_wz_fld = np.pad(in_img_wz_fld, ((50, 50), (50, 50), (0, 0)), 'edge') 119 | # crop_patch 120 | patch_list = crop_patch(padded_in_img_wz_fld, patch_size=500, pad_size=100) 121 | # concat in and gt, gt->in 122 | print('process img: %s' % blurred_src_file_list[index]) 123 | for i in range(len(patch_list)): 124 | in_patch = patch_list[i].copy() 125 | in_patch = transforms.functional.to_tensor(in_patch) 126 | in_patch = in_patch.unsqueeze_(0).float() 127 | if torch.cuda.is_available(): 128 | in_patch = in_patch.cuda() 129 | 130 | torch.cuda.synchronize() 131 | start_time = time.time() 132 | with torch.no_grad(): 133 | out_patch = model(in_patch) 134 | torch.cuda.synchronize() 135 | test_time[index * 96 + i] = time.time() - start_time 136 | 137 | rgb_patch = out_patch.cpu().detach().numpy().transpose((0, 2, 3, 1)) 138 | rgb_patch = np.clip(rgb_patch[0], 0, 1) 139 | out_patch_list.append(rgb_patch) 140 | 141 | rgb = sew_up_img(out_patch_list, patch_size=500, pad_size=100, img_size=[3000, 4000]) 142 | 143 | # compare psnr and ssim 144 | psnr[index] = compare_psnr(gt_img, rgb) 145 | ssim[index] = compare_ssim(gt_img, rgb, multichannel=True) 146 | # save image 147 | rgb = rgb[..., ::-1] 148 | cv2.imwrite(args.result_png_path + '/' + img_name + ".png", np.uint8(rgb*255)) 149 | print('test image: %s saved!' %img_name) 150 | 151 | test_time_avr = 0 152 | #=========== 153 | #print psnr,ssim 154 | for i in range(len(gt_src_file_list)): 155 | print('src_file: %s: ' %(os.path.split(gt_src_file_list[i])[-1].split('.')[0])) 156 | if args.gt_src_path: 157 | print('psnr: %f, ssim: %f, average time: %f' % (psnr[i], ssim[i], test_time[i])) 158 | 159 | if i > 0: 160 | test_time_avr += test_time[i] 161 | 162 | test_time_avr = test_time_avr / (len(gt_src_file_list)-1) 163 | print('average time: %f' % (test_time_avr)) 164 | # save the psnr, ssim information 165 | result_txt_path = args.result_png_path + '/' + "test_result.txt" 166 | test_info_generator(gt_src_file_list, psnr, ssim, test_time, test_time_avr, result_txt_path) 167 | return 0 168 | 169 | if __name__ == "__main__": 170 | 171 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu 172 | print('Use {} GPU, which order is {:s}th'.format(torch.cuda.device_count(), args.gpu)) 173 | 174 | evaluate_net() 175 | -------------------------------------------------------------------------------- /fov_deformable_net/test_real.py: -------------------------------------------------------------------------------- 1 | import os, time, glob, cv2 2 | import tifffile 3 | import numpy as np 4 | import torch 5 | import torchvision.transforms as transforms 6 | import scipy.io as sio 7 | from utils import * 8 | from option.option_20220330 import args 9 | from deformable_unet import DFUNet 10 | 11 | def compute_fld_info(img): 12 | [h, w, c] = img.shape 13 | h_range = np.arange(0, h, 1) 14 | w_range = np.arange(0, w, 1) 15 | img_fld_w, img_fld_h = np.meshgrid(w_range, h_range) 16 | img_fld_h = ((img_fld_h - (h-1)/2) / ((h-1)/2)).astype(np.float32) 17 | img_fld_w = ((img_fld_w - (w-1)/2) / ((w-1)/2)).astype(np.float32) 18 | img_fld_h = np.expand_dims(img_fld_h, -1) 19 | img_fld_w = np.expand_dims(img_fld_w, -1) 20 | img_wz_fld = np.concatenate([img, img_fld_h, img_fld_w], 2) 21 | return img_wz_fld 22 | 23 | def crop_small_patch(in_img_wz_fld, patch_half_size=100): 24 | [H_img, W_img, C] = in_img_wz_fld.shape 25 | patch_1 = in_img_wz_fld[int(H_img/2)-patch_half_size : int(H_img/2)+patch_half_size, 26 | int(W_img/2)-patch_half_size : int(W_img/2)+patch_half_size, :] 27 | 
patch_2 = in_img_wz_fld[1413-patch_half_size : 1413+patch_half_size, 28 | 2119-patch_half_size : 2119+patch_half_size, :] 29 | patch_3 = in_img_wz_fld[1017-patch_half_size : 1017+patch_half_size, 30 | 1526-patch_half_size : 1526+patch_half_size, :] 31 | patch_4 = in_img_wz_fld[277-patch_half_size : 277+patch_half_size, 32 | 415-patch_half_size : 415+patch_half_size, :] 33 | patch_list = [patch_1, patch_2, patch_3, patch_4] 34 | return patch_list 35 | 36 | def crop_patch(padded_in_img_wz_fld, patch_size=500, pad_size=100): 37 | patch_list = [] 38 | [H_img, W_img, C] = padded_in_img_wz_fld.shape 39 | H_num = int((H_img-pad_size) / patch_size) 40 | W_num = int((W_img-pad_size) / patch_size) 41 | for h_index in range(H_num): 42 | for w_index in range(W_num): 43 | patch = padded_in_img_wz_fld[patch_size*h_index : patch_size*(h_index+1)+pad_size, 44 | patch_size*w_index : patch_size*(w_index+1)+pad_size, :] 45 | patch_list.append(patch) 46 | 47 | return patch_list 48 | 49 | def sew_up_img(out_patch_list, patch_size=500, pad_size=100, img_size=[3000, 4000]): 50 | rgb = np.zeros((img_size[0], img_size[1], 3)) 51 | for patch_index in range(len(out_patch_list)): 52 | # w seq first, h seq second 53 | h_index = patch_index // 8 54 | w_index = patch_index - h_index*8 55 | patch_data = out_patch_list[patch_index].copy() 56 | 57 | patch_data = patch_data[int(pad_size/2) : int(patch_size+pad_size/2), int(pad_size/2) : int(patch_size+pad_size/2), :] 58 | rgb[h_index*patch_size : (h_index+1)*patch_size, w_index*patch_size : (w_index+1)*patch_size] = patch_data 59 | 60 | return rgb 61 | 62 | def postprocessing(img, ccm): 63 | # copy image 64 | # img_out = np.zeros_like(img) 65 | # apply gamma 66 | img_out = np.power((img+1e-8), 0.454) 67 | img_out = np.clip(img_out, 0.0, 1.0) 68 | # apply ccm 69 | img_out = apply_cmatrix(img_out, ccm) 70 | img_out = np.clip(img_out, 0.0, 1.0) 71 | return img_out 72 | 73 | def apply_cmatrix(img, ccm): 74 | if not np.size(img, 2) == 3: 75 | raise ValueError('Incorrect channel dimension!') 76 | 77 | img_out = np.zeros_like(img) 78 | img_out[:, :, 0] = ccm[0, 0] * img[:, :, 0] + ccm[0, 1] * img[:, :, 1] + ccm[0, 2] * img[:, :, 2] 79 | img_out[:, :, 1] = ccm[1, 0] * img[:, :, 0] + ccm[1, 1] * img[:, :, 1] + ccm[1, 2] * img[:, :, 2] 80 | img_out[:, :, 2] = ccm[2, 0] * img[:, :, 0] + ccm[2, 1] * img[:, :, 1] + ccm[2, 2] * img[:, :, 2] 81 | return img_out 82 | 83 | def evaluate_net(): 84 | create_dir(args.real_dst_tiff_path) 85 | print('Testing path is %s' % args.real_blur_src_path) 86 | blurred_src_file_list = sorted(glob.glob(args.real_blur_src_path + '/*.tiff' )) 87 | blurred_head_file_list = sorted(glob.glob(args.real_blur_src_path + '/*.mat' )) 88 | 89 | # Build model 90 | input_channel, output_channel = 5, 3 91 | 92 | model = DFUNet(input_channel, output_channel, args.n_channel, args.offset_channel) 93 | 94 | if torch.cuda.is_available(): 95 | model_dict = torch.load(args.ckpt_dir_test_real + '/model_%04d_dict.pth' % args.epoch_test_real) 96 | model.load_state_dict(model_dict) 97 | model = model.cuda() 98 | print('Finish loading the model of the %dth epoch' % args.epoch_test_real) 99 | else: 100 | print('There are not available cuda devices !') 101 | 102 | model.eval() 103 | 104 | #=================# 105 | for index in range(len(blurred_src_file_list)): 106 | out_patch_list = [] 107 | img_name = os.path.split(blurred_src_file_list[index])[-1].split('.')[0] 108 | in_img = tifffile.imread(blurred_src_file_list[index]) 109 | head = sio.loadmat(blurred_head_file_list[index]) 110 | 
wb, ccm = head['wb'], head['ccm'] 111 | 112 | in_img = np.asarray(in_img / 65535, np.float32) 113 | 114 | # normalize to 0~1 115 | in_img = (in_img - np.min(in_img)) / (np.max(in_img) - np.min(in_img)) 116 | 117 | # compute field 118 | in_img_wz_fld = compute_fld_info(in_img) 119 | [h, w, c] = in_img_wz_fld.shape 120 | padded_in_img_wz_fld = np.pad(in_img_wz_fld, ((50, 50), (50, 50), (0, 0)), 'edge') 121 | # crop_patch 122 | patch_list = crop_patch(padded_in_img_wz_fld, patch_size=500, pad_size=100) 123 | # concat in and gt, gt->in 124 | print('process img: %s' % blurred_src_file_list[index]) 125 | for i in range(len(patch_list)): 126 | in_patch = patch_list[i].copy() 127 | in_patch = transforms.functional.to_tensor(in_patch) 128 | in_patch = in_patch.unsqueeze_(0).float() 129 | if torch.cuda.is_available(): 130 | in_patch = in_patch.cuda() 131 | 132 | torch.cuda.synchronize() 133 | with torch.no_grad(): 134 | out_patch = model(in_patch) 135 | torch.cuda.synchronize() 136 | 137 | rgb_patch = out_patch.cpu().detach().numpy().transpose((0, 2, 3, 1)) 138 | rgb_patch = np.clip(rgb_patch[0], 0, 1) 139 | out_patch_list.append(rgb_patch) 140 | 141 | rgb = sew_up_img(out_patch_list, patch_size=500, pad_size=100, img_size=[3000, 4000]) 142 | # postprocessing 143 | in_img = postprocessing(in_img, ccm) 144 | rgb = postprocessing(rgb, ccm) 145 | # save image 146 | cv2.imwrite(args.real_dst_tiff_path + '/' + img_name + '_ipt.png', (in_img[..., ::-1]*255).astype(np.uint8)) 147 | cv2.imwrite(args.real_dst_tiff_path + '/' + img_name + '_out.png', (rgb[..., ::-1]*255).astype(np.uint8)) 148 | #------------------------------------------------------------------------------- 149 | # for patch_index in range(len(out_patch_list)): 150 | # # save image 151 | # img_patch = patch_list[patch_index] 152 | # create_dir(args.real_dst_png_path + '/' + img_name) 153 | # imwrite(args.real_dst_png_path + '/' + img_name + '/real_test_deblur_%02d.png' %patch_index, np.uint8(out_patch_list[patch_index]*255)) 154 | # imwrite(args.real_dst_png_path + '/' + img_name + '/real_test_src_%02d.png' % patch_index, np.uint8(img_patch[:, :, 0:3]*255)) 155 | #------------------------------------------------------------------------------- 156 | print('real test img of %s saved!' 
%blurred_src_file_list[index]) 157 | 158 | return 0 159 | 160 | 161 | if __name__ == "__main__": 162 | 163 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu 164 | print('Use {} GPU, which order is {:s}th'.format(torch.cuda.device_count(), args.gpu)) 165 | 166 | evaluate_net() 167 | -------------------------------------------------------------------------------- /fov_deformable_net/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | from math import exp 4 | from torch import nn 5 | from torchvision import models 6 | from torch.autograd import Variable 7 | from utils import normalize_tensor_transform 8 | 9 | class L2_wz_TV(nn.Module): 10 | def __init__(self, args): 11 | super(L2_wz_TV, self).__init__() 12 | self.mse_loss = nn.MSELoss() 13 | self.tv_loss = TVLoss() 14 | self.TV_WEIGHT = args.tv_weight 15 | 16 | def forward(self, out_images, target_images): 17 | # MSELoss 18 | image_loss = self.mse_loss(out_images, target_images) 19 | # TV Loss 20 | tv_loss = self.tv_loss(out_images) 21 | return image_loss + self.TV_WEIGHT * tv_loss 22 | 23 | class L2_wz_Perceptual(nn.Module): 24 | def __init__(self, args): 25 | super(L2_wz_Perceptual, self).__init__() 26 | self.mse_loss = nn.MSELoss() 27 | self.per_loss = PerceptualLoss() 28 | self.STYLE_WEIGHT = args.style_weight 29 | self.CONTENT_WEIGHT = args.content_weight 30 | 31 | def forward(self, out_images, target_images): 32 | # MSELoss 33 | image_loss = self.mse_loss(out_images, target_images) 34 | # Perceptual Loss 35 | out_images_norm, target_images_norm = normalize_tensor_transform(out_images, target_images) 36 | style_loss, content_loss = self.per_loss(out_images_norm, target_images_norm) 37 | # print(style_loss.data, content_loss.data) 38 | return image_loss + self.STYLE_WEIGHT * style_loss.data + self.CONTENT_WEIGHT * content_loss.data 39 | 40 | class L2_wz_SSIM(nn.Module): 41 | def __init__(self, args): 42 | super(L2_wz_SSIM, self).__init__() 43 | self.mse_loss = nn.MSELoss() 44 | self.ssim_loss = SSIMLoss() 45 | self.SSIM_WEIGHT = args.ssim_weight 46 | 47 | def forward(self, out_images, target_images): 48 | # MSELoss 49 | image_loss = self.mse_loss(out_images, target_images) 50 | # SSIMLoss 51 | ssim_loss = self.ssim_loss(out_images, target_images) 52 | return image_loss + self.SSIM_WEIGHT * ssim_loss 53 | 54 | #---------------------------------------------------------------------------------- 55 | 56 | class TVLoss(nn.Module): 57 | def __init__(self, tv_loss_weight=1): 58 | super(TVLoss, self).__init__() 59 | self.tv_loss_weight = tv_loss_weight 60 | 61 | def forward(self, x): 62 | batch_size = x.size()[0] 63 | h_x = x.size()[2] 64 | w_x = x.size()[3] 65 | count_h = self.tensor_size(x[:, :, 1:, :]) 66 | count_w = self.tensor_size(x[:, :, :, 1:]) 67 | h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum() 68 | w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum() 69 | return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size 70 | 71 | @staticmethod 72 | def tensor_size(t): 73 | return t.size()[1] * t.size()[2] * t.size()[3] 74 | 75 | class PerceptualLoss(nn.Module): 76 | def __init__(self): 77 | super(PerceptualLoss, self).__init__() 78 | features = models.vgg16(pretrained=True).features 79 | self.to_relu_1_2 = nn.Sequential() 80 | self.to_relu_2_2 = nn.Sequential() 81 | self.to_relu_3_3 = nn.Sequential() 82 | self.to_relu_4_3 = nn.Sequential() 83 | 84 | for x in range(4): 85 | 
self.to_relu_1_2.add_module(str(x), features[x])
86 |         for x in range(4, 9):
87 |             self.to_relu_2_2.add_module(str(x), features[x])
88 |         for x in range(9, 16):
89 |             self.to_relu_3_3.add_module(str(x), features[x])
90 |         for x in range(16, 23):
91 |             self.to_relu_4_3.add_module(str(x), features[x])
92 | 
93 |         self.mse_loss = nn.MSELoss()
94 |         # don't need the gradients, just want the features
95 |         for param in self.parameters():
96 |             param.requires_grad = False
97 | 
98 |     def _gram(self, x):
99 |         (bs, ch, h, w) = x.size()
100 |        f = x.view(bs, ch, w*h)
101 |        f_T = f.transpose(1, 2)
102 |        G = f.bmm(f_T) / (ch * h * w)
103 |        return G
104 | 
105 |    def forward(self, pred_img, targ_img):
106 |        h_relu_1_2_pred_img = self.to_relu_1_2(pred_img)
107 |        h_relu_1_2_targ_img = self.to_relu_1_2(targ_img)
108 |        style_loss_1_2 = self.mse_loss(self._gram(h_relu_1_2_pred_img), self._gram(h_relu_1_2_targ_img))
109 | 
110 |        h_relu_2_2_pred_img = self.to_relu_2_2(h_relu_1_2_pred_img)
111 |        h_relu_2_2_targ_img = self.to_relu_2_2(h_relu_1_2_targ_img)
112 |        style_loss_2_2 = self.mse_loss(self._gram(h_relu_2_2_pred_img), self._gram(h_relu_2_2_targ_img))
113 | 
114 |        h_relu_3_3_pred_img = self.to_relu_3_3(h_relu_2_2_pred_img)
115 |        h_relu_3_3_targ_img = self.to_relu_3_3(h_relu_2_2_targ_img)
116 |        style_loss_3_3 = self.mse_loss(self._gram(h_relu_3_3_pred_img), self._gram(h_relu_3_3_targ_img))
117 | 
118 |        h_relu_4_3_pred_img = self.to_relu_4_3(h_relu_3_3_pred_img)
119 |        h_relu_4_3_targ_img = self.to_relu_4_3(h_relu_3_3_targ_img)
120 |        style_loss_4_3 = self.mse_loss(self._gram(h_relu_4_3_pred_img), self._gram(h_relu_4_3_targ_img))
121 | 
122 |        style_loss_tol = style_loss_1_2 + style_loss_2_2 + style_loss_3_3 + style_loss_4_3
123 |        # content loss (h_relu_2_2)
124 |        content_loss_tol = style_loss_2_2
125 |        return style_loss_tol, content_loss_tol
126 | 
127 | class SSIMLoss(nn.Module):
128 |    def __init__(self, window_size = 11, size_average = True):
129 |        super(SSIMLoss, self).__init__()
130 |        self.window_size = window_size
131 |        self.size_average = size_average
132 |        self.channel = 1
133 |        self.window = self._create_window(window_size, self.channel)
134 | 
135 |    def _gaussian(self, window_size, sigma):
136 |        gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
137 |        return gauss/gauss.sum()
138 | 
139 |    def _create_window(self, window_size, channel):
140 |        _1D_window = self._gaussian(window_size, 1.5).unsqueeze(1)
141 |        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
142 |        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
143 |        return window
144 | 
145 |    def _ssim(self, img1, img2, window, window_size, channel, size_average = True):
146 |        mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
147 |        mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
148 | 
149 |        mu1_sq = mu1.pow(2)
150 |        mu2_sq = mu2.pow(2)
151 |        mu1_mu2 = mu1*mu2
152 | 
153 |        sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
154 |        sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
155 |        sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
156 | 
157 |        C1 = 0.01**2
158 |        C2 = 0.03**2
159 | 
160 |        ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
161 | 
162 |        if size_average:
163 |            return ssim_map.mean()
164 |        else:
165 |            return ssim_map.mean(1).mean(1).mean(1)
166 | 
167 |    def forward(self, pred_img, targ_img):
168 |        (_, channel, _, _) = pred_img.size()
169 | 
170 |        if channel == self.channel and self.window.data.type() == pred_img.data.type():
171 |            window = self.window
172 |        else:
173 |            window = self._create_window(self.window_size, channel)
174 | 
175 |            if pred_img.is_cuda:
176 |                window = window.cuda()
177 |            window = window.type_as(pred_img)
178 | 
179 |            self.window = window
180 |            self.channel = channel
181 | 
182 |        return self._ssim(pred_img, targ_img, window, self.window_size, channel, self.size_average)
183 | 
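A quick sanity check of the composite losses above (a minimal sketch; `args` is faked with only the field the loss actually reads, using the option.py default):

import torch
from types import SimpleNamespace
from loss import L2_wz_TV

args = SimpleNamespace(tv_weight=4e-8)
criterion = L2_wz_TV(args)
pred = torch.rand(2, 3, 64, 64, requires_grad=True)
target = torch.rand(2, 3, 64, 64)
total = criterion(pred, target)   # MSE term + 4e-8 * total-variation term
total.backward()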
-------------------------------------------------------------------------------- /fov_deformable_net/deformable_unet.py: --------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.init as init
4 | import torch.nn.functional as F
5 | import numpy as np
6 | try:
7 |     from dcn.modules.modulated_deform_conv import ModulatedDeformConvPack2 as DCN
8 | except ImportError:
9 |     raise ImportError('Failed to import DCNv2 module; build it first via dcn/make.sh.')
10 | 
11 | #==============================================================================#
12 | class ResBlock(nn.Module):
13 | 
14 |     def __init__(self, input_channel=32, output_channel=32):
15 |         super().__init__()
16 |         self.in_channel = input_channel
17 |         self.out_channel = output_channel
18 |         if self.in_channel != self.out_channel:
19 |             self.conv0 = nn.Conv2d(input_channel, output_channel, 1, 1)
20 |         self.conv1 = nn.Conv2d(output_channel, output_channel, 3, 1, 1)
21 |         self.conv2 = nn.Conv2d(output_channel, output_channel, 3, 1, 1)
22 | 
23 |         self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
24 |         self.initialize_weights()
25 | 
26 |     def forward(self, x):
27 |         if self.in_channel != self.out_channel:
28 |             x = self.conv0(x)
29 |         conv1 = self.lrelu(self.conv1(x))
30 |         conv2 = self.conv2(conv1)
31 |         out = x + conv2
32 |         return out
33 |     def initialize_weights(self):
34 |         for m in self.modules():
35 |             if isinstance(m, nn.Conv2d):
36 |                 torch.nn.init.xavier_uniform_(m.weight.data)
37 |                 if m.bias is not None:
38 |                     m.bias.data.zero_()
39 | 
40 | class RSABlock(nn.Module):
41 | 
42 |     def __init__(self, input_channel=32, output_channel=32, offset_channel=32):
43 |         super().__init__()
44 |         self.in_channel = input_channel
45 |         self.out_channel = output_channel
46 |         if self.in_channel != self.out_channel:
47 |             self.conv0 = nn.Conv2d(input_channel, output_channel, 1, 1)
48 |         self.dcnpack = DCN(output_channel, output_channel, 3, stride=1, padding=1, dilation=1, deformable_groups=8,
49 |                            extra_offset_mask=True, offset_in_channel=offset_channel)
50 |         self.conv1 = nn.Conv2d(output_channel, output_channel, 3, 1, 1)
51 | 
52 |         self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
53 |         self.initialize_weights()
54 | 
55 |     def forward(self, x, offset):
56 |         if self.in_channel != self.out_channel:
57 |             x = self.conv0(x)
58 |         fea = self.lrelu(self.dcnpack([x, offset]))
59 |         out = self.conv1(fea) + x
60 |         return out
61 |     def initialize_weights(self):
62 |         for m in self.modules():
63 |             if isinstance(m, nn.Conv2d):
64 |                 torch.nn.init.xavier_uniform_(m.weight.data)
65 |                 if m.bias is not None:
66 |                     m.bias.data.zero_()
67 | 
68 | class OffsetBlock(nn.Module):
69 | 
70 |     def __init__(self, input_channel=32, offset_channel=32, last_offset=False):
71 |         super().__init__()
72 |         self.offset_conv1 = nn.Conv2d(input_channel, offset_channel, 3, 1, 1) # concat for diff
73 |         if last_offset:
74 |             self.offset_conv2 = nn.Conv2d(offset_channel*2,
offset_channel, 3, 1, 1) # concat for offset 75 | self.offset_conv3 = nn.Conv2d(offset_channel, offset_channel, 3, 1, 1) 76 | 77 | self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) 78 | self.initialize_weights() 79 | 80 | def forward(self, x, last_offset=None): 81 | offset = self.lrelu(self.offset_conv1(x)) 82 | if last_offset is not None: 83 | last_offset = F.interpolate(last_offset, scale_factor=2, mode='bilinear', align_corners=False) 84 | offset = self.lrelu(self.offset_conv2(torch.cat([offset, last_offset * 2], dim=1))) 85 | offset = self.lrelu(self.offset_conv3(offset)) 86 | return offset 87 | def initialize_weights(self): 88 | for m in self.modules(): 89 | if isinstance(m, nn.Conv2d): 90 | torch.nn.init.xavier_uniform_(m.weight.data) 91 | if m.bias is not None: 92 | m.bias.data.zero_() 93 | 94 | class ContextBlock(nn.Module): 95 | def __init__(self, input_channel=32, output_channel=32, square=False): 96 | super().__init__() 97 | self.conv0 = nn.Conv2d(input_channel, output_channel, 1, 1) 98 | if square: 99 | self.conv1 = nn.Conv2d(output_channel, output_channel, 3, 1, 1, 1) 100 | self.conv2 = nn.Conv2d(output_channel, output_channel, 3, 1, 2, 2) 101 | self.conv3 = nn.Conv2d(output_channel, output_channel, 3, 1, 4, 4) 102 | self.conv4 = nn.Conv2d(output_channel, output_channel, 3, 1, 8, 8) 103 | else: 104 | self.conv1 = nn.Conv2d(output_channel, output_channel, 3, 1, 1, 1) 105 | self.conv2 = nn.Conv2d(output_channel, output_channel, 3, 1, 2, 2) 106 | self.conv3 = nn.Conv2d(output_channel, output_channel, 3, 1, 3, 3) 107 | self.conv4 = nn.Conv2d(output_channel, output_channel, 3, 1, 4, 4) 108 | self.fusion = nn.Conv2d(4*output_channel, input_channel, 1, 1) 109 | 110 | self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) 111 | self.initialize_weights() 112 | 113 | def forward(self, x): 114 | x_reduce = self.conv0(x) 115 | conv1 = self.lrelu(self.conv1(x_reduce)) 116 | conv2 = self.lrelu(self.conv2(x_reduce)) 117 | conv3 = self.lrelu(self.conv3(x_reduce)) 118 | conv4 = self.lrelu(self.conv4(x_reduce)) 119 | out = torch.cat([conv1, conv2, conv3, conv4], 1) 120 | out = self.fusion(out) + x 121 | return out 122 | def initialize_weights(self): 123 | for m in self.modules(): 124 | if isinstance(m, nn.Conv2d): 125 | torch.nn.init.xavier_uniform_(m.weight.data) 126 | if m.bias is not None: 127 | m.bias.data.zero_() 128 | 129 | 130 | #===============================================================================# 131 | class DFUNet(nn.Module): 132 | 133 | def __init__(self, input_channel=3, output_channel=3, n_channel=32, offset_channel=32): 134 | super().__init__() 135 | 136 | self.res1 = ResBlock(input_channel, n_channel) 137 | self.down1 = nn.Conv2d(n_channel, n_channel*2, 2, 2) 138 | self.res2 = ResBlock(n_channel*2, n_channel*2) 139 | self.down2 = nn.Conv2d(n_channel*2, n_channel*4, 2, 2) 140 | self.res3 = ResBlock(n_channel*4, n_channel*4) 141 | self.down3 = nn.Conv2d(n_channel*4, n_channel*8, 2, 2) 142 | self.res4 = ResBlock(n_channel*8, n_channel*8) 143 | 144 | self.context = ContextBlock(n_channel*8, n_channel*2, square=True) 145 | self.offset4 = OffsetBlock(n_channel*8, offset_channel, False) 146 | self.dres4 = RSABlock(n_channel*8, n_channel*8, offset_channel) 147 | 148 | self.up3 = nn.ConvTranspose2d(n_channel*8, n_channel*4, 2, 2) 149 | self.dconv3_1 = nn.Conv2d(n_channel*8, n_channel*4, 1, 1) 150 | self.offset3 = OffsetBlock(n_channel*4, offset_channel, True) 151 | self.dres3 = RSABlock(n_channel*4, n_channel*4, offset_channel) 152 | 153 | self.up2 = 
nn.ConvTranspose2d(n_channel*4, n_channel*2, 2, 2) 154 | self.dconv2_1 = nn.Conv2d(n_channel*4, n_channel*2, 1, 1) 155 | self.offset2 = OffsetBlock(n_channel*2, offset_channel, True) 156 | self.dres2 = RSABlock(n_channel*2, n_channel*2, offset_channel) 157 | 158 | self.up1 = nn.ConvTranspose2d(n_channel*2, n_channel, 2, 2) 159 | self.dconv1_1 = nn.Conv2d(n_channel*2, n_channel, 1, 1) 160 | self.offset1 = OffsetBlock(n_channel, offset_channel, True) 161 | self.dres1 = RSABlock(n_channel, n_channel, offset_channel) 162 | 163 | self.out = nn.Conv2d(n_channel, output_channel, 3, 1, 1) 164 | 165 | self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) 166 | 167 | def forward(self, x): 168 | conv1 = self.res1(x) 169 | pool1 = self.lrelu(self.down1(conv1)) 170 | conv2 = self.res2(pool1) 171 | pool2 = self.lrelu(self.down2(conv2)) 172 | conv3 = self.res3(pool2) 173 | pool3 = self.lrelu(self.down3(conv3)) 174 | conv4 = self.res4(pool3) 175 | conv4 = self.context(conv4) 176 | 177 | L4_offset = self.offset4(conv4, None) 178 | dconv4 = self.dres4(conv4, L4_offset) 179 | 180 | up3 = torch.cat([self.up3(dconv4), conv3], 1) 181 | up3 = self.dconv3_1(up3) 182 | L3_offset = self.offset3(up3, L4_offset) 183 | dconv3 = self.dres3(up3, L3_offset) 184 | 185 | up2 = torch.cat([self.up2(dconv3), conv2], 1) 186 | up2 = self.dconv2_1(up2) 187 | L2_offset = self.offset2(up2, L3_offset) 188 | dconv2 = self.dres2(up2, L2_offset) 189 | 190 | up1 = torch.cat([self.up1(dconv2), conv1], 1) 191 | up1 = self.dconv1_1(up1) 192 | L1_offset = self.offset1(up1, L2_offset) 193 | dconv1 = self.dres1(up1, L1_offset) 194 | 195 | out = self.out(dconv1) + x[:, 0:3, :, :] 196 | 197 | return out 198 | 199 | def initialize_weights(self): 200 | for m in self.modules(): 201 | if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): 202 | #torch.nn.init.xavier_normal_(m.weight.data) 203 | torch.nn.init.xavier_uniform_(m.weight.data) 204 | #torch.nn.init.kaiming_uniform_(m.weight.data) 205 | if m.bias is not None: 206 | m.bias.data.zero_() 207 | elif isinstance(m, nn.BatchNorm2d): 208 | m.weight.data.fill_(1) 209 | m.bias.data.zero_() 210 | elif isinstance(m, nn.Linear): 211 | torch.nn.init.normal_(m.weight.data, 0, 0.01) 212 | m.bias.data.zero_() 213 | #==============================================================================# 214 | 215 | if __name__ == "__main__": 216 | model = DFUNet(3, 3, 32, 32) 217 | print(model) 218 | model = model.cuda() 219 | input = torch.Tensor(torch.randn(16, 3, 64, 64)).cuda() 220 | out = model(input) 221 | print(out.shape) -------------------------------------------------------------------------------- /PSF_generation/PSF_coherent_superposition.m: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % variable declaration 3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 4 | % path of PSF_info 5 | PSF_info_folder = '.\PSF_info\'; 6 | % path to save PSF_cell 7 | PSF_cell_folder = '.\PSF_cell\'; 8 | % full field value of sensor 9 | full_field = 4.00; 10 | % wave number to synthetic a three channel PSF, which is defined by 11 | % (wave distribution range)/(wave_interval) 12 | wave_num = 340/10; 13 | % H, W resolutions of sensor 14 | img_h = 3000; img_w = 4000; 15 | % center of the image 16 | img_h_cent = (img_h + 1) / 2; img_w_cent = (img_w + 1) / 2; 17 | % pixel distance between two PSFs 18 | tile_length = 10; 19 | % pixel size in microns 20 | pixel_length = 1.60; 21 
% sample interval of field in millimeters
fld_sample_interval = 0.02;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% sensor response integral path
wave_dist_path = '.\wav_response\wav_dist_cell_avr_itvl_380_10nm_780.mat';
wave_dist_cell = load(wave_dist_path);
wave_dist_cell = wave_dist_cell.wav_dist_cell;
% three experimental measurements of the sensor response; load one of them
wave_dist_r = wave_dist_cell{1, 1}; wave_dist_g = wave_dist_cell{1, 2}; wave_dist_b = wave_dist_cell{1, 3};
IMA_response = cat(1, wave_dist_r, wave_dist_g, wave_dist_b);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% whether to load the illumination information
load_illumination = false;
% relative illumination information
illumination_path = '.\illumination_info\illumination_info.xlsx';
Rlt_illumination = interp_relative_illumination(illumination_path, wave_num);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Point spread function calculation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for h_index = tile_length/2 : tile_length : img_h/2
    for w_index = tile_length/2 : tile_length : img_w/2

        % print the first mark information
        formatSpec_first = strcat('field of (h:%d, w:%d); (h:%d, w:%d); ', ...
                                  '(h:%d, w:%d); (h:%d, w:%d) is being processed!\n');
        fprintf(formatSpec_first, h_index, w_index, ...
                h_index, img_w - w_index, ...
                img_h - h_index, w_index, ...
                img_h - h_index, img_w - w_index);

        % calculate the distance between the sample position and the image
        % center; fld_sample_dist is in millimeters
        fld_sample_delta_h = img_h_cent - h_index;
        fld_sample_delta_w = img_w_cent - w_index;
        fld_sample_delta = [fld_sample_delta_h, fld_sample_delta_w];
        fld_sample_dist = sqrt((fld_sample_delta_h)^2 + ...
                               (fld_sample_delta_w)^2) * ...
                          pixel_length * 0.001;
        % calculate the field position
        [fld_index, fld_index_int] = compute_field_info(fld_sample_dist, fld_sample_interval);
        fld_sample_prepath = strcat(PSF_info_folder, fld_index);
        % judge the main light position on the imaging plane: take the
        % wavelength whose PSF center is closest to the image center as the
        % main light
        [main_center_h, PSF_fld_tmp, PSF_wav_tmp] = judge_main_wav(fld_sample_prepath, fld_index_int);

        % print the second mark information
        formatSpec_scd = strcat('main light wav is %3.1f nm;', ...
                                ' field value is %3.3f; ', ...
                                'center of main light is %06.8f\n');
        fprintf(formatSpec_scd, PSF_wav_tmp*1000, PSF_fld_tmp, main_center_h);

        % initialize the cell that stores the point spread functions:
        % column 1 is the top-left quadrant, column 2 the top-right,
        % column 3 the bottom-left, column 4 the bottom-right
        PSF_cell = cell(wave_num, 4);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
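        % --- note (an added sketch, assuming the 10 nm grid of wav_response) ---
        % wave_index k is loaded from 'wav_<039+k>.mat', so wav_040 holds the
        % 400 nm PSF, wav_041 the 410 nm PSF, and so on in 10 nm steps.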
        for wave_index = 1:wave_num
            % load the PSF information; the wavelength varies from 400 nm
            % upward in 10 nm steps
            wave_mat_path = strcat(fld_sample_prepath, '\wav_', ...
                                   num2str(39 + wave_index, '%03d'), '.mat');
            wave_mat = load(wave_mat_path);
            wave_PSF = wave_mat.wav_PSF;
            wave_txt = wave_mat.wav_txt;
            PSF_area = wave_txt{11};
            PSF_area_tmp = split(PSF_area, ' ');
            PSF_space = wave_txt{10};
            PSF_space_tmp = split(PSF_space, ' ');
            PSF_center = wave_txt{16};
            PSF_center_tmp = split(PSF_center, ' ');
            % PSF data interval, in microns
            PSF_data_space = str2double(PSF_space_tmp{4});
            % number of PSF data samples that cover one pixel
            data_cell_length = round(pixel_length / PSF_data_space);
            % the distance from the PSF to the center of the image
            PSF_center_h = str2double(PSF_center_tmp{8});
            % compute the deviation between this wavelength and the main
            % light wavelength
            PSF_center_delta_mm = PSF_center_h - main_center_h;
            % shift the PSF to generate a centrally symmetric PSF
            [PSF_H, PSF_W] = size(wave_PSF);
            wave_PSF = cat(1, wave_PSF(PSF_H, :), wave_PSF);
            wave_PSF = cat(2, wave_PSF, wave_PSF(:, 1));
            % rotate the PSF according to the field position and calculate
            % four PSFs of different angles: 1 -> top-left, 2 -> top-right,
            % 3 -> bottom-left, 4 -> bottom-right
            for rotat_index = 1:4
                % calculate the rotation angle of the PSF
                fld_sample_delta_angle = compute_delta_angle(fld_sample_delta, ...
                                                             rotat_index);
                wave_PSF_rotat = imrotate(wave_PSF, fld_sample_delta_angle, ...
                                          'bilinear', 'crop');
                % compute the deviation after rotating; bottom-right is
                % positive, top-left is negative
                PSF_center_delta_pixel = compute_h_w_delta(PSF_center_delta_mm, ...
                                                           fld_sample_delta_angle, ...
                                                           pixel_length);
                [PSF_H, PSF_W] = size(wave_PSF_rotat);
                % pad to an odd integer number of pixels
                pixel_num = ceil(PSF_H / data_cell_length);
                if mod(pixel_num, 2) == 0
                    pixel_num = pixel_num + 1;
                end
                pad_pixel_num = (pixel_num * data_cell_length - PSF_H) / 2;
                % first pad the array
                wave_PSF_pad_fir = padarray(wave_PSF_rotat, [pad_pixel_num pad_pixel_num], 0, 'both');
                % according to delta_h and delta_w, judge the position;
                % note that different pad directions have different start
                % points!
                [wave_PSF_pad_scd, ori_sample_pos] = pad_PSF(wave_PSF_pad_fir, ...
                                                             PSF_center_delta_pixel, ...
                                                             data_cell_length);
                % initialize the PSF
                pixel_PSF = zeros(pixel_num, pixel_num);
                for h_pixel_index = 1:pixel_num
                    for w_pixel_index = 1:pixel_num
                        % the h_range and w_range in wave_PSF
                        h_wave_range = ori_sample_pos(1) + (h_pixel_index-1)*data_cell_length+1 : ...
                                       ori_sample_pos(1) + (h_pixel_index)*data_cell_length;
                        w_wave_range = ori_sample_pos(2) + (w_pixel_index-1)*data_cell_length+1 : ...
                                       ori_sample_pos(2) + (w_pixel_index)*data_cell_length;
                        pixel_cell_PSF = wave_PSF_pad_scd(h_wave_range, w_wave_range);
                        % accumulate the energy falling into this pixel
                        pixel_PSF(h_pixel_index, w_pixel_index) = sum(pixel_cell_PSF, 'all');
                    end
                end
                PSF_cell{wave_index, rotat_index} = pixel_PSF ./ sum(pixel_PSF, 'all');
            end
        end
        % print the third mark information
        formatSpec_mid_one = strcat('field of (h:%d, w:%d); (h:%d, w:%d); ', ...
                                    '(h:%d, w:%d); (h:%d, w:%d) is recombined!\n');
        fprintf(formatSpec_mid_one, h_index, w_index, ...
                h_index, img_w - w_index, ...
                img_h - h_index, w_index, ...
                img_h - h_index, img_w - w_index);
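        % --- note (an added sketch; names refer to the integration loop above) ---
        % the per-pixel double loop is a plain block sum; an equivalent
        % vectorized form over one padded, rotated PSF would be:
        %   region = wave_PSF_pad_scd(ori_sample_pos(1)+1 : ori_sample_pos(1)+pixel_num*data_cell_length, ...
        %                             ori_sample_pos(2)+1 : ori_sample_pos(2)+pixel_num*data_cell_length);
        %   blocks = reshape(region, data_cell_length, pixel_num, data_cell_length, pixel_num);
        %   pixel_PSF = squeeze(sum(sum(blocks, 1), 3));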
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        % load the sensor response
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        % initialize the sensor-response PSF cell
        PSF_rsp_cell = cell(1, 4);
        for rotat_index = 1:4
            PSF_cell_tmp = PSF_cell(:, rotat_index);
            %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
            % pad the PSF_cell so that the PSFs of every wavelength have
            % the same size, then load the illumination response
            if load_illumination
                [PSF_rch, PSF_gch, PSF_bch] = ...
                    load_wave_response_illuminate(PSF_cell_tmp, IMA_response, ...
                                                  Rlt_illumination, fld_index_int);
            else
                [PSF_rch, PSF_gch, PSF_bch] = ...
                    load_wave_response(PSF_cell_tmp, IMA_response);
            end
            PSF_wave_response = cat(3, PSF_rch, PSF_gch, PSF_bch);
            PSF_rsp_cell{rotat_index} = PSF_wave_response;
        end
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        % save the PSFs of the four directions
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        PSF_info = PSF_rsp_cell{1};
        save(strcat(PSF_cell_folder, 'PSF_cell_', ...
                    num2str((h_index-5)/10+1, '%03d'), '_', ...
                    num2str((w_index-5)/10+1, '%03d'), '.mat'), 'PSF_info');
        PSF_info = PSF_rsp_cell{2};
        save(strcat(PSF_cell_folder, 'PSF_cell_', ...
                    num2str((h_index-5)/10+1, '%03d'), '_', ...
                    num2str((img_w-w_index-5)/10+1, '%03d'), '.mat'), 'PSF_info');
        PSF_info = PSF_rsp_cell{3};
        save(strcat(PSF_cell_folder, 'PSF_cell_', ...
                    num2str((img_h-h_index-5)/10+1, '%03d'), '_', ...
                    num2str((w_index-5)/10+1, '%03d'), '.mat'), 'PSF_info');
        PSF_info = PSF_rsp_cell{4};
        save(strcat(PSF_cell_folder, 'PSF_cell_', ...
                    num2str((img_h-h_index-5)/10+1, '%03d'), '_', ...
                    num2str((img_w-w_index-5)/10+1, '%03d'), '.mat'), 'PSF_info');
        % print the final mark information
        formatSpec_final = strcat('response and illumination of (h:%d, w:%d); (h:%d, w:%d); ', ...
                                  '(h:%d, w:%d); (h:%d, w:%d) are saved!\n');
        fprintf(formatSpec_final, h_index, w_index, ...
                h_index, img_w - w_index, ...
                img_h - h_index, w_index, ...
                img_h - h_index, img_w - w_index);
    end
end
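% --- note (an added sketch, assuming tile_length = 10) ---
% the save names above map each mirrored sample position onto a 1-based tile
% grid: h_index runs over 5, 15, ..., img_h/2 - 5, so (h_index-5)/10 + 1 gives
% rows 001 ... 150 for the top quadrants, and (img_h-h_index-5)/10 + 1 gives
% the mirrored rows 151 ... 300; the columns follow the same rule up to 400.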
--------------------------------------------------------------------------------
/PSF_generation/ray_tracing/difftrace/conjugates.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from .utils import PrettyPrinter, normalize, sagittal_meridional

# ==========================================================================================
# Basic
# ==========================================================================================

class NameMixin:
    _types = {}
    _default_type = None
    _nickname = None
    _type = None
    _typeletter = None

    @classmethod
    def register(cls, sub):
        if sub._type is None:
            sub._type = sub.__name__.lower()
        k = cls, sub._type
        assert k not in cls._types, (k, sub, cls._types)
        cls._types[k] = sub
        return sub

    def dict(self):
        dat = {}
        if self._type != self._default_type:
            dat["type"] = self._type
        if self._nickname:
            dat["nickname"] = self.nickname
        return dat

    @classmethod
    def make(cls, data):
        if isinstance(data, cls):
            return data
        typ = data.pop("type", cls._default_type)
        sub = cls._types[(cls, typ)]
        return sub(**data)

    @property
    def type(self):
        return self._type

    @property
    def typeletter(self):
        return self._typeletter or self._type[0].upper()

    @property
    def nickname(self):
        return self._nickname or hex(id(self))

    @nickname.setter
    def nickname(self, name):
        self._nickname = name

    def __str__(self):
        return f"<{self.typeletter}/{self.nickname}>"

# ==========================================================================================
# Pupil
# ==========================================================================================
class Pupil(NameMixin):
    _default_type = 'radius'

    def __init__(self, distance=1., update_distance=True,
                 update_radius=True, aim=True, telecentric=False,
                 refractive_index=1., projection="rectilinear"):
        self.distance = distance
        self.update_distance = update_distance
        self.update_radius = update_radius
        self.refractive_index = refractive_index
        self.aim = aim
        self.telecentric = telecentric
        self.projection = projection

    def rescale(self, scale):
        self.distance *= scale

    def update(self, distance, radius):
        if self.update_distance:
            self.distance = distance
        if self.update_radius:
            self.radius = radius

    def dict(self):
        dat = super().dict()
        dat["distance"] = float(self.distance)
        if not self.update_distance:
            dat["update_distance"] = self.update_distance
        if self.update_radius:
            dat["update_radius"] = self.update_radius
        if self.aim:
            dat["aim"] = self.aim
        if self.projection != "rectilinear":
            dat["projection"] = self.projection
        if self.telecentric:
            dat["telecentric"] = self.telecentric
        if self.refractive_index != 1.:
            dat["refractive_index"] = float(self.refractive_index)
        return dat

    def text(self):
        yield "Pupil Distance: %g" % self.distance
        if self.telecentric:
            yield "Telecentric: %s" % self.telecentric
        if self.refractive_index != 1.:
            yield "Refractive Index: %g" % self.refractive_index
        if self.projection != "rectilinear":
            yield "Projection: %s" % self.projection
        if not self.update_distance:
            yield "Track Distance: %s" % self.update_distance
        if self.update_radius:
            yield "Update Radius: %s" % self.update_radius
        if self.aim:
            yield "Aim: %s" % self.aim
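    # --- illustrative sketch (added; assumes only the classes in this file) ---
    # NameMixin.register builds a {(base, "type"): subclass} registry, so a
    # pupil can be round-tripped through its dict() form:
    #   pupil = Pupil.make({"type": "radius", "radius": 2.5, "distance": 10.0})
    #   assert isinstance(pupil, RadiusPupil)
    #   data = pupil.dict()        # serializable description of the pupil
    #   clone = Pupil.make(data)   # reconstructs an equivalent pupil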
"rectilinear": 110 | yield "Projection: %s" % self.projection 111 | if not self.update_distance: 112 | yield "Track Distance: %s" % self.update_distance 113 | if self.update_radius: 114 | yield "Update Radius: %s" % self.update_radius 115 | if self.aim: 116 | yield "Aim: %s" % self.aim 117 | 118 | @property 119 | def radius(self): 120 | return self.slope*self.distance 121 | 122 | @property 123 | def slope(self): 124 | return self.radius/self.distance 125 | 126 | @property 127 | def na(self): 128 | return self.sinarctan(self.slope)*self.refractive_index 129 | 130 | @property 131 | def fno(self): 132 | return 1/(2.*self.na) 133 | 134 | def sinarctan(self, u, v=None): 135 | u2 = np.square(u) 136 | if u2.ndim == 2: 137 | if u2.shape[1] >= 3: 138 | v = u[:, 3] 139 | u, u2 = u[:, :2], u2[:, :2] 140 | u2 = u2.sum(1)[:, None] 141 | u2 = 1/np.sqrt(1 + u2) 142 | u1 = u*u2 143 | if v is not None: 144 | u1 = np.concatenate((u1, np.sign(v)[:, None]*u2), axis=1) 145 | return u1 146 | 147 | def map(self, y, a, filter=True): 148 | # FIXME: projection 149 | # a = [[-sag, -mer], [+sag, +mer]] 150 | am = np.fabs(a).max() 151 | y = np.atleast_2d(y)*am 152 | if filter: 153 | c = np.sum(a, axis=0)/2 154 | d = np.diff(a, axis=0)/2 155 | r = ((y - c)**2/d**2).sum(1) 156 | y = y[r <= 1] 157 | return y 158 | 159 | @Pupil.register 160 | class RadiusPupil(Pupil): 161 | _type = "radius" 162 | radius = None 163 | 164 | def __init__(self, radius=1., **kwargs): 165 | super().__init__(**kwargs) 166 | self.radius = radius 167 | 168 | def dict(self): 169 | dat = super().dict() 170 | dat["radius"] = float(self.radius) 171 | return dat 172 | 173 | def text(self): 174 | yield from super().text() 175 | yield "Radius: %g" % self.radius 176 | 177 | def rescale(self, scale): 178 | super().rescale(scale) 179 | self.radius *= scale 180 | 181 | # ========================================================================================== 182 | # Conjugates 183 | # ========================================================================================== 184 | class Conjugate(PrettyPrinter): 185 | _types = {} 186 | _default_type = 'infinite' 187 | _nickname = None 188 | _type = None 189 | _typeletter = None 190 | finite = None 191 | 192 | def __init__(self, 193 | pupil=None, 194 | projection='rectilinear', 195 | update_radius=False, 196 | dtype=torch.float64, 197 | device=torch.device('cpu')): 198 | if pupil is None: 199 | self.pupil = RadiusPupil(radius=0.) 
    @property
    def wideangle(self):
        # FIXME: elaborate this
        return self.projection != "rectilinear"

    def text(self):
        if self.projection != "rectilinear":
            yield "Projection: %s" % self.projection
        if self.update_radius:
            yield "Update Radius: %s" % self.update_radius
        yield "Pupil:"
        for _ in self.pupil.text():
            yield "  %s" % _

    def dict(self):
        dat = super().dict()
        dat["pupil"] = self.pupil.dict()
        if self.projection != "rectilinear":
            dat["projection"] = self.projection
        return dat

    def rescale(self, scale):
        self.pupil.rescale(scale)

    def aim(self, xy, pq, z=None, a=None):
        """
        xy: 2d fractional xy object coordinate (the object knows its meaning)
        pq: 2d fractional sagittal/meridional pupil coordinate

        aiming should be aplanatic (the grid is by solid angle in object
        space) and not paraxial (equal area in the entrance beam plane)

        z: pupil distance from the "surface 0 apex" (also for an infinite
           object)
        a: pupil aperture (also for an infinite object or telecentric
           pupils, then from z=0)

        if z, a are not provided, they are taken from the paraxial data
        stored in the object/pupil
        """
        raise NotImplementedError

@Conjugate.register
class FiniteConjugate(Conjugate):
    _type = "finite"
    finite = True

    def __init__(self, radius=0., **kwargs):
        super().__init__(**kwargs)
        self.radius = radius

    @property
    def point(self):
        return not self.radius

    @property
    def slope(self):
        return self.radius/self.pupil.distance

    @slope.setter
    def slope(self, c):
        self.radius = self.pupil.distance*c

    def dict(self):
        dat = super().dict()
        if self.radius:
            dat["radius"] = float(self.radius)
        return dat

    def text(self):
        yield "Radius: %.3g" % self.radius
        yield from super().text()

    def update(self, radius, pupil_distance, pupil_radius):
        self.pupil.update(pupil_distance, pupil_radius)
        if self.update_radius:
            self.radius = radius

    def rescale(self, scale):
        super().rescale(scale)
        self.radius *= scale

    def aim(self, yo, yp=None, z=None, a=None, surface=None, filter=True):
        if z is None:
            z = self.pupil.distance
        yo = np.atleast_2d(yo)
        if yp is not None:
            if a is None:
                a = self.pupil.radius
                a = np.array(((-a, -a), (a, a)))
            a = np.arctan2(a, z)
            yp = np.atleast_2d(yp)
            yp = self.pupil.map(yp, a, filter)
            yp = z*np.tan(yp)
            yo, yp = np.broadcast_arrays(yo, yp)

        y = np.zeros((yo.shape[0], 3))
        y[..., :2] = -yo*self.radius
        if surface is not None:
            y[..., 2] = -surface.surface_sag(y)
        uz = (0, 0, z)
        if self.pupil.telecentric:
            u = uz
        else:
            u = uz - y
        if yp is not None:
            s, m = sagittal_meridional(u, uz)
            u += yp[..., 0, None]*s + yp[..., 1, None]*m
        u = normalize(u)
        if z < 0:
            u *= -1
        return y, u
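# --- illustrative sketch (added; assumes utils' normalize returns the
# normalized array, as its use in InfiniteConjugate.map below suggests) ---
#   obj = FiniteConjugate(radius=5.0, pupil=RadiusPupil(radius=1.0, distance=20.0))
#   y, u = obj.aim(np.array([[0.0, 0.7]]), np.array([[0.0, 0.0]]))
#   # y: ray origins on the object plane, u: unit ray direction vectors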
@Conjugate.register
class InfiniteConjugate(Conjugate):
    _type = "infinite"
    finite = False

    def __init__(self, index=0, angle_fov=14., angle_azimuth=None, **kwargs):
        super().__init__(**kwargs)
        self.index = index
        self.angle_fov = angle_fov * np.pi / 180.
        if angle_azimuth is None:
            # the direction tilts from -y towards +y
            self.azimuth = 0.
        else:
            # angle between the direction vector's projection and the -y axis
            self.azimuth = angle_azimuth * np.pi / 180.

    @property
    def slope(self):
        return np.tan(self.angle_fov)

    def dict(self):
        dat = super().dict()
        # angles are stored in radians; serialize in degrees so that make()
        # can feed them back to the constructor
        if self.angle_fov:
            dat["angle_fov"] = float(np.degrees(self.angle_fov))
        if self.azimuth:
            dat["angle_azimuth"] = float(np.degrees(self.azimuth))
        return dat

    def update(self, angle_fov, angle_azimuth=None):
        self.angle_fov = torch.tensor(float(angle_fov) * np.pi / 180.,
                                      dtype=self.dtype, device=self.device)
        if angle_azimuth is None:
            # the direction tilts from -y towards +y
            self.azimuth = torch.tensor(0., dtype=self.dtype, device=self.device)
        else:
            # angle between the direction vector's projection and the -y axis
            self.azimuth = torch.tensor(float(angle_azimuth) * np.pi / 180.,
                                        dtype=self.dtype, device=self.device)

    def text(self):
        yield "Semi-Angle: %.3g deg" % np.degrees(float(self.angle_fov))
        yield from super().text()

    def map(self):
        """
        Get the direction vector from the fov and the azimuth.
        """
        # components of a unit direction vector
        z_proj = np.cos(self.angle_fov)
        x_proj = np.sin(self.angle_fov) * np.sin(self.azimuth)
        y_proj = np.sin(self.angle_fov) * np.cos(self.azimuth)
        # unit length by construction; normalize only guards against rounding
        return normalize(torch.tensor([x_proj, y_proj, z_proj], dtype=self.dtype, device=self.device))

    def aim(self, yo, yp=None, z=None, a=None, surface=None, filter=True, l=None, n0=1.000):
        if z is None:
            z = self.pupil.distance
        yo = np.atleast_2d(yo)
        # the field direction is fully determined by angle_fov and azimuth
        u = np.atleast_2d(self.map().cpu().numpy())
        if yp is not None:
            if a is None:
                # widen the aperture with the field angle (empirical margin)
                a = self.pupil.radius + 0.6412080331894 * np.tan(np.arccos(u[0, -1]))
            a = np.array(((-a, -a), (a, a)))
            yp = np.atleast_2d(yp)
            yp = self.pupil.map(yp, a, filter)
            # one ray per pupil sample, all sharing the same field direction
            u = np.broadcast_to(u, (yp.shape[0], 3))
        yz = (0, 0, z)
        y = yz - z * u
        if yp is not None:
            y[:, 0:2] += yp

        if surface is not None:
            y += surface.intercept(y, u)[..., None]*u
            i = u
            n, mu = surface.get_n_mu(n0, l)
            if mu:
                u = surface.refract(y, i, mu)

        return y, u
--------------------------------------------------------------------------------