├── .github
│   └── FUNDING.yml
├── .gitignore
├── 0_hello_deeplens.py
├── 1_end2end_5lines.py
├── 2_autolens_rms.py
├── 3_psf_net.py
├── 4_tasklens_img_classi.py
├── 5_pupil_field.py
├── 6_hybridlens_design.py
├── 7_comp_photography.py
├── LICENSE
├── README.md
├── ckpts
│   └── psfnet
│       └── ef50mm_f1.8_1000x1000_ks128_mlpconv.pth
├── configs
│   ├── 1_end2end_5lines.yml
│   ├── 2_auto_lens_design.yml
│   ├── 4_tasklens.yml
│   └── 7_comp_photography.yml
├── datasets
│   ├── IQ
│   │   ├── img1.png
│   │   ├── img2.png
│   │   ├── img3.png
│   │   ├── img4.png
│   │   ├── img5.png
│   │   ├── readme.md
│   │   └── usaf1951.png
│   └── bird.png
├── deeplens
│   ├── __init__.py
│   ├── camera.py
│   ├── diffraclens.py
│   ├── geolens.py
│   ├── geolens_eval.py
│   ├── geolens_io.py
│   ├── geolens_optim.py
│   ├── geolens_utils.py
│   ├── geolens_vis.py
│   ├── hybridlens.py
│   ├── lens.py
│   ├── network
│   │   ├── __init__.py
│   │   ├── dataset.py
│   │   ├── loss
│   │   │   ├── __init__.py
│   │   │   ├── perceptual_loss.py
│   │   │   ├── psnr_loss.py
│   │   │   └── ssim_loss.py
│   │   ├── reconstruction
│   │   │   ├── __init__.py
│   │   │   ├── nafnet.py
│   │   │   ├── restormer.py
│   │   │   └── unet.py
│   │   └── surrogate
│   │       ├── __init__.py
│   │       ├── mlp.py
│   │       ├── mlpconv.py
│   │       ├── modulate_siren.py
│   │       └── siren.py
│   ├── optics
│   │   ├── __init__.py
│   │   ├── basics.py
│   │   ├── diffractive_surface
│   │   │   ├── __init__.py
│   │   │   ├── binary2.py
│   │   │   ├── diffractive.py
│   │   │   ├── fresnel.py
│   │   │   ├── pixel2d.py
│   │   │   ├── thinlens.py
│   │   │   └── zernike.py
│   │   ├── geometric_surface
│   │   │   ├── __init__.py
│   │   │   ├── aperture.py
│   │   │   ├── aspheric.py
│   │   │   ├── base.py
│   │   │   ├── cubic.py
│   │   │   ├── mirror.py
│   │   │   ├── phase.py
│   │   │   ├── plane.py
│   │   │   ├── spheric.py
│   │   │   └── thinlens.py
│   │   ├── loss.py
│   │   ├── material
│   │   │   ├── CDGM.AGF
│   │   │   ├── MISC.AGF
│   │   │   ├── PLASTIC2022.AGF
│   │   │   └── SCHOTT.AGF
│   │   ├── materials.py
│   │   ├── materials_data.json
│   │   ├── monte_carlo.py
│   │   ├── ray.py
│   │   ├── render_psf.py
│   │   ├── surfaces.py
│   │   ├── surfaces_diffractive.py
│   │   ├── utils.py
│   │   ├── wave.py
│   │   └── waveoptics_utils.py
│   ├── psfnet.py
│   ├── sensor
│   │   ├── __init__.py
│   │   ├── event_sensor.py
│   │   ├── isp.py
│   │   ├── isp_modules
│   │   │   ├── __init__.py
│   │   │   ├── anti_alising.py
│   │   │   ├── black_level.py
│   │   │   ├── color_matrix.py
│   │   │   ├── color_space.py
│   │   │   ├── dead_pixel.py
│   │   │   ├── demosaic.py
│   │   │   ├── denoise.py
│   │   │   ├── gamma_correction.py
│   │   │   ├── lens_shading.py
│   │   │   └── white_balance.py
│   │   ├── mono_sensor.py
│   │   ├── rgb_sensor.py
│   │   └── sensor.py
│   ├── utils.py
│   ├── version.py
│   └── view_3d.py
├── environment.yml
├── imgs
│   ├── autolens1.gif
│   ├── autolens2.gif
│   ├── end2end.gif
│   ├── hybridlens.png
│   ├── implicit_net.png
│   ├── logo.png
│   ├── paper_deeplens.png
│   └── paper_dff.png
├── lenses
│   ├── camera
│   │   ├── ef100mm_f2.8.json
│   │   ├── ef100mm_f2.8.png
│   │   ├── ef35mm_f2.0.json
│   │   ├── ef35mm_f2.0.png
│   │   ├── ef35mm_f2.0.zmx
│   │   ├── ef40mm_f2.8.json
│   │   ├── ef40mm_f2.8.png
│   │   ├── ef50mm_f1.8.json
│   │   ├── ef50mm_f1.8.png
│   │   ├── ef85mm_f1.8.json
│   │   ├── ef85mm_f1.8.png
│   │   ├── rf16mm_f2.8.json
│   │   ├── rf16mm_f2.8.png
│   │   ├── rf24mm_f1.8.json
│   │   ├── rf24mm_f1.8.png
│   │   ├── rf35mm_f1.8.json
│   │   ├── rf50mm_f1.8.json
│   │   ├── rf50mm_f1.8.png
│   │   ├── sigma70mm_f2.8.json
│   │   ├── sigma70mm_f2.8.png
│   │   ├── yongnuo_50mm_f1.8.json
│   │   └── yongnuo_50mm_f1.8.png
│   ├── cellphone
│   │   ├── 3P_blank.json
│   │   ├── cellphone68deg.json
│   │   ├── cellphone68deg.png
│   │   ├── cellphone80deg.json
│   │   └── cellphone80deg.png
│   ├── cooke.json
│   ├── cooke.png
│   ├── cooke40_inferior.json
│   ├── cooke40_inferior.png
│   ├── hybridlens
│   │   └── a489_doe.json
│   ├── paraxiallens
│   │   ├── doelens.json
│   │   └── doethinlens.json
│   ├── readme.md
│   ├── thorlabs
│   │   ├── acl12708u.json
│   │   ├── acl12708u.png
│   │   └── acl12708u_psf20000mm.png
│   └── zemax_double_gaussian.zmx
├── misc
│   ├── aatdff_bibtex.txt
│   ├── deeplens_bibtex.txt
│   ├── do_bibtex.txt
│   ├── fluidiclens_bibtext.txt
│   └── hybridlens_bibtex.txt
├── setup.py
└── visualization_demo.py
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [singer-yang] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12 | polar: # Replace with a single Polar username
13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
14 | thanks_dev: # Replace with a single thanks.dev username
15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
16 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | __pycache__/
3 | .DS_Store
4 | deeplens.egg-info/
5 | 0_test.py
6 | *.png
7 | *.sh
8 | results/
9 | deeplens/do_code/
10 | !lenses/*.png
11 | !lenses/**/*.png
12 | !imgs/*.png
13 | debug.py
14 | *.log
15 | temp/*
16 |
17 | lenses/pancake/
18 |
19 |
20 | # add my own files
21 | *.ipynb
--------------------------------------------------------------------------------
/0_hello_deeplens.py:
--------------------------------------------------------------------------------
1 | """
2 | "Hello, world!" for DeepLens.
3 |
4 | In this code, we will load a lens from a file. Then we will plot the lens setup and render a sample image.
5 |
6 | Technical Paper:
7 | [1] Xinge Yang, Qiang Fu and Wolfgang Heidrich, "Curriculum learning for ab initio deep learned refractive optics," Nature Communications 2024.
8 | [2] Congli Wang, Ni Chen, and Wolfgang Heidrich, "dO: A differentiable engine for Deep Lens design of computational imaging systems," IEEE TCI 2023.
9 |
10 | This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
11 | # The license is only for non-commercial use (commercial licenses can be obtained from authors).
12 | # The material is provided as-is, with no warranties whatsoever.
13 | # If you publish any code, data, or scientific work based on this, please cite our work.
14 | """
15 |
16 | from deeplens import GeoLens
17 |
18 |
19 | def main():
20 | lens = GeoLens(filename="./lenses/camera/ef35mm_f2.0.json")
21 | # lens = GeoLens(filename="./lenses/camera/ef35mm_f2.0.zmx")
22 | # lens = GeoLens(filename='./lenses/cellphone/cellphone80deg.json')
23 | # lens = GeoLens(filename='./lenses/zemax_double_gaussian.zmx')
24 |
25 | lens.analysis(render=True)
26 |
27 | lens.write_lens_zmx()
28 | lens.write_lens_json()
29 |
30 | if __name__ == "__main__":
31 | main()
32 |
--------------------------------------------------------------------------------
/3_psf_net.py:
--------------------------------------------------------------------------------
1 | """
2 | Implicit representation for a realistic lens (PSFs). In this code, we will train a neural network to represent the PSF of a lens system. Then we can fast calculate the spatially-varying, focus-dependent PSF of the lens for image simulation.
3 |
4 | Input: [x, y, z, focus_distance]
5 | Output: [3, ks, ks] PSF
6 |
7 | Technical Paper:
8 | Xinge Yang, Qiang Fu, Mohammed Elhoseiny and Wolfgang Heidrich, "Aberration-Aware Depth-from-Focus" IEEE-TPAMI 2023.
9 |
10 | This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
11 | # The license is only for non-commercial use (commercial licenses can be obtained from authors).
12 | # The material is provided as-is, with no warranties whatsoever.
13 | # If you publish any code, data, or scientific work based on this, please cite our work.
14 | """
15 |
16 | import os
17 | from datetime import datetime
18 |
19 | from deeplens.psfnet import PSFNet
20 | from deeplens.utils import set_logger, set_seed
21 |
22 | result_dir = "./results/" + datetime.now().strftime("%m%d-%H%M%S") + "-PSFNet"
23 | os.makedirs(result_dir, exist_ok=True)
24 | set_logger(result_dir)
25 | set_seed(0)
26 |
27 | if __name__ == "__main__":
28 |     # Init PSFNet (the mlpconv architecture performs better on large PSF kernels)
29 | psfnet = PSFNet(
30 | filename="./lenses/camera/ef50mm_f1.8.json",
31 | model_name="mlpconv",
32 | sensor_res=(1000, 1000),
33 | kernel_size=128,
34 | )
35 | psfnet.lens.analysis(save_name=f"{result_dir}/lens")
36 | psfnet.lens.write_lens_json(f"{result_dir}/lens.json")
37 |
38 |     # Train PSFNet (starting from a pre-trained checkpoint)
39 | psfnet.load_net("./ckpts/psfnet/ef50mm_f1.8_1000x1000_ks128_mlpconv.pth")
40 | psfnet.train_psfnet(
41 | iters=20000,
42 | bs=64,
43 | lr=1e-3,
44 | spp=100000,
45 | evaluate_every=1000,
46 | result_dir=result_dir,
47 | )
48 | psfnet.evaluate_psf(result_dir=result_dir)
49 |
50 | print("Finish PSF net fitting.")
51 |
--------------------------------------------------------------------------------
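
As a sanity check of the input/output contract described in the docstring above, here is a minimal sketch that queries the MLPConv surrogate directly. It assumes the query vector [x, y, z, focus_distance] is normalized in the same way as during training:

```
import torch

from deeplens.network.surrogate import MLPConv

# One PSF query is [x, y, z, focus_distance]; the output is a [3, ks, ks] PSF.
net = MLPConv(in_features=4, ks=128, channels=3)
query = torch.rand(8, 4)  # batch of 8 normalized queries
psf = net(query)          # [8, 3, 128, 128], L1-normalized per channel
print(psf.shape)
```
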
/5_pupil_field.py:
--------------------------------------------------------------------------------
1 | """
2 | Calculates the pupil field of a lens at a given point in space by coherent ray tracing.
3 |
4 | Technical Paper:
5 | Xinge Yang, Matheus Souza, Kunyi Wang, Praneeth Chakravarthula, Qiang Fu and Wolfgang Heidrich, "End-to-End Hybrid Refractive-Diffractive Lens Design with Differentiable Ray-Wave Model," Siggraph Asia 2024.
6 |
7 | This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
8 | # The license is only for non-commercial use (commercial licenses can be obtained from authors).
9 | # The material is provided as-is, with no warranties whatsoever.
10 | # If you publish any code, data, or scientific work based on this, please cite our work.
11 | """
12 |
13 | import torch
14 | from torchvision.utils import save_image
15 |
16 | from deeplens import GeoLens
17 |
18 |
19 | def main():
20 |     # Use a high sensor resolution; even 4000x4000 is coarse for sampling the pupil field
21 | lens = GeoLens(filename="./lenses/cellphone/cellphone80deg.json")
22 | lens.set_sensor(sensor_res=[4000, 4000], sensor_size=lens.sensor_size)
23 | lens.double()
24 |
25 | # Calculate the pupil field
26 | wavefront, _ = lens.pupil_field(
27 | point=torch.tensor([0.0, 0.0, -10000.0]), spp=10000000
28 | )
29 | save_image(wavefront.angle(), "./wavefront_phase.png")
30 | save_image(torch.abs(wavefront), "./wavefront_amp.png")
31 |
32 | # Compare coherent and incoherent PSFs
33 | psf_coherent = lens.psf_coherent(torch.tensor([0.0, 0.0, -10000.0]), ks=101)
34 | save_image(psf_coherent, "./psf_coherent.png", normalize=True)
35 | psf_incoherent = lens.psf(torch.tensor([0.0, 0.0, -10000.0]), ks=101)
36 | save_image(psf_incoherent, "./psf_incoherent.png", normalize=True)
37 |
38 |
39 | if __name__ == "__main__":
40 | main()
41 |
--------------------------------------------------------------------------------
/6_hybridlens_design.py:
--------------------------------------------------------------------------------
1 | """
2 | Jointly optimize refractive-diffractive lens with a differentiable ray-wave model. This code can be easily extended to end-to-end refractive-diffractive lens and network design.
3 |
4 | Technical Paper:
5 | Xinge Yang, Matheus Souza, Kunyi Wang, Praneeth Chakravarthula, Qiang Fu and Wolfgang Heidrich, "End-to-End Hybrid Refractive-Diffractive Lens Design with Differentiable Ray-Wave Model," Siggraph Asia 2024.
6 |
7 | This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
8 | # The license is only for non-commercial use (commercial licenses can be obtained from authors).
9 | # The material is provided as-is, with no warranties whatsoever.
10 | # If you publish any code, data, or scientific work based on this, please cite our work.
11 | """
12 |
13 | import logging
14 | import os
15 | import random
16 | import string
17 | from datetime import datetime
18 |
19 | import torch
20 | import yaml
21 | from torchvision.utils import save_image
22 | from tqdm import tqdm
23 |
24 | from deeplens.hybridlens import HybridLens
25 | from deeplens.optics.loss import PSFLoss
26 | from deeplens.utils import set_logger, set_seed
27 |
28 |
29 | def config():
30 | # ==> Config
31 | args = {"seed": 0, "DEBUG": True}
32 |
33 | # ==> Result folder
34 | characters = string.ascii_letters + string.digits
35 | random_string = "".join(random.choice(characters) for i in range(4))
36 | result_dir = (
37 | "./results/"
38 | + datetime.now().strftime("%m%d-%H%M%S")
39 | + "-HybridLens"
40 | + "-"
41 | + random_string
42 | )
43 | args["result_dir"] = result_dir
44 | os.makedirs(result_dir, exist_ok=True)
45 | print(f"Result folder: {result_dir}")
46 |
47 | if args["seed"] is None:
48 | seed = random.randint(0, 100)
49 | args["seed"] = seed
50 | set_seed(args["seed"])
51 |
52 | # ==> Log
53 | set_logger(result_dir)
54 | if not args["DEBUG"]:
55 | raise Exception("Add your wandb logging config here.")
56 |
57 | # ==> Device
58 | if torch.cuda.is_available():
59 | args["device"] = torch.device("cuda")
60 | args["num_gpus"] = torch.cuda.device_count()
61 | logging.info(f"Using {args['num_gpus']} {torch.cuda.get_device_name(0)} GPU(s)")
62 | else:
63 | args["device"] = torch.device("cpu")
64 | logging.info("Using CPU")
65 |
66 | # ==> Save config
67 | with open(f"{result_dir}/config.yml", "w") as f:
68 | yaml.dump(args, f)
69 |
70 | with open(f"{result_dir}/6_hybridlens_design.py", "w") as f:
71 | with open("6_hybridlens_design.py", "r") as code:
72 | f.write(code.read())
73 |
74 | return args
75 |
76 |
77 | def main(args):
78 | # Create a hybrid refractive-diffractive lens
79 | lens = HybridLens(filename="./lenses/hybridlens/a489_doe.json")
80 | lens.refocus(foc_dist=-1000.0)
81 | lens.double()
82 |
83 | # PSF optimization loop to focus blue light
84 | optimizer = lens.get_optimizer(doe_lr=0.1, lens_lr=[1e-4, 1e-4, 1e-1, 1e-5])
85 | loss_fn = PSFLoss()
86 | iterations = 1000
87 | pbar = tqdm(total=iterations + 1, desc="Progress", postfix={"loss": 0})
88 | for i in range(iterations + 1):
89 | psf = lens.psf(points=[0.0, 0.0, -10000.0], ks=101, wvln=0.489)
90 |
91 | optimizer.zero_grad()
92 | loss = loss_fn(psf)
93 | loss.backward()
94 | optimizer.step()
95 |
96 | if i % 100 == 0:
97 | lens.write_lens_json(f"{args['result_dir']}/lens_iter{i}.json")
98 | lens.analysis(save_name=f"{args['result_dir']}/lens_iter{i}.png")
99 | save_image(
100 | psf.detach().clone(),
101 | f"{args['result_dir']}/psf_iter{i}.png",
102 | normalize=True,
103 | )
104 |
105 | pbar.set_postfix({"loss": loss.item()})
106 | pbar.update(1)
107 |
108 | if __name__ == "__main__":
109 | args = config()
110 | main(args)
111 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |

3 |
4 |
5 | **DeepLens** is an open-source differentiable lens simulator. It is designed for automated optical design and end-to-end optics-sensor-network optimization. DeepLens helps researchers build custom differentiable optical systems and computational imaging pipelines with minimal effort.
6 |
7 | ## Contact
8 |
9 | * Welcome to contribute to DeepLens! If you don't know where to start, check out some [open questions](https://github.com/users/singer-yang/projects/2).
10 | * Contact Xinge Yang (xinge.yang@kaust.edu.sa) for any inquiries. DeepLens is also looking for sponsors!
11 | * We have a [Slack group](https://join.slack.com/t/deeplens/shared_invite/zt-2wz3x2n3b-plRqN26eDhO2IY4r_gmjOw) and a WeChat group (add singeryang1999 to join) for discussion.
12 | * The DeepLens paper is published in [Nature Communications](https://www.nature.com/articles/s41467-024-50835-7)!
13 |
14 | ## What is DeepLens
15 |
16 | DeepLens combines **deep learning** and **optical design** for:
17 |
18 | 1. More powerful **optical design algorithms** enhanced by deep learning.
19 | 2. Next-generation **computational cameras** integrating optical encoding with deep learning decoding.
20 |
21 | ## Key Features
22 |
23 | DeepLens differs from other optical software in:
24 |
25 | 1. **Differentiable** design with outstanding optimization capabilities.
26 | 2. **Open-source** optical simulator (ray-tracing, wave optics) with validated accuracy.
27 | 3. **End-to-end imaging** with sensor and image signal processing (ISP) simulation.
28 | 4. **GPU parallelization** with customized core functions.
29 |
30 | Additional features:
31 |
32 | 1. **Physical optics** simulations including polarization tracing and film design.
33 | 2. **Complex optical systems** including non-sequential and non-coaxial optics.
34 | 3. **Neural representations** for efficient implicit optical models.
35 | 4. **Higher efficiency** through GPU kernel customization.
36 | 5. **Large-scale** optimization with multi-machine distribution.
37 |
38 | ## Applications
39 |
40 | #### 1. Automated lens design
41 |
42 | Fully automated lens design from scratch. Try it with [AutoLens](https://github.com/vccimaging/AutoLens)!
43 |
44 | [](https://www.nature.com/articles/s41467-024-50835-7) [](https://github.com/vccimaging/AutoLens)
45 |
46 |
47 |

48 |

49 |
50 |
51 | #### 2. End-to-End lens design
52 |
53 | Lens-network co-design from scratch, using the final image quality (or classification/detection/segmentation performance) as the objective.
54 |
55 | [](https://www.nature.com/articles/s41467-024-50835-7)
56 |
57 |
58 |

59 |
60 |
61 | #### 3. Implicit Lens Representation
62 |
63 | A surrogate network for fast (aberration + defocus) image simulation.
64 |
65 | [](https://ieeexplore.ieee.org/document/10209238) [](https://github.com/vccimaging/Aberration-Aware-Depth-from-Focus)
66 |
67 |
68 |

69 |
70 |
71 | #### 4. Hybrid Refractive-Diffractive Lens Model
72 |
73 | Design hybrid refractive-diffractive lenses with a new ray-wave model.
74 |
75 | [](https://arxiv.org/abs/2406.00834)
76 |
77 |
78 |

79 |
80 |
81 | ## How to use
82 |
83 | We recommend cloning this repository and writing your code directly within it:
84 |
85 | ```
86 | git clone https://github.com/vccimaging/DeepLens.git
87 | cd DeepLens
88 |
89 | conda env create -f environment.yml -n deeplens
90 |
91 | python 0_hello_deeplens.py
92 | python your_optical_design_pipeline.py
93 | ```
94 |
95 | DeepLens repo is structured as follows:
96 |
97 | ```
98 | DeepLens/
99 | │
100 | ├── deeplens/
101 | │ ├── optics/ (optics simulation)
102 | | ├── sensor/ (sensor simulation)
103 | | ├── network/ (network architectures)
104 | | ├── ...
105 | | ├── geolens.py (refractive lens system using ray tracing)
106 | | ├── diffraclens.py (diffractive lens system using wave optics)
107 | | └── your_own_optical_system.py (your own optical system)
108 | │
109 | ├── 0_hello_deeplens.py (code tutorials)
110 | ├── ...
111 | └── your_optical_design_pipeline.py (your own optical design pipeline)
112 | ```
113 |
114 | ## Reference
115 |
116 | This code was first developed by [Dr. Congli Wang](https://congliwang.github.io/) under the name **dO**, and is now developed and maintained by [Xinge Yang](https://singer-yang.github.io/) as **DeepLens**.
117 |
118 | If you use DeepLens in your research, please cite the corresponding papers:
119 |
120 | - [TCI 2022] dO: A differentiable engine for deep lens design of computational imaging systems. [Paper](https://ieeexplore.ieee.org/document/9919421), [BibTex](./misc/do_bibtex.txt)
121 | - [NatComm 2024] Curriculum Learning for ab initio Deep Learned Refractive Optics. [Paper](https://www.nature.com/articles/s41467-024-50835-7), [BibTex](./misc/deeplens_bibtex.txt)
122 | - [SiggraphAsia 2024] End-to-End Hybrid Refractive-Diffractive Lens Design with Differentiable Ray-Wave Model. [Paper](https://arxiv.org/abs/2406.00834), [BibTex](./misc/hybridlens_bibtex.txt)
123 |
--------------------------------------------------------------------------------
/ckpts/psfnet/ef50mm_f1.8_1000x1000_ks128_mlpconv.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/ckpts/psfnet/ef50mm_f1.8_1000x1000_ks128_mlpconv.pth
--------------------------------------------------------------------------------
/configs/1_end2end_5lines.yml:
--------------------------------------------------------------------------------
1 | DEBUG: True
2 | EXP_NAME: 'End2End lens design for image reconstruction.'
3 | seed: ~
4 |
5 | lens:
6 | path: './lenses/cooke40_inferior.json'
7 | lr: !!python/tuple [1e-4, 1e-4, 0, 0]
8 |
9 | network:
10 | pretrained: False
11 | lr: !!float 1e-3
12 |
13 | train:
14 | # train_dir: './datasets/DIV2K_train_HR'
15 | train_dir: './datasets/BSDS300/images/train'
16 | epochs: 10
17 | bs: 8
18 | img_res: !!python/tuple [256, 256]
19 |
20 | noise: 0.001
21 | depth: -20000
--------------------------------------------------------------------------------
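
A note on reading these configs: the `!!python/tuple` tags are Python-specific, so `yaml.safe_load` rejects them. A minimal loading sketch (assuming PyYAML >= 5.1, which provides `yaml.unsafe_load`):

```
import yaml

with open("./configs/1_end2end_5lines.yml") as f:
    args = yaml.unsafe_load(f)  # or yaml.load(f, Loader=yaml.Loader)

print(args["train"]["img_res"])  # (256, 256)
# Caveat: PyYAML resolves a bare "1e-4" (no decimal point) as a string, which is
# why the scalar learning rates in these configs carry an explicit !!float tag.
print(args["lens"]["lr"])        # ('1e-4', '1e-4', 0, 0)
```
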
/configs/2_auto_lens_design.yml:
--------------------------------------------------------------------------------
1 | DEBUG: True
2 | seed: ~
3 |
4 | # experiment settings
5 | EXP_NAME: 'Auto lens design'
6 |
7 | # lens target example 1 (camera lens)
8 | foclen: 85.0
9 | fov: 40.0
10 | fnum: 4.0
11 | flange: 18.0
12 | thickness: 120.0
13 | lens_type: [["Spheric", "Spheric"], ["Spheric", "Spheric"], ["Spheric", "Spheric", "Spheric"], ["Aperture"], ["Spheric", "Spheric"], ["Spheric", "Aspheric"], ["Spheric", "Aspheric"]]
14 | lrs: [5e-4, 1e-3, 1e-1, 1e-3]
15 | decay: 0.001
16 |
17 | # # lens target example 2 (mobile lens)
18 | # foclen: 5.5
19 | # fov: 70.0
20 | # fnum: 2.0
21 | # flange: 1.0
22 | # thickness: 9.0
23 | # lens_type: [["Aperture"], ["Aspheric", "Aspheric"], ["Aspheric", "Aspheric"], ["Aspheric", "Aspheric"], ["Aspheric", "Aspheric"], ["Aspheric", "Aspheric"]]
24 | # lrs: [3e-4, 1e-4, 1e-1, 1e-3]
25 | # decay: 0.01
--------------------------------------------------------------------------------
/configs/4_tasklens.yml:
--------------------------------------------------------------------------------
1 | DEBUG: True
2 | EXP_NAME: 'TaskLens design - image classification'
3 | seed: 1
4 |
5 | train:
6 | seed: 26
7 | dataset: 'imagenet'
8 | warm_up: 0.3
9 | epochs: 3
10 | bs: 32
11 | img_res: !!python/tuple [224, 224]
12 | spp: 1024
13 |
14 | psf_ks: 101
15 | psf_grid: 7
16 | depth: -10000.
17 | noise: 0.001
18 |
19 | lens:
20 | path: './lenses/cellphone/3P_blank.json'
21 | target_hfov: 0.6
22 | target_fnum: 2.8
23 | lr: !!python/tuple [1e-4, 1e-4, 1e-1, 1e-3]
24 | ai_lr_decay: 0.02
25 | sensor_res: !!python/tuple [512, 512]
26 |
27 |
28 | network:
29 | model: 'resnet50'
30 | pretrained: './ckpt/f2.8/doublet/classi1_resnet_high.pth'
31 | lr: !!float 1e-5
32 |
33 | imagenet_train_dir: TODO:
34 | imagenet_val_dir: TODO:
35 | imagenet_test_dir: TODO:
--------------------------------------------------------------------------------
/configs/7_comp_photography.yml:
--------------------------------------------------------------------------------
1 | exp_name: "Computational photography"
2 | is_debug: True
3 | seed: 42
4 |
5 | camera:
6 | lens_file: "./lenses/camera/ef50mm_f1.8.json"
7 | sensor_size: [20, 20]
8 | sensor_res: [1000, 1000]
9 |
10 | network:
11 | name: NAFNet
12 | in_chan: 5
13 | out_chan: 4
14 | width: 32
15 | middle_blk_num: 1
16 | enc_blk_nums: [1, 1, 1, 18]
17 | dec_blk_nums: [1, 1, 1, 1]
18 | ckpt_path: null
19 |
20 | train:
21 | epochs: 100
22 | eval_every_n_epochs: 5
23 | log_every_n_steps: 100
24 | lr: 1e-4
25 |
26 | train_set:
27 | dataset: "./datasets/DIV2K_train_HR"
28 | res: [512, 512]
29 | output_type: "rggbi"
30 | batch_size: 4
31 | num_workers: 3
32 |
33 | eval_set:
34 | dataset: "./datasets/DIV2K_valid_HR"
35 | res: [512, 512]
36 | output_type: "rggbi"
37 | batch_size: 1
38 | num_workers: 3
--------------------------------------------------------------------------------
/datasets/IQ/img1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/img1.png
--------------------------------------------------------------------------------
/datasets/IQ/img2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/img2.png
--------------------------------------------------------------------------------
/datasets/IQ/img3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/img3.png
--------------------------------------------------------------------------------
/datasets/IQ/img4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/img4.png
--------------------------------------------------------------------------------
/datasets/IQ/img5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/img5.png
--------------------------------------------------------------------------------
/datasets/IQ/readme.md:
--------------------------------------------------------------------------------
1 | ### Image Quality (IQ) Evaluation
2 |
3 | Reference: https://www.imatest.com/
4 |
5 | In computer vision and graphics, image quality is commonly evaluated with pixel-level metrics such as PSNR and perceptual metrics such as LPIPS. These metrics quantify image fidelity, but image quality assessment spans a broader set of measures: objective ones computed directly from the image, and subjective ones that rely on human judgment of visual appeal.
6 |
7 | Links for high-resolution camera test chart:
8 |
9 | - https://www.bealecorner.org/red/test-patterns/
10 | - https://www.photographynews.co.uk/learning/free-lens-testing-chart-download/
11 | - https://www.pointsinfocus.com/learning/cameras-lenses/testing-and-acclimating-to-a-new-camera/
12 | - https://www.graphics.cornell.edu/~westin/misc/res-chart.html
13 |
--------------------------------------------------------------------------------
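
For reference, the pixel-level PSNR metric mentioned above is straightforward to compute by hand; a minimal sketch for images scaled to [0, 1]:

```
import torch

def psnr(img1, img2, max_val=1.0):
    """PSNR in dB: 10 * log10(max_val^2 / MSE)."""
    mse = torch.mean((img1 - img2) ** 2)
    return 10 * torch.log10(max_val**2 / mse)

ref = torch.rand(3, 256, 256)
noisy = (ref + 0.01 * torch.randn_like(ref)).clamp(0, 1)
print(f"{psnr(ref, noisy):.2f} dB")  # roughly 40 dB for Gaussian noise with sigma = 0.01
```
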
/datasets/IQ/usaf1951.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/IQ/usaf1951.png
--------------------------------------------------------------------------------
/datasets/bird.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/datasets/bird.png
--------------------------------------------------------------------------------
/deeplens/__init__.py:
--------------------------------------------------------------------------------
1 | # optics
2 | from .optics import *
3 |
4 | # network
5 | from .network import *
6 |
7 | # utilities
8 | from .utils import *
9 |
10 | # lens systems and camera
11 | from .geolens import GeoLens
12 | from .psfnet import PSFNet
13 | from .camera import Camera
14 |
--------------------------------------------------------------------------------
/deeplens/geolens_io.py:
--------------------------------------------------------------------------------
1 | """ZEMAX file IO
2 | """
3 |
4 | import torch
5 |
6 | from deeplens.optics.geometric_surface.aperture import Aperture
7 | from deeplens.optics.geometric_surface.spheric import Spheric
8 | from deeplens.optics.geometric_surface.aspheric import Aspheric
9 |
10 | class GeoLensIO:
11 | def read_zmx(self, filename="./test.zmx"):
12 | """Load the lens from .zmx file."""
13 | # Read .zmx file
14 | try:
15 | with open(filename, "r", encoding="utf-8") as file:
16 | lines = file.readlines()
17 | except UnicodeDecodeError:
18 | with open(filename, "r", encoding="utf-16") as file:
19 | lines = file.readlines()
20 |
21 | # Iterate through the lines and extract SURF dict
22 | surfs_dict = {}
23 | current_surf = None
24 | for line in lines:
25 | if line.startswith("SURF"):
26 | current_surf = int(line.split()[1])
27 | surfs_dict[current_surf] = {}
28 |
29 | elif current_surf is not None and line.strip() != "":
30 | if len(line.strip().split(maxsplit=1)) == 1:
31 | continue
32 | else:
33 | key, value = line.strip().split(maxsplit=1)
34 | if key == "PARM":
35 | new_key = "PARM" + value.split()[0]
36 | new_value = value.split()[1]
37 | surfs_dict[current_surf][new_key] = new_value
38 | else:
39 | surfs_dict[current_surf][key] = value
40 |
41 | elif line.startswith("FLOA") or line.startswith("ENPD"):
42 | if line.startswith("FLOA"):
43 | self.float_enpd = True
44 | self.enpd = None
45 | else:
46 | self.float_enpd = False
47 | self.enpd = float(line.split()[1])
48 |
49 | self.float_foclen = False
50 | self.float_hfov = False
51 |
52 | # Read the extracted data from each SURF
53 | self.surfaces = []
54 | d = 0.0
55 | for surf_idx, surf_dict in surfs_dict.items():
56 | if surf_idx > 0 and surf_idx < current_surf:
57 | # Lens surface parameters
58 | mat2 = (
59 | f"{surf_dict['GLAS'].split()[3]}/{surf_dict['GLAS'].split()[4]}"
60 | if "GLAS" in surf_dict
61 | else "air"
62 | )
63 | surf_r = float(surf_dict["DIAM"].split()[0]) if "DIAM" in surf_dict else 1.0
64 | surf_c = float(surf_dict["CURV"].split()[0]) if "CURV" in surf_dict else 0.0
65 | surf_d_next = (
66 | float(surf_dict["DISZ"].split()[0]) if "DISZ" in surf_dict else 0.0
67 | )
68 |                 # Conic constant and even-asphere coefficients (stored as strings, default 0.0)
69 |                 surf_conic = float(surf_dict.get("CONI", 0.0))
70 |                 surf_param2 = float(surf_dict.get("PARM2", 0.0))
71 |                 surf_param3 = float(surf_dict.get("PARM3", 0.0))
72 |                 surf_param4 = float(surf_dict.get("PARM4", 0.0))
73 |                 surf_param5 = float(surf_dict.get("PARM5", 0.0))
74 |                 surf_param6 = float(surf_dict.get("PARM6", 0.0))
75 |                 surf_param7 = float(surf_dict.get("PARM7", 0.0))
76 |                 surf_param8 = float(surf_dict.get("PARM8", 0.0))
77 |
78 | if surf_dict["TYPE"] == "STANDARD":
79 | # Aperture
80 | if surf_c == 0.0 and mat2 == "air":
81 | s = Aperture(r=surf_r, d=d)
82 |
83 | # Spherical surface
84 | else:
85 | s = Spheric(c=surf_c, r=surf_r, d=d, mat2=mat2)
86 |
87 | # Aspherical surface
88 | elif surf_dict["TYPE"] == "EVENASPH":
89 | s = Aspheric(c=surf_c, r=surf_r, d=d, ai=[surf_param2, surf_param3, surf_param4, surf_param5, surf_param6, surf_param7, surf_param8], k=surf_conic, mat2=mat2)
90 |
91 | else:
92 | print(f"Surface type {surf_dict['TYPE']} not implemented.")
93 | continue
94 |
95 | self.surfaces.append(s)
96 | d += surf_d_next
97 |
98 | elif surf_idx == current_surf:
99 | # Image sensor
100 | self.r_sensor = float(surf_dict["DIAM"].split()[0])
101 |
102 | else:
103 | pass
104 |
105 | self.d_sensor = torch.tensor(d)
106 | 
107 | return self
108 |
109 |
110 | def write_zmx(self, filename="./test.zmx"):
111 | """Write the lens into .zmx file."""
112 | lens_zmx_str = ""
113 | if self.float_enpd:
114 | enpd_str = 'FLOA'
115 | else:
116 | enpd_str = f'ENPD {self.enpd}'
117 | # Head string
118 | head_str = f"""VERS 190513 80 123457 L123457
119 | MODE SEQ
120 | NAME
121 | PFIL 0 0 0
122 | LANG 0
123 | UNIT MM X W X CM MR CPMM
124 | {enpd_str}
125 | ENVD 2.0E+1 1 0
126 | GFAC 0 0
127 | GCAT OSAKAGASCHEMICAL MISC
128 | XFLN 0. 0. 0.
129 | YFLN 0.0 {0.707 * self.hfov * 57.3} {0.99 * self.hfov * 57.3}
130 | WAVL 0.4861327 0.5875618 0.6562725
131 | RAIM 0 0 1 1 0 0 0 0 0
132 | PUSH 0 0 0 0 0 0
133 | SDMA 0 1 0
134 | FTYP 0 0 3 3 0 0 0
135 | ROPD 2
136 | PICB 1
137 | PWAV 2
138 | POLS 1 0 1 0 0 1 0
139 | GLRS 1 0
140 | GSTD 0 100.000 100.000 100.000 100.000 100.000 100.000 0 1 1 0 0 1 1 1 1 1 1
141 | NSCD 100 500 0 1.0E-3 5 1.0E-6 0 0 0 0 0 0 1000000 0 2
142 | COFN QF "COATING.DAT" "SCATTER_PROFILE.DAT" "ABG_DATA.DAT" "PROFILE.GRD"
143 | COFN COATING.DAT SCATTER_PROFILE.DAT ABG_DATA.DAT PROFILE.GRD
144 | SURF 0
145 | TYPE STANDARD
146 | CURV 0.0
147 | DISZ INFINITY
148 | """
149 | lens_zmx_str += head_str
150 |
151 | # Surface string
152 | for i, s in enumerate(self.surfaces):
153 | d_next = (
154 | self.surfaces[i + 1].d - self.surfaces[i].d
155 | if i < len(self.surfaces) - 1
156 | else self.d_sensor - self.surfaces[i].d
157 | )
158 | surf_str = s.zmx_str(surf_idx=i + 1, d_next=d_next)
159 | lens_zmx_str += surf_str
160 |
161 | # Sensor string
162 | sensor_str = f"""SURF {i + 2}
163 | TYPE STANDARD
164 | CURV 0.
165 | DISZ 0.0
166 | DIAM {self.r_sensor}
167 | """
168 | lens_zmx_str += sensor_str
169 |
170 | # Write lens zmx string into file
171 | with open(filename, "w") as f:
172 |             f.write(lens_zmx_str)
174 |
--------------------------------------------------------------------------------
/deeplens/geolens_utils.py:
--------------------------------------------------------------------------------
1 | """Utils for GeoLens class."""
2 |
3 | import os
4 | import random
5 |
6 | import numpy as np
7 | import torch
8 | import matplotlib.pyplot as plt
9 | from mpl_toolkits.mplot3d import Axes3D
10 |
11 | from .optics.geometric_surface import Aperture, Aspheric, Spheric, ThinLens
12 | from .optics.materials import MATERIAL_data
13 | from .optics.basics import WAVE_RGB
15 |
16 |
17 | # ====================================================================================
18 | # Lens starting point generation
19 | # ====================================================================================
20 | def create_lens(
21 | foclen,
22 | fov,
23 | fnum,
24 | flange,
25 | enpd=None,
26 | thickness=None,
27 | lens_type=[["Spheric", "Spheric"], ["Aperture"], ["Spheric", "Aspheric"]],
28 | save_dir="./",
29 | ):
30 | """Create a lens design starting point with flat surfaces.
31 |
32 | Contributor: Rayengineer
33 |
34 | Args:
35 | foclen: Focal length in mm.
36 | fov: Diagonal field of view in degrees.
37 | fnum: Maximum f number.
38 | flange: Distance from last surface to sensor.
39 | thickness: Total thickness if specified.
40 | lens_type: List of surface types defining each lens element and aperture.
41 | """
42 | from .geolens import GeoLens
43 |
44 | # Compute lens parameters
45 | aper_r = foclen / fnum / 2
46 | imgh = 2 * foclen * float(np.tan(np.deg2rad(fov / 2)))
47 | if thickness is None:
48 | thickness = foclen + flange
49 | d_opt = thickness - flange
50 |
51 | # Materials
52 | mat_names = list(MATERIAL_data.keys())
53 | for mat in ["air", "vacuum", "occluder"]:
54 | if mat in mat_names:
55 | mat_names.remove(mat)
56 |
57 | # Create lens
58 | lens = GeoLens()
59 | surfaces = lens.surfaces
60 |
61 | d_total = 0.0
62 | for elem_type in lens_type:
63 | if elem_type == "Aperture":
64 | d_next = (torch.rand(1) + 0.5).item()
65 | surfaces.append(Aperture(r=aper_r, d=d_total))
66 | d_total += d_next
67 |
68 | elif isinstance(elem_type, list):
69 | if len(elem_type) == 1 and elem_type[0] == "Aperture":
70 | d_next = (torch.rand(1) + 0.5).item()
71 | surfaces.append(Aperture(r=aper_r, d=d_total))
72 | d_total += d_next
73 |
74 | elif len(elem_type) == 1 and elem_type[0] == "ThinLens":
75 | d_next = (torch.rand(1) + 1.0).item()
76 | surfaces.append(ThinLens(r=aper_r, d=d_total))
77 | d_total += d_next
78 |
79 | elif len(elem_type) in [2, 3]:
80 | for i, surface_type in enumerate(elem_type):
81 | if i == len(elem_type) - 1:
82 | mat = "air"
83 | d_next = (torch.rand(1) + 0.5).item()
84 | else:
85 | mat = random.choice(mat_names)
86 | d_next = (torch.rand(1) + 1.0).item()
87 |
88 | surfaces.append(
89 | create_surface(surface_type, d_total, aper_r, imgh, mat)
90 | )
91 | d_total += d_next
92 | else:
93 | raise Exception("Lens element type not supported yet.")
94 | else:
95 | raise Exception("Lens type format not correct.")
96 |
97 | # Normalize optical part total thickness
98 | d_opt_actual = d_total - d_next
99 | for s in surfaces:
100 | s.d = s.d / d_opt_actual * d_opt
101 |
102 | # Lens calculation
103 | lens = lens.to(lens.device)
104 | lens.d_sensor = torch.tensor(thickness).to(lens.device)
105 | lens.enpd = enpd
106 | lens.float_enpd = True if enpd is None else False
107 | lens.float_foclen = False
108 | lens.float_hfov = False
109 | lens.set_sensor(sensor_res=lens.sensor_res, r_sensor=imgh / 2)
110 | lens.post_computation()
111 |
112 | # Save lens
113 | filename = f"starting_point_f{foclen}mm_imgh{imgh}_fnum{fnum}"
114 | lens.write_lens_json(os.path.join(save_dir, f"{filename}.json"))
115 | lens.analysis(os.path.join(save_dir, f"{filename}"))
116 |
117 | return lens
118 |
119 |
120 | def create_surface(surface_type, d_total, aper_r, imgh, mat):
121 | """Create a surface object based on the surface type."""
122 | if mat == "air":
123 | c = -float(np.random.rand()) * 0.001
124 | else:
125 | c = float(np.random.rand()) * 0.001
126 | r = max(imgh / 2, aper_r)
127 |
128 | if surface_type == "Spheric":
129 | return Spheric(r=r, d=d_total, c=c, mat2=mat)
130 | elif surface_type == "Aspheric":
131 | ai = np.random.randn(7).astype(np.float32) * 1e-30
132 | k = float(np.random.rand()) * 0.001
133 | return Aspheric(r=r, d=d_total, c=c, ai=ai, k=k, mat2=mat)
134 | else:
135 | raise Exception("Surface type not supported yet.")
136 |
137 |
138 |
--------------------------------------------------------------------------------
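
A minimal usage sketch for `create_lens`, mirroring the camera-lens target in `configs/2_auto_lens_design.yml` (the `./results` output directory is an assumption):

```
import os

from deeplens.geolens_utils import create_lens

os.makedirs("./results", exist_ok=True)
lens = create_lens(
    foclen=85.0,
    fov=40.0,
    fnum=4.0,
    flange=18.0,
    thickness=120.0,
    lens_type=[["Spheric", "Spheric"], ["Aperture"], ["Spheric", "Aspheric"]],
    save_dir="./results",
)
```
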
/deeplens/network/__init__.py:
--------------------------------------------------------------------------------
1 | from .dataset import ImageDataset, PhotographicDataset
2 | from .loss import PerceptualLoss, PSNRLoss, SSIMLoss
3 | from .reconstruction import NAFNet, Restormer, UNet
4 | from .surrogate import MLP, MLPConv, ModulateSiren, Siren
5 |
--------------------------------------------------------------------------------
/deeplens/network/dataset.py:
--------------------------------------------------------------------------------
1 | """Basic and common dataset classes."""
2 |
3 | import glob
4 | import os
5 | import zipfile
6 |
7 | import requests
8 | import torch
9 | from PIL import Image
10 | from torch.utils.data import Dataset
11 | from torchvision import transforms
12 |
13 |
14 | # ======================================
15 | # Basic dataset class
16 | # ======================================
17 | class ImageDataset(Dataset):
18 | """Basic dataset class for image data. Loads images from a directory."""
19 |
20 | def __init__(self, img_dir, img_res=None):
21 | super(ImageDataset, self).__init__()
22 | self.img_paths = glob.glob(f"{img_dir}/**.png") + glob.glob(f"{img_dir}/**.jpg")
23 | if isinstance(img_res, int):
24 | img_res = [img_res, img_res]
25 |
26 | self.transform = transforms.Compose(
27 | [
28 | transforms.AutoAugment(
29 | transforms.AutoAugmentPolicy.IMAGENET,
30 | transforms.InterpolationMode.BILINEAR,
31 | ),
32 | transforms.RandomResizedCrop(img_res),
33 | transforms.ToTensor(),
34 | transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
35 | ]
36 | )
37 |
38 | def __len__(self):
39 | return len(self.img_paths)
40 |
41 | def __getitem__(self, idx):
42 |         img = Image.open(self.img_paths[idx]).convert("RGB")  # ensure 3 channels before Normalize
43 | img = self.transform(img)
44 | return img
45 |
46 |
47 | class PhotographicDataset(Dataset):
48 | """Loads images and samples ISO values from a directory. The data dict will be used for image simulation, then network training."""
49 |
50 | def __init__(self, img_dir, output_type="rgb", img_res=(512, 512), is_train=True):
51 | """Initialize the Photographic Dataset.
52 |
53 | Args:
54 | img_dir: Directory containing the images
55 | output_type: Type of output image format
56 | img_res: Image resolution. If int, creates square image of [img_res, img_res]
57 | is_train: Whether this is for training (with augmentation) or testing
58 | """
59 | super(PhotographicDataset, self).__init__()
60 | self.img_paths = glob.glob(f"{img_dir}/**.png") + glob.glob(f"{img_dir}/**.jpg")
61 | # print(f"Found {len(self.img_paths)} images in {img_dir}")
62 |
63 | if isinstance(img_res, int):
64 | img_res = (img_res, img_res)
65 | self.is_train = is_train
66 |
67 | # Training transform with augmentation
68 | self.train_transform = transforms.Compose(
69 | [
70 | transforms.RandomResizedCrop(img_res),
71 | transforms.ColorJitter(
72 | brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1
73 | ),
74 | transforms.RandomHorizontalFlip(),
75 | transforms.ToTensor(),
76 | transforms.Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
77 | ]
78 | )
79 |
80 | # Test transform without augmentation
81 | self.test_transform = transforms.Compose(
82 | [
83 | transforms.Resize(img_res),
84 | transforms.CenterCrop(img_res),
85 | transforms.ToTensor(),
86 | transforms.Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
87 | ]
88 | )
89 |
90 | self.output_type = output_type
91 |
92 | def __len__(self):
93 | return len(self.img_paths)
94 |
95 | def sample_iso(self):
96 | return torch.randint(100, 400, (1,))[0].float()
97 |
98 | def __getitem__(self, idx):
99 | # Load image
100 | img = Image.open(self.img_paths[idx]).convert("RGB")
101 |
102 | # Transform
103 | if self.is_train:
104 | img = self.train_transform(img)
105 | else:
106 | img = self.test_transform(img)
107 |
108 | # Random ISO value
109 | iso = self.sample_iso()
110 |
111 | return {
112 | "img": img,
113 | "iso": iso,
114 | "output_type": self.output_type,
115 | }
116 |
117 |
118 | # ======================================
119 | # Download datasets
120 | # ======================================
121 | def download_and_unzip_div2k(destination_folder):
122 | urls = {
123 | "DIV2K_train_HR.zip": "http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip",
124 | "DIV2K_valid_HR.zip": "http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_HR.zip",
125 | }
126 |
127 | # Create destination folder if it doesn't exist
128 | if not os.path.exists(destination_folder):
129 | os.makedirs(destination_folder)
130 |
131 | for filename, url in urls.items():
132 | zip_path = os.path.join(destination_folder, filename)
133 |
134 | # Download the dataset
135 | print(f"Downloading {filename}...")
136 | response = requests.get(url, stream=True)
137 | block_size = 1024 * 1024 # 1 MB
138 |
139 | with open(zip_path, "wb") as f:
140 | for data in response.iter_content(block_size):
141 | f.write(data)
142 | print(f"Download of {filename} complete.")
143 |
144 | # Unzip the dataset
145 | print(f"Unzipping {filename}...")
146 | with zipfile.ZipFile(zip_path, "r") as zip_ref:
147 | zip_ref.extractall(destination_folder)
148 | print(f"Unzipping of {filename} complete.")
149 |
150 | # Remove the zip files
151 | os.remove(zip_path)
152 |
153 |
154 | def download_bsd300(destination_folder="./datasets"):
155 | """Download the BSDS300 dataset.
156 |
157 | Reference:
158 | [1] https://github.com/pytorch/examples/blob/main/super_resolution/data.py#L10
159 | """
160 | import tarfile
161 | import urllib.request
162 | from os import remove
163 | from os.path import basename, exists, join
164 |
165 | output_image_dir = join(destination_folder, "BSDS300/images")
166 |
167 | if not exists(output_image_dir):
168 | url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
169 | print("downloading url ", url)
170 |
171 | data = urllib.request.urlopen(url)
172 |
173 | file_path = join(destination_folder, basename(url))
174 | with open(file_path, "wb") as f:
175 | f.write(data.read())
176 |
177 | print("Extracting data")
178 | with tarfile.open(file_path) as tar:
179 | for item in tar:
180 | tar.extract(item, destination_folder)
181 |
182 | remove(file_path)
183 |
184 | return output_image_dir
185 |
--------------------------------------------------------------------------------
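
A minimal sketch of feeding `PhotographicDataset` into a `DataLoader`, assuming DIV2K has been fetched (e.g. with `download_and_unzip_div2k("./datasets")` above):

```
from torch.utils.data import DataLoader

from deeplens.network.dataset import PhotographicDataset

train_set = PhotographicDataset(
    "./datasets/DIV2K_train_HR", output_type="rggbi", img_res=(512, 512), is_train=True
)
loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=3)

batch = next(iter(loader))
print(batch["img"].shape)  # torch.Size([4, 3, 512, 512])
print(batch["iso"])        # per-image ISO values sampled in [100, 400)
```
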
/deeplens/network/loss/__init__.py:
--------------------------------------------------------------------------------
1 | """Loss functions for neural networks."""
2 |
3 | from .psnr_loss import PSNRLoss
4 | from .ssim_loss import SSIMLoss
5 | from .perceptual_loss import PerceptualLoss
6 |
7 | __all__ = [
8 | "PSNRLoss",
9 | "SSIMLoss",
10 | "PerceptualLoss",
11 | ]
--------------------------------------------------------------------------------
/deeplens/network/loss/perceptual_loss.py:
--------------------------------------------------------------------------------
1 | """Perceptual loss function."""
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torchvision.models as models
6 |
7 |
8 | class PerceptualLoss(nn.Module):
9 | """Perceptual loss based on VGG16 features."""
10 |
11 | def __init__(self, device=None, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
12 | """Initialize perceptual loss.
13 |
14 | Args:
15 | device: Device to put the VGG model on. If None, uses cuda if available.
16 | weights: Weights for different feature layers.
17 | """
18 | super(PerceptualLoss, self).__init__()
19 |
20 | if device is None:
21 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
22 |
23 | self.vgg = models.vgg16(pretrained=True).features.to(device)
24 | self.layer_name_mapping = {
25 | '3': "relu1_2",
26 | '8': "relu2_2",
27 | '15': "relu3_3",
28 | '22': "relu4_3",
29 | '29': "relu5_3"
30 | }
31 |
32 | self.weights = weights
33 |
34 | for param in self.vgg.parameters():
35 | param.requires_grad = False
36 |
37 | def forward(self, x, y):
38 | """Calculate perceptual loss.
39 |
40 | Args:
41 | x: Predicted tensor.
42 | y: Target tensor.
43 |
44 | Returns:
45 | Perceptual loss.
46 | """
47 | x_vgg, y_vgg = self._get_features(x), self._get_features(y)
48 |
49 | content_loss = 0.0
50 | for i, (key, value) in enumerate(x_vgg.items()):
51 | content_loss += self.weights[i] * torch.mean((value - y_vgg[key]) ** 2)
52 |
53 | return content_loss
54 |
55 | def _get_features(self, x):
56 | """Extract features from VGG network.
57 |
58 | Args:
59 | x: Input tensor.
60 |
61 | Returns:
62 | Dictionary of feature tensors.
63 | """
64 | features = {}
65 | for name, layer in self.vgg._modules.items():
66 | x = layer(x)
67 | if name in self.layer_name_mapping:
68 | features[self.layer_name_mapping[name]] = x
69 |
70 | return features
--------------------------------------------------------------------------------
/deeplens/network/loss/psnr_loss.py:
--------------------------------------------------------------------------------
1 | """PSNR loss function."""
2 |
3 | import numpy as np
4 | import torch
5 | import torch.nn as nn
6 |
7 |
8 | class PSNRLoss(nn.Module):
9 | """Peak Signal-to-Noise Ratio (PSNR) loss."""
10 |
11 | def __init__(self, loss_weight=1.0, reduction="mean", toY=False):
12 | """Initialize PSNR loss.
13 |
14 | Args:
15 | loss_weight: Weight for the loss.
16 | reduction: Reduction method, only "mean" is supported.
17 | toY: Whether to convert RGB to Y channel.
18 | """
19 | super(PSNRLoss, self).__init__()
20 | assert reduction == "mean"
21 | self.loss_weight = loss_weight
22 | self.scale = 10 / np.log(10)
23 | self.toY = toY
24 | self.coef = torch.tensor([65.481, 128.553, 24.966]).reshape(1, 3, 1, 1)
25 | self.first = True
26 |
27 | def forward(self, pred, target):
28 | """Calculate PSNR loss.
29 |
30 | Args:
31 | pred: Predicted tensor.
32 | target: Target tensor.
33 |
34 | Returns:
35 | PSNR loss.
36 | """
37 | assert len(pred.size()) == 4
38 | if self.toY:
39 | if self.first:
40 | self.coef = self.coef.to(pred.device)
41 | self.first = False
42 |
43 | pred = (pred * self.coef).sum(dim=1).unsqueeze(dim=1) + 16.0
44 | target = (target * self.coef).sum(dim=1).unsqueeze(dim=1) + 16.0
45 |
46 | pred, target = pred / 255.0, target / 255.0
49 |
50 | return (
51 | self.loss_weight
52 | * self.scale
53 | * torch.log(((pred - target) ** 2).mean(dim=(1, 2, 3)) + 1e-8).mean()
54 | )
--------------------------------------------------------------------------------
/deeplens/network/loss/ssim_loss.py:
--------------------------------------------------------------------------------
1 | """SSIM loss function."""
2 |
3 | import math
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 |
9 | class SSIMLoss(nn.Module):
10 | """Structural Similarity Index (SSIM) loss."""
11 |
12 | def __init__(self, window_size=11, size_average=True):
13 | """Initialize SSIM loss.
14 |
15 | Args:
16 | window_size: Size of the window.
17 | size_average: Whether to average the loss.
18 | """
19 | super(SSIMLoss, self).__init__()
20 | self.window_size = window_size
21 | self.size_average = size_average
22 | self.channel = 1
23 | self.window = self._create_window(window_size, self.channel)
24 |
25 | def forward(self, pred, target):
26 | """Calculate SSIM loss.
27 |
28 | Args:
29 | pred: Predicted tensor.
30 | target: Target tensor.
31 |
32 | Returns:
33 | 1 - SSIM value.
34 | """
35 | return 1 - self._ssim(pred, target)
36 |
37 | def _gaussian(self, window_size, sigma):
38 | """Create a Gaussian window.
39 |
40 | Args:
41 | window_size: Size of the window.
42 | sigma: Standard deviation.
43 |
44 | Returns:
45 | Gaussian window.
46 | """
47 | gauss = torch.Tensor(
48 | [
49 | math.exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2))
50 | for x in range(window_size)
51 | ]
52 | )
53 | return gauss / gauss.sum()
54 |
55 | def _create_window(self, window_size, channel):
56 | """Create a window for SSIM calculation.
57 |
58 | Args:
59 | window_size: Size of the window.
60 | channel: Number of channels.
61 |
62 | Returns:
63 | Window tensor.
64 | """
65 | _1D_window = self._gaussian(window_size, 1.5).unsqueeze(1)
66 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
67 | window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
68 | return window
69 |
70 | def _ssim(self, img1, img2):
71 | """Calculate SSIM value.
72 |
73 | Args:
74 | img1: First image.
75 | img2: Second image.
76 |
77 | Returns:
78 | SSIM value.
79 | """
80 |         (_, channel, _, _) = img1.size()
81 |         # Expand the cached single-channel window to match the input channel count
82 |         window = self.window.expand(channel, 1, self.window_size, self.window_size).contiguous().to(img1.device)
83 |
84 | mu1 = F.conv2d(img1, window, padding=self.window_size//2, groups=channel)
85 | mu2 = F.conv2d(img2, window, padding=self.window_size//2, groups=channel)
86 |
87 | mu1_sq = mu1.pow(2)
88 | mu2_sq = mu2.pow(2)
89 | mu1_mu2 = mu1 * mu2
90 |
91 | sigma1_sq = F.conv2d(img1 * img1, window, padding=self.window_size//2, groups=channel) - mu1_sq
92 | sigma2_sq = F.conv2d(img2 * img2, window, padding=self.window_size//2, groups=channel) - mu2_sq
93 | sigma12 = F.conv2d(img1 * img2, window, padding=self.window_size//2, groups=channel) - mu1_mu2
94 |
95 | C1 = 0.01 ** 2
96 | C2 = 0.03 ** 2
97 |
98 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
99 |
100 | if self.size_average:
101 | return ssim_map.mean()
102 | else:
103 | return ssim_map.mean(1).mean(1).mean(1)
--------------------------------------------------------------------------------
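
A minimal usage sketch for the three losses on random tensors; lower is better for all of them (note that `PSNRLoss` returns the negative PSNR in dB for images in [0, 1]):

```
import torch

from deeplens.network.loss import PerceptualLoss, PSNRLoss, SSIMLoss

pred = torch.rand(2, 3, 128, 128)
target = torch.rand(2, 3, 128, 128)

print(PSNRLoss()(pred, target))  # -PSNR in dB
print(SSIMLoss()(pred, target))  # 1 - SSIM, 0 for identical images
# PerceptualLoss builds a VGG16 (weights downloaded on first use) on CUDA if available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(PerceptualLoss()(pred.to(device), target.to(device)))
```
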
/deeplens/network/reconstruction/__init__.py:
--------------------------------------------------------------------------------
1 | from .nafnet import NAFNet
2 | from .unet import UNet
3 | from .restormer import Restormer
4 |
--------------------------------------------------------------------------------
/deeplens/network/reconstruction/unet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class UNet(nn.Module):
6 | def __init__(self, in_channels=3, out_channels=3):
7 | super().__init__()
8 |         self.pre = nn.Sequential(
9 | nn.Conv2d(in_channels, 16, kernel_size=3, stride=1, padding=1), nn.PReLU(16)
10 | )
11 | self.conv00 = BasicBlock(16, 32)
12 | self.down0 = nn.MaxPool2d((2, 2))
13 | self.conv10 = BasicBlock(32, 64)
14 | self.down1 = nn.MaxPool2d((2, 2))
15 | self.conv20 = BasicBlock(64, 128)
16 | self.down2 = nn.MaxPool2d((2, 2))
17 | self.conv30 = BasicBlock(128, 256)
18 | self.conv31 = BasicBlock(256, 512)
19 | self.up2 = nn.PixelShuffle(2)
20 | self.conv21 = BasicBlock(128, 256)
21 | self.up1 = nn.PixelShuffle(2)
22 | self.conv11 = BasicBlock(64, 128)
23 | self.up0 = nn.PixelShuffle(2)
24 | self.conv01 = BasicBlock(32, 64)
25 |
26 | self.post = nn.Sequential(
27 | nn.Conv2d(64, 16, kernel_size=3, stride=1, padding=1),
28 | nn.PReLU(16),
29 | nn.Conv2d(16, out_channels, kernel_size=3, stride=1, padding=1),
30 | )
31 |
32 | def forward(self, x):
33 | x0 = self.pre(x)
34 | x0 = self.conv00(x0)
35 | x1 = self.down0(x0)
36 | x1 = self.conv10(x1)
37 | x2 = self.down1(x1)
38 | x2 = self.conv20(x2)
39 | x3 = self.down2(x2)
40 | x3 = self.conv30(x3)
41 | x3 = self.conv31(x3)
42 | x2 = x2 + self.up2(x3)
43 | x2 = self.conv21(x2)
44 | x1 = x1 + self.up1(x2)
45 | x1 = self.conv11(x1)
46 | x0 = x0 + self.up0(x1)
47 | x0 = self.conv01(x0)
48 | x = self.post(x0)
49 | return x
50 |
51 |
52 | class BasicBlock(nn.Module):
53 | def __init__(self, in_channels, out_channels):
54 | super().__init__()
55 | layers = []
56 | for _ in range(3):
57 | layers.append(
58 | nn.Sequential(
59 | nn.Conv2d(
60 | in_channels, in_channels, kernel_size=3, padding=1, stride=1
61 | ),
62 | nn.PReLU(in_channels),
63 | )
64 | )
65 | self.main = nn.Sequential(*layers)
66 | self.post = nn.Sequential(
67 | nn.Conv2d(
68 | in_channels * 4, out_channels, kernel_size=3, padding=1, stride=1
69 | ),
70 | nn.PReLU(out_channels),
71 | )
72 |
73 | def forward(self, x):
74 | out = []
75 | out.append(x)
76 | for layers in self.main:
77 | x = layers(x)
78 | out.append(x)
79 |         x = torch.cat(out, dim=1)
80 | x = self.post(x)
81 | return x
82 |
83 |
84 | if __name__ == "__main__":
85 | model = UNet()
86 | input = torch.rand(size=(16, 3, 384, 512))
87 | output = model(input)
88 | print(output.shape)
89 |
--------------------------------------------------------------------------------
/deeplens/network/surrogate/__init__.py:
--------------------------------------------------------------------------------
1 | from .mlp import MLP
2 | from .mlpconv import MLPConv
3 | from .modulate_siren import ModulateSiren
4 | from .siren import Siren
5 |
--------------------------------------------------------------------------------
/deeplens/network/surrogate/mlp.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class MLP(nn.Module):
7 | """All-linear layer. This network suits for low-k intensity/amplitude PSF function prediction."""
8 |
9 | def __init__(self, in_features, out_features, hidden_features=64, hidden_layers=3):
10 | super(MLP, self).__init__()
11 |
12 | layers = [
13 | nn.Linear(in_features, hidden_features // 4, bias=True),
14 | nn.ReLU(inplace=True),
15 | nn.Linear(hidden_features // 4, hidden_features, bias=True),
16 | nn.ReLU(inplace=True),
17 | ]
18 |
19 | for _ in range(hidden_layers):
20 | layers.extend(
21 | [
22 | nn.Linear(hidden_features, hidden_features, bias=True),
23 | nn.ReLU(inplace=True),
24 | ]
25 | )
26 |
27 | layers.extend(
28 | [nn.Linear(hidden_features, out_features, bias=True), nn.Sigmoid()]
29 | )
30 |
31 | self.net = nn.Sequential(*layers)
32 |
33 | def forward(self, x):
34 | x = self.net(x)
35 | x = F.normalize(x, p=1, dim=-1)
36 | return x
37 |
38 |
39 | if __name__ == "__main__":
40 | # Test the network
41 | mlp = MLP(4, 64, hidden_features=64, hidden_layers=3)
42 | print(mlp)
43 | x = torch.rand(100, 4)
44 | y = mlp(x)
45 | print(y.size())
46 |
--------------------------------------------------------------------------------
/deeplens/network/surrogate/mlpconv.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 |
6 |
7 | class MLPConv(nn.Module):
8 | """MLP encoder + convolutional decoder proposed in "Differentiable Compound Optics and Processing Pipeline Optimization for End-To-end Camera Design". This network suits for high-k intensity/amplitude PSF function prediction.
9 |
10 | Input:
11 | in_features (int): Input features, shape of [batch_size, in_features].
12 | ks (int): The size of the output image.
13 | channels (int): The number of output channels. Defaults to 3.
14 | activation (str): The activation function. Defaults to 'relu'.
15 |
16 | Output:
17 | x (Tensor): The output image. Shape of [batch_size, channels, ks, ks].
18 | """
19 |
20 | def __init__(self, in_features, ks, channels=3, activation="relu"):
21 | super(MLPConv, self).__init__()
22 |
23 |         self.ks_mlp = min(ks, 32)
24 |         if ks > 32:
25 |             assert ks % 32 == 0, "ks must be 32n"
26 |         upsample_times = int(math.log(ks / 32, 2)) if ks > 32 else 0  # no upsampling for ks <= 32
27 |
28 | linear_output = channels * self.ks_mlp**2
29 | self.ks = ks
30 | self.channels = channels
31 |
32 | # MLP encoder
33 | self.encoder = nn.Sequential(
34 | nn.Linear(in_features, 256),
35 | nn.ReLU(),
36 | nn.Linear(256, 256),
37 | nn.ReLU(),
38 | nn.Linear(256, 512),
39 | nn.ReLU(),
40 | nn.Linear(512, linear_output),
41 | )
42 |
43 | # Conv decoder
44 | conv_layers = []
45 | conv_layers.append(
46 | nn.ConvTranspose2d(channels, 64, kernel_size=3, stride=1, padding=1)
47 | )
48 | conv_layers.append(nn.ReLU())
49 | for _ in range(upsample_times):
50 | conv_layers.append(
51 | nn.ConvTranspose2d(64, 64, kernel_size=3, stride=1, padding=1)
52 | )
53 | conv_layers.append(nn.ReLU())
54 | conv_layers.append(nn.Upsample(scale_factor=2))
55 |
56 | conv_layers.append(
57 | nn.ConvTranspose2d(64, 64, kernel_size=3, stride=1, padding=1)
58 | )
59 | conv_layers.append(nn.ReLU())
60 | conv_layers.append(
61 | nn.ConvTranspose2d(64, channels, kernel_size=3, stride=1, padding=1)
62 | )
63 | self.decoder = nn.Sequential(*conv_layers)
64 |
65 | if activation == "relu":
66 | self.activation = nn.ReLU()
67 | elif activation == "sigmoid":
68 | self.activation = nn.Sigmoid()
69 |
70 | def forward(self, x):
71 | # Encode the input using the MLP
72 | encoded = self.encoder(x)
73 |
74 | # Reshape the output from the MLP to feed to the CNN
75 | decoded_input = encoded.view(
76 | -1, self.channels, self.ks_mlp, self.ks_mlp
77 | ) # reshape to (batch_size, channels, height, width)
78 |
79 | # Decode the output using the CNN
80 | decoded = self.decoder(decoded_input)
81 | # decoded = self.activation(decoded)
82 |
83 | # This normalization only works for PSF network
84 | decoded = F.normalize(decoded, p=1, dim=[-1, -2])
85 |
86 | return decoded
87 |
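88 | 
89 | if __name__ == "__main__":
90 |     # Minimal usage sketch: map a 4-D input (e.g., PSF coordinates) to a
91 |     # 128x128 RGB PSF; ks = 128 satisfies the 32 * 2^n constraint.
92 |     import torch
93 | 
94 |     net = MLPConv(in_features=4, ks=128, channels=3)
95 |     x = torch.rand(16, 4)
96 |     psf = net(x)
97 |     print(psf.shape)  # torch.Size([16, 3, 128, 128]), L1-normalized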
--------------------------------------------------------------------------------
/deeplens/network/surrogate/modulate_siren.py:
--------------------------------------------------------------------------------
1 | import math
2 | 
3 |
4 | import numpy as np
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | from einops import rearrange
9 |
10 |
11 | class ModulateSiren(nn.Module):
12 | def __init__(
13 | self,
14 | dim_in,
15 | dim_hidden,
16 | dim_out,
17 | dim_latent,
18 | num_layers,
19 | image_width,
20 | image_height,
21 | w0=1.0,
22 | w0_initial=30.0,
23 | use_bias=True,
24 | final_activation=None,
25 | outermost_linear=True,
26 | ):
27 | super().__init__()
28 | self.num_layers = num_layers
29 | self.dim_hidden = dim_hidden
30 | self.img_width = image_width
31 | self.img_height = image_height
32 |
33 | # ==> Synthesizer
34 | synthesizer_layers = nn.ModuleList([])
35 | for ind in range(num_layers):
36 | is_first = ind == 0
37 | layer_w0 = w0_initial if is_first else w0
38 | layer_dim_in = dim_in if is_first else dim_hidden
39 |
40 | synthesizer_layers.append(
41 | SineLayer(
42 | in_features=layer_dim_in,
43 | out_features=dim_hidden,
44 | omega_0=layer_w0,
45 | bias=use_bias,
46 | is_first=is_first,
47 | )
48 | )
49 |
50 | if outermost_linear:
51 | last_layer = nn.Linear(dim_hidden, dim_out)
52 | with torch.no_grad():
53 | # w_std = math.sqrt(6 / dim_hidden) / w0
54 | # self.last_layer.weight.uniform_(- w_std, w_std)
55 | nn.init.kaiming_normal_(
56 | last_layer.weight, a=0.0, nonlinearity="relu", mode="fan_in"
57 | )
58 | else:
59 | final_activation = (
60 |                 nn.Identity() if final_activation is None else final_activation
61 | )
62 | last_layer = Siren(
63 | dim_in=dim_hidden,
64 | dim_out=dim_out,
65 | w0=w0,
66 | use_bias=use_bias,
67 | activation=final_activation,
68 | )
69 | synthesizer_layers.append(last_layer)
70 |
71 | self.synthesizer = synthesizer_layers
72 | # self.synthesizer = nn.Sequential(*synthesizer)
73 |
74 | # ==> Modulator
75 | modulator_layers = nn.ModuleList([])
76 | for ind in range(num_layers):
77 | is_first = ind == 0
78 | dim = dim_latent if is_first else (dim_hidden + dim_latent)
79 |
80 | modulator_layers.append(
81 | nn.Sequential(nn.Linear(dim, dim_hidden), nn.ReLU())
82 | )
83 |
84 | with torch.no_grad():
85 | # self.layers[-1][0].weight.uniform_(-1 / dim_hidden, 1 / dim_hidden)
86 | nn.init.kaiming_normal_(
87 | modulator_layers[-1][0].weight,
88 | a=0.0,
89 | nonlinearity="relu",
90 | mode="fan_in",
91 | )
92 |
93 | self.modulator = modulator_layers
94 | # self.modulator = nn.Sequential(*modulator_layers)
95 |
96 | # ==> Positions
97 | tensors = [
98 | torch.linspace(-1, 1, steps=image_height),
99 | torch.linspace(-1, 1, steps=image_width),
100 | ]
101 | mgrid = torch.stack(torch.meshgrid(*tensors, indexing="ij"), dim=-1)
102 | mgrid = rearrange(mgrid, "h w c -> (h w) c")
103 | self.register_buffer("grid", mgrid)
104 |
105 | def forward(self, latent):
106 | x = self.grid.clone().detach().requires_grad_()
107 |
108 | for i in range(self.num_layers):
109 | if i == 0:
110 | z = self.modulator[i](latent)
111 | else:
112 | z = self.modulator[i](torch.cat((latent, z), dim=-1))
113 |
114 | x = self.synthesizer[i](x)
115 | x = x * z
116 |
117 |         x = self.synthesizer[-1](x)  # shape of (h*w, dim_out)
118 | x = torch.tanh(x)
119 | x = x.view(
120 | -1, self.img_height, self.img_width, 1
121 | ) # reshape to (batch_size, height, width, channels)
122 | x = x.permute(0, 3, 1, 2) # reshape to (batch_size, channels, height, width)
123 | return x
124 |
125 |
126 | class SineLayer(nn.Module):
127 | def __init__(
128 | self, in_features, out_features, bias=True, is_first=False, omega_0=30
129 | ):
130 | super().__init__()
131 | self.omega_0 = omega_0
132 | self.is_first = is_first
133 |
134 | self.in_features = in_features
135 | self.linear = nn.Linear(in_features, out_features, bias=bias)
136 |
137 | self.init_weights()
138 |
139 | def init_weights(self):
140 | with torch.no_grad():
141 | if self.is_first:
142 | self.linear.weight.uniform_(-1 / self.in_features, 1 / self.in_features)
143 | else:
144 | self.linear.weight.uniform_(
145 | -np.sqrt(6 / self.in_features) / self.omega_0,
146 | np.sqrt(6 / self.in_features) / self.omega_0,
147 | )
148 |
149 | def forward(self, input):
150 | return torch.sin(self.omega_0 * self.linear(input))
151 |
152 |
153 | class Siren(nn.Module):
154 | def __init__(
155 | self,
156 | dim_in,
157 | dim_out,
158 | w0=1.0,
159 | c=6.0,
160 | is_first=False,
161 | use_bias=True,
162 | activation=None,
163 | ):
164 | super().__init__()
165 | self.dim_in = dim_in
166 | self.is_first = is_first
167 |
168 | weight = torch.zeros(dim_out, dim_in)
169 | bias = torch.zeros(dim_out) if use_bias else None
170 | self.init_(weight, bias, c=c, w0=w0)
171 |
172 | self.weight = nn.Parameter(weight)
173 | self.bias = nn.Parameter(bias) if use_bias else None
174 | self.activation = Sine(w0) if activation is None else activation
175 |
176 | def init_(self, weight, bias, c, w0):
177 | dim = self.dim_in
178 |
179 | w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
180 | weight.uniform_(-w_std, w_std)
181 |
182 | def forward(self, x):
183 | out = F.linear(x, self.weight, self.bias)
184 | out = self.activation(out)
185 | return out
186 |
187 |
188 | class Sine(nn.Module):
189 | def __init__(self, w0=1.0):
190 | super().__init__()
191 | self.w0 = w0
192 |
193 | def forward(self, x):
194 | return torch.sin(self.w0 * x)
195 |
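196 | 
197 | if __name__ == "__main__":
198 |     # Minimal usage sketch: decode a latent code into a 64x64 single-
199 |     # channel image through the latent-modulated SIREN.
200 |     net = ModulateSiren(
201 |         dim_in=2, dim_hidden=128, dim_out=1, dim_latent=32,
202 |         num_layers=4, image_width=64, image_height=64,
203 |     )
204 |     latent = torch.rand(32)
205 |     img = net(latent)
206 |     print(img.shape)  # torch.Size([1, 1, 64, 64])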
--------------------------------------------------------------------------------
/deeplens/network/surrogate/siren.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | class Siren(nn.Module):
9 | def __init__(
10 | self,
11 | dim_in,
12 | dim_out,
13 | w0=1.0,
14 | c=6.0,
15 | is_first=False,
16 | use_bias=True,
17 | activation=None,
18 | ):
19 | super().__init__()
20 | self.dim_in = dim_in
21 | self.is_first = is_first
22 |
23 | weight = torch.zeros(dim_out, dim_in)
24 | bias = torch.zeros(dim_out) if use_bias else None
25 | self.init_(weight, bias, c=c, w0=w0)
26 |
27 | self.weight = nn.Parameter(weight)
28 | self.bias = nn.Parameter(bias) if use_bias else None
29 | self.activation = Sine(w0) if activation is None else activation
30 |
31 | def init_(self, weight, bias, c, w0):
32 | dim = self.dim_in
33 |
34 | w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
35 | weight.uniform_(-w_std, w_std)
36 |
37 | def forward(self, x):
38 | out = F.linear(x, self.weight, self.bias)
39 | out = self.activation(out)
40 | return out
41 |
42 |
43 | class Sine(nn.Module):
44 | def __init__(self, w0=1.0):
45 | super().__init__()
46 | self.w0 = w0
47 |
48 | def forward(self, x):
49 | return torch.sin(self.w0 * x)
50 |
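51 | 
52 | if __name__ == "__main__":
53 |     # Minimal usage sketch: one SIREN layer mapping 2-D coordinates in
54 |     # [-1, 1] to 64 features via sin(w0 * (x W^T + b)).
55 |     layer = Siren(dim_in=2, dim_out=64, w0=30.0, is_first=True)
56 |     coords = torch.rand(1024, 2) * 2 - 1
57 |     feats = layer(coords)
58 |     print(feats.shape)  # torch.Size([1024, 64])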
--------------------------------------------------------------------------------
/deeplens/optics/__init__.py:
--------------------------------------------------------------------------------
1 | from .basics import *
2 | from .materials import *
3 | from .ray import *
4 | from .surfaces import *
5 | from .surfaces_diffractive import *
6 | from .wave import *
7 |
--------------------------------------------------------------------------------
/deeplens/optics/diffractive_surface/__init__.py:
--------------------------------------------------------------------------------
1 | """Diffractive surface module."""
2 |
3 | from .diffractive import DiffractiveSurface
4 | from .binary2 import Binary2
5 | from .fresnel import Fresnel
6 | from .pixel2d import Pixel2D
7 | from .thinlens import ThinLens
8 | from .zernike import Zernike
9 |
10 | __all__ = ["DiffractiveSurface", "Binary2", "Fresnel", "Pixel2D", "ThinLens", "Zernike"]
--------------------------------------------------------------------------------
/deeplens/optics/diffractive_surface/binary2.py:
--------------------------------------------------------------------------------
1 | """Binary2 DOE parameterization."""
2 |
3 | import torch
4 | from .diffractive import DiffractiveSurface
5 |
6 |
7 | class Binary2(DiffractiveSurface):
8 | def __init__(
9 | self,
10 | d,
11 | res=(2000, 2000),
12 | mat="fused_silica",
13 | wvln0=0.55,
14 | fab_ps=0.001,
15 | device="cpu",
16 | ):
17 | """Initialize Binary DOE."""
18 | super().__init__(
19 | d=d, res=res, mat=mat, wvln0=wvln0, fab_ps=fab_ps, device=device
20 | )
21 |
22 | # Initialize with random small values
23 | self.alpha2 = (torch.rand(1) - 0.5) * 0.02
24 | self.alpha4 = (torch.rand(1) - 0.5) * 0.002
25 | self.alpha6 = (torch.rand(1) - 0.5) * 0.0002
26 | self.alpha8 = (torch.rand(1) - 0.5) * 0.00002
27 | self.alpha10 = (torch.rand(1) - 0.5) * 0.000002
28 |
29 | self.x, self.y = torch.meshgrid(
30 | torch.linspace(-self.w / 2, self.w / 2, self.res[1]),
31 | torch.linspace(self.h / 2, -self.h / 2, self.res[0]),
32 | indexing="xy",
33 | )
34 |
35 | self.to(device)
36 |
37 | @classmethod
38 | def init_from_dict(cls, doe_dict):
39 | """Initialize Binary DOE from a dict."""
40 | d = doe_dict["d"]
41 | res = doe_dict.get("res", (2000, 2000))
42 | fab_ps = doe_dict.get("fab_ps", 0.001)
43 | wvln0 = doe_dict.get("wvln0", 0.55)
44 | mat = doe_dict.get("mat", "fused_silica")
45 | return cls(
46 | d=d,
47 | res=res,
48 | mat=mat,
49 | wvln0=wvln0,
50 | fab_ps=fab_ps,
51 | )
52 |
53 | def _phase_map0(self):
54 | """Get the phase map at design wavelength."""
55 | # Calculate radial distance
56 | r2 = self.x**2 + self.y**2
57 |
58 | # Calculate phase using Binary DOE formula
59 | phase = torch.pi * (
60 | self.alpha2 * r2
61 | + self.alpha4 * r2**2
62 | + self.alpha6 * r2**3
63 | + self.alpha8 * r2**4
64 | + self.alpha10 * r2**5
65 | )
66 |
67 | return phase
68 |
69 | # =======================================
70 | # Optimization
71 | # =======================================
72 | def get_optimizer_params(self, lr=0.001):
73 | """Get parameters for optimization.
74 |
75 | Args:
76 | lr (float): Base learning rate for alpha2. Learning rates for higher-order parameters will be scaled progressively (10x, 100x, 1000x, 10000x).
77 | """
78 | self.alpha2.requires_grad = True
79 | self.alpha4.requires_grad = True
80 | self.alpha6.requires_grad = True
81 | self.alpha8.requires_grad = True
82 | self.alpha10.requires_grad = True
83 |
84 | optimizer_params = [
85 | {"params": [self.alpha2], "lr": lr},
86 | {"params": [self.alpha4], "lr": lr * 10},
87 | {"params": [self.alpha6], "lr": lr * 100},
88 | {"params": [self.alpha8], "lr": lr * 1000},
89 | {"params": [self.alpha10], "lr": lr * 10000},
90 | ]
91 |
92 | return optimizer_params
93 |
94 | # =======================================
95 | # IO
96 | # =======================================
97 | def surf_dict(self):
98 | """Return a dict of surface."""
99 | surf_dict = super().surf_dict()
100 | surf_dict["alpha2"] = round(self.alpha2.item(), 6)
101 | surf_dict["alpha4"] = round(self.alpha4.item(), 6)
102 | surf_dict["alpha6"] = round(self.alpha6.item(), 6)
103 | surf_dict["alpha8"] = round(self.alpha8.item(), 6)
104 | surf_dict["alpha10"] = round(self.alpha10.item(), 6)
105 | return surf_dict
106 |
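107 | 
108 | if __name__ == "__main__":
109 |     # Minimal usage sketch (run as: python -m
110 |     # deeplens.optics.diffractive_surface.binary2). Assumes the base
111 |     # DiffractiveSurface derives the aperture size from res and fab_ps.
112 |     doe = Binary2(d=5.0, res=(512, 512), fab_ps=0.004)
113 |     optimizer = torch.optim.Adam(doe.get_optimizer_params(lr=1e-3))
114 |     print(doe._phase_map0().shape)  # (512, 512) phase at wvln0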
--------------------------------------------------------------------------------
/deeplens/optics/diffractive_surface/fresnel.py:
--------------------------------------------------------------------------------
1 | """Fresnel DOE. Phase fresnel lens has an inverse dispersion property compared to refractive lens.
2 |
3 | Reference:
4 | [1] https://www.nikonusa.com/learn-and-explore/c/ideas-and-inspiration/phase-fresnel-from-wildlife-photography-to-portraiture
5 | """
6 |
7 | import torch
8 | from .diffractive import DiffractiveSurface
9 |
10 |
11 | class Fresnel(DiffractiveSurface):
12 | def __init__(
13 | self,
14 | d,
15 | f0=None,
16 | wvln0=0.55,
17 | res=(2000, 2000),
18 | mat="fused_silica",
19 | fab_ps=0.001,
20 | device="cpu",
21 | ):
22 | """Initialize Fresnel DOE. A diffractive Fresnel lens shows inverse dispersion property compared to refractive lens.
23 |
24 | Args:
25 | f0 (float): Initial focal length. [mm]
26 | d (float): Distance of the DOE surface. [mm]
27 | res (tuple or int): Resolution of the DOE, [w, h]. [pixel]
28 | wvln0 (float): Design wavelength. [um]
29 | mat (str): Material of the DOE.
30 | fab_ps (float): Fabrication pixel size. [mm]
31 | device (str): Device to run the DOE.
32 | """
33 | super().__init__(
34 | d=d, res=res, wvln0=wvln0, mat=mat, fab_ps=fab_ps, device=device
35 | )
36 |
37 | # Initial focal length
38 | if f0 is None:
39 | self.f0 = torch.randn(1) * 1e6
40 | else:
41 | self.f0 = torch.tensor(f0)
42 |
43 | self.to(device)
44 |
45 | @classmethod
46 | def init_from_dict(cls, doe_dict):
47 | """Initialize Fresnel DOE from a dict."""
48 | d = doe_dict["d"]
49 | res = doe_dict.get("res", (2000, 2000))
50 | fab_ps = doe_dict.get("fab_ps", 0.001)
51 | f0 = doe_dict.get("f0", None)
52 | wvln0 = doe_dict.get("wvln0", 0.55)
53 | mat = doe_dict.get("mat", "fused_silica")
54 | return cls(
55 | d=d,
56 | res=res,
57 | fab_ps=fab_ps,
58 | f0=f0,
59 | wvln0=wvln0,
60 | mat=mat,
61 | )
62 |
63 | def _phase_map0(self):
64 | """Get the phase map at design wavelength."""
65 | wvln0_mm = self.wvln0 * 1e-3
66 | phase = -2 * torch.pi * (self.x**2 + self.y**2) / (2 * self.f0 * wvln0_mm)
67 | return phase
68 |
69 | # =======================================
70 | # Optimization
71 | # =======================================
72 | def get_optimizer_params(self, lr=0.001):
73 | """Get parameters for optimization."""
74 | self.f0.requires_grad = True
75 | optimizer_params = [{"params": [self.f0], "lr": lr}]
76 | return optimizer_params
77 |
78 | # =======================================
79 | # IO
80 | # =======================================
81 | def surf_dict(self):
82 | """Return a dict of surface."""
83 | surf_dict = super().surf_dict()
84 | surf_dict["f0"] = self.f0.item()
85 | surf_dict["wvln0"] = self.wvln0
86 | return surf_dict
87 |
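88 | 
89 | if __name__ == "__main__":
90 |     # Minimal usage sketch (run as: python -m
91 |     # deeplens.optics.diffractive_surface.fresnel). The design phase is
92 |     # phi(r) = -2*pi*r^2 / (2*f0*wvln0); assumes the base class builds
93 |     # the (x, y) grid from res and fab_ps.
94 |     doe = Fresnel(d=2.0, f0=100.0, res=(512, 512), fab_ps=0.004)
95 |     print(doe._phase_map0().shape)  # (512, 512)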
--------------------------------------------------------------------------------
/deeplens/optics/diffractive_surface/pixel2d.py:
--------------------------------------------------------------------------------
1 | """Pixel2D DOE parameterization. Each pixel is an independent parameter."""
2 |
3 | import torch
4 | from .diffractive import DiffractiveSurface
5 |
6 |
7 | class Pixel2D(DiffractiveSurface):
8 | """Pixel2D DOE parameterization - direct phase map representation."""
9 |
10 | def __init__(
11 | self,
12 | d,
13 | phase_map_path=None,
14 | res=(2000, 2000),
15 | mat="fused_silica",
16 | wvln0=0.55,
17 | fab_ps=0.001,
18 | device="cpu",
19 | ):
20 | """Initialize Pixel2D DOE, where each pixel is independent parameter.
21 |
22 | Args:
23 | d (float): Distance of the DOE surface. [mm]
24 | size (tuple or int): Size of the DOE, [w, h]. [mm]
25 | res (tuple or int): Resolution of the DOE, [w, h]. [pixel]
26 | mat (str): Material of the DOE.
27 | fab_ps (float): Fabrication pixel size. [mm]
28 | device (str): Device to run the DOE.
29 | """
30 | super().__init__(d=d, res=res, mat=mat, fab_ps=fab_ps, wvln0=wvln0, device=device)
31 |
32 | # Initialize phase map with random values
33 | if phase_map_path is None:
34 | self.phase_map = torch.randn(self.res, device=self.device) * 1e-3
35 | elif isinstance(phase_map_path, str):
36 | self.phase_map = torch.load(phase_map_path, map_location=device)
37 | else:
38 | raise ValueError(f"Invalid phase_map_path: {phase_map_path}")
39 |
40 | self.to(device)
41 |
42 | @classmethod
43 | def init_from_dict(cls, doe_dict):
44 | """Initialize Pixel2D DOE from a dict."""
45 | d = doe_dict["d"]
46 | res = doe_dict.get("res", (2000, 2000))
47 | fab_ps = doe_dict.get("fab_ps", 0.001)
48 | phase_map_path = doe_dict.get("phase_map_path", None)
49 | wvln0 = doe_dict.get("wvln0", 0.55)
50 | mat = doe_dict.get("mat", "fused_silica")
51 | return cls(
52 | d=d,
53 | res=res,
54 | mat=mat,
55 | fab_ps=fab_ps,
56 | phase_map_path=phase_map_path,
57 | wvln0=wvln0,
58 | )
59 |
60 | def _phase_map0(self):
61 | """Get the phase map at design wavelength."""
62 | return self.phase_map
63 |
64 | # =======================================
65 | # Optimization
66 | # =======================================
67 | def get_optimizer_params(self, lr=0.01):
68 | """Get parameters for optimization."""
69 | self.phase_map.requires_grad = True
70 | optimizer_params = [{"params": [self.phase_map], "lr": lr}]
71 | return optimizer_params
72 |
73 | # =======================================
74 | # IO
75 | # =======================================
76 | def surf_dict(self, phase_map_path):
77 | """Return a dict of surface."""
78 | surf_dict = super().surf_dict()
79 | surf_dict["phase_map_path"] = phase_map_path
80 | torch.save(self.phase_map.clone().detach().cpu(), phase_map_path)
81 | return surf_dict
82 |
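83 | 
84 | if __name__ == "__main__":
85 |     # Minimal usage sketch (run as: python -m
86 |     # deeplens.optics.diffractive_surface.pixel2d): every pixel of the
87 |     # phase map is an independent optimizable parameter.
88 |     doe = Pixel2D(d=2.0, res=(256, 256))
89 |     optimizer = torch.optim.Adam(doe.get_optimizer_params(lr=1e-2))
90 |     print(doe._phase_map0().shape)  # (256, 256)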
--------------------------------------------------------------------------------
/deeplens/optics/diffractive_surface/thinlens.py:
--------------------------------------------------------------------------------
1 | """An ideal thin lens without any chromatic aberration."""
2 |
3 | import torch
4 | import torch.nn.functional as F
5 | from .diffractive import DiffractiveSurface
6 |
7 |
8 | class ThinLens(DiffractiveSurface):
9 | def __init__(
10 | self,
11 | d,
12 | f0=None,
13 | res=(2000, 2000),
14 | mat="fused_silica",
15 | fab_ps=0.001,
16 | device="cpu",
17 | ):
18 | """Initialize a thin lens. A thin lens focuses all wavelengths to the same point.
19 |
20 | Args:
21 | d (float): Distance of the DOE surface. [mm]
22 | f0 (float): Initial focal length. [mm]
23 | res (tuple or int): Resolution of the DOE, [w, h]. [pixel]
24 | mat (str): Material of the DOE.
25 | fab_ps (float): Fabrication pixel size. [mm]
26 | device (str): Device to run the DOE.
27 | """
28 | super().__init__(d=d, res=res, mat=mat, fab_ps=fab_ps, device=device)
29 |
30 | # Initial focal length
31 | if f0 is None:
32 | self.f0 = (
33 | torch.randn(1, device=self.device) * 1e6
34 |             )  # [mm], initialize with a very large focal length
35 | else:
36 | self.f0 = torch.tensor(f0, device=self.device)
37 |
38 | self.to(device)
39 |
40 | @classmethod
41 | def init_from_dict(cls, doe_dict):
42 | """Initialize a thin lens from a dict."""
43 | d = doe_dict["d"]
44 | f0 = doe_dict.get("f0", None)
45 | res = doe_dict.get("res", (2000, 2000))
46 | mat = doe_dict.get("mat", "fused_silica")
47 | fab_ps = doe_dict.get("fab_ps", 0.001)
48 | return cls(
49 | d=d,
50 | f0=f0,
51 | res=res,
52 | mat=mat,
53 | fab_ps=fab_ps,
54 | )
55 |
56 | def get_phase_map(self, wvln=0.55):
57 | """Get the phase map at the given wavelength."""
58 |
59 | # Same focal length for all wavelengths
60 | wvln_mm = wvln * 1e-3
61 | phase_map = -2 * torch.pi * (self.x**2 + self.y**2) / (2 * self.f0 * wvln_mm)
62 | phase_map = torch.remainder(phase_map, 2 * torch.pi)
63 |
64 | # Interpolate to the desired resolution
65 | phase_map = (
66 | F.interpolate(
67 | phase_map.unsqueeze(0).unsqueeze(0), size=self.res, mode="nearest"
68 | )
69 | .squeeze(0)
70 | .squeeze(0)
71 | )
72 |
73 | return phase_map
74 |
75 | # =======================================
76 | # Optimization
77 | # =======================================
78 | def get_optimizer_params(self, lr=0.1):
79 | """Get parameters for optimization."""
80 | self.f0.requires_grad = True
81 | optimizer_params = [{"params": [self.f0], "lr": lr}]
82 | return optimizer_params
83 |
84 | # =======================================
85 | # IO
86 | # =======================================
87 | def surf_dict(self):
88 | """Return a dict of surface."""
89 | surf_dict = super().surf_dict()
90 | surf_dict["f0"] = self.f0.item()
91 | return surf_dict
92 |
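93 | 
94 | if __name__ == "__main__":
95 |     # Minimal usage sketch (run as: python -m
96 |     # deeplens.optics.diffractive_surface.thinlens): the wrapped phase is
97 |     # rebuilt per wavelength, so all colors share one focal length.
98 |     # Assumes the base class builds the (x, y) grid from res and fab_ps.
99 |     lens = ThinLens(d=2.0, f0=50.0, res=(512, 512), fab_ps=0.004)
100 |     print(lens.get_phase_map(wvln=0.55).shape)  # (512, 512), in [0, 2*pi)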
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import Surface
2 |
3 | from .aperture import Aperture
4 | from .aspheric import Aspheric
5 | from .cubic import Cubic
6 | from .phase import Phase
7 | from .mirror import Mirror
8 | from .plane import Plane
9 | from .spheric import Spheric
10 | from .thinlens import ThinLens
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/aperture.py:
--------------------------------------------------------------------------------
1 | """Aperture surface."""
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from .base import Surface
7 |
8 |
9 | class Aperture(Surface):
10 | def __init__(self, r, d, diffraction=False, device="cpu"):
11 | """Aperture surface."""
12 | Surface.__init__(self, r, d, mat2="air", is_square=False, device=device)
13 | self.diffraction = diffraction
14 | self.to(device)
15 |
16 | @classmethod
17 | def init_from_dict(cls, surf_dict):
18 | if "diffraction" in surf_dict:
19 | diffraction = surf_dict["diffraction"]
20 | else:
21 | diffraction = False
22 | return cls(surf_dict["r"], surf_dict["d"], diffraction)
23 |
24 | def ray_reaction(self, ray, n1=1.0, n2=1.0, refraction=False):
25 | """Compute output ray after intersection and refraction."""
26 | # Intersection
27 | t = (self.d - ray.o[..., 2]) / ray.d[..., 2]
28 | new_o = ray.o + t.unsqueeze(-1) * ray.d
29 | valid = (torch.sqrt(new_o[..., 0] ** 2 + new_o[..., 1] ** 2) <= self.r) & (
30 | ray.valid > 0
31 | )
32 |
33 | # Update position
34 | new_o[~valid] = ray.o[~valid]
35 | ray.o = new_o
36 | ray.valid = ray.valid * valid
37 |
38 | # Update phase
39 | if ray.coherent:
40 | new_opl = ray.opl + t
41 | new_opl[~valid] = ray.opl[~valid]
42 | ray.opl = new_opl
43 |
44 | # Diffraction
45 | if self.diffraction:
46 |             raise NotImplementedError("Diffraction is not implemented for aperture.")
47 |
48 | return ray
49 |
50 | def _sag(self, x, y):
51 | """Compute surface height (always zero for aperture)."""
52 | return torch.zeros_like(x)
53 |
54 | # =======================================
55 | # Visualization
56 | # =======================================
57 | def draw_widget(self, ax, color="orange", linestyle="solid"):
58 | """Draw aperture wedge on the figure."""
59 | d = self.d.item()
60 | aper_wedge_l = 0.05 * self.r # [mm]
61 | aper_wedge_h = 0.15 * self.r # [mm]
62 |
63 | # Parallel edges
64 | z = np.linspace(d - aper_wedge_l, d + aper_wedge_l, 3)
65 | x = -self.r * np.ones(3)
66 | ax.plot(z, x, color=color, linestyle=linestyle, linewidth=0.8)
67 | x = self.r * np.ones(3)
68 | ax.plot(z, x, color=color, linestyle=linestyle, linewidth=0.8)
69 |
70 | # Vertical edges
71 | z = d * np.ones(3)
72 | x = np.linspace(self.r, self.r + aper_wedge_h, 3)
73 | ax.plot(z, x, color=color, linestyle=linestyle, linewidth=0.8)
74 | x = np.linspace(-self.r - aper_wedge_h, -self.r, 3)
75 | ax.plot(z, x, color=color, linestyle=linestyle, linewidth=0.8)
76 |
77 | def draw_widget3D(self, ax, color="black"):
78 | """Draw the aperture as a circle in a 3D plot."""
79 | # Draw the edge circle
80 | theta = np.linspace(0, 2 * np.pi, 100)
81 | edge_x = self.r * np.cos(theta)
82 | edge_y = self.r * np.sin(theta)
83 | edge_z = np.full_like(edge_x, self.d.item()) # Constant z at aperture position
84 |
85 | # Plot the edge circle
86 | line = ax.plot(edge_z, edge_x, edge_y, color=color, linewidth=1.5)
87 |
88 | return line
89 |
90 | # =========================================
91 | # Optimization
92 | # =========================================
93 | def get_optimizer_params(self, lr=[0.001, 0.001], optim_mat=False):
94 | """Activate gradient computation for d and return optimizer parameters."""
95 | self.d.requires_grad_(True)
96 |
97 | params = []
98 | params.append({"params": [self.d], "lr": lr[1]})
99 |
100 | return params
101 |
102 | # =======================================
103 | # IO
104 | # =======================================
105 | def surf_dict(self):
106 | """Dict of surface parameters."""
107 | surf_dict = {
108 | "type": "Aperture",
109 | "r": round(self.r, 4),
110 | "(d)": round(self.d.item(), 4),
111 | "mat2": "air",
112 | "is_square": self.is_square,
113 | "diffraction": self.diffraction,
114 | }
115 | return surf_dict
116 |
117 | def zmx_str(self, surf_idx, d_next):
118 | """Zemax surface string."""
119 | zmx_str = f"""SURF {surf_idx}
120 | STOP
121 | TYPE STANDARD
122 | CURV 0.0
123 | DISZ {d_next.item()}
124 | """
125 | return zmx_str
126 |
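127 | 
128 | if __name__ == "__main__":
129 |     # Minimal usage sketch (run as: python -m
130 |     # deeplens.optics.geometric_surface.aperture): rays outside the clear
131 |     # radius are invalidated, rays inside pass through unchanged.
132 |     from deeplens.optics.ray import Ray
133 | 
134 |     aper = Aperture(r=1.0, d=0.0)
135 |     o = torch.tensor([[0.0, 0.0, -1.0], [0.0, 2.0, -1.0]])
136 |     d = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
137 |     ray = aper.ray_reaction(Ray(o, d))
138 |     print(ray.valid)  # the off-axis ray (|y| > r) is blocked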
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/cubic.py:
--------------------------------------------------------------------------------
1 | """Cubic surface.
2 |
3 | Typical equation: z(x,y) = b3 * (x^3 + y^3)
4 | """
5 | import numpy as np
6 | import torch
7 |
8 | from .base import Surface
9 |
10 |
11 | class Cubic(Surface):
12 | def __init__(self, r, d, b, mat2, is_square=False, device="cpu"):
13 | Surface.__init__(self, r, d, mat2, is_square=is_square, device=device)
14 | self.b = torch.tensor(b)
15 |
16 | if len(b) == 1:
17 | self.b3 = torch.tensor(b[0])
18 | self.b_degree = 1
19 | elif len(b) == 2:
20 | self.b3 = torch.tensor(b[0])
21 | self.b5 = torch.tensor(b[1])
22 | self.b_degree = 2
23 | elif len(b) == 3:
24 | self.b3 = torch.tensor(b[0])
25 | self.b5 = torch.tensor(b[1])
26 | self.b7 = torch.tensor(b[2])
27 | self.b_degree = 3
28 | else:
29 | raise ValueError("Unsupported cubic degree!")
30 |
31 | self.rotate_angle = 0.0
32 | self.to(device)
33 |
34 | @classmethod
35 | def init_from_dict(cls, surf_dict):
36 | return cls(surf_dict["r"], surf_dict["d"], surf_dict["b"], surf_dict["mat2"])
37 |
38 | def _sag(self, x, y):
39 | """Compute surface height z(x, y)."""
40 |         if self.rotate_angle != 0:
41 |             # Rotate (x, y) into the surface frame. Tuple assignment keeps
42 |             # the rotation consistent: y must be computed from the original
43 |             # x, not the already-rotated one.
44 |             cos_a = float(np.cos(self.rotate_angle))
45 |             sin_a = float(np.sin(self.rotate_angle))
46 |             x, y = x * cos_a - y * sin_a, x * sin_a + y * cos_a
47 |
48 | if self.b_degree == 1:
49 | z = self.b3 * (x**3 + y**3)
50 | elif self.b_degree == 2:
51 | z = self.b3 * (x**3 + y**3) + self.b5 * (x**5 + y**5)
52 | elif self.b_degree == 3:
53 | z = (
54 | self.b3 * (x**3 + y**3)
55 | + self.b5 * (x**5 + y**5)
56 | + self.b7 * (x**7 + y**7)
57 | )
58 | else:
59 | raise ValueError("Unsupported cubic degree!")
60 |
61 | if len(z.size()) == 0:
62 | z = torch.tensor(z).to(self.device)
63 |
64 |         if self.rotate_angle != 0:
65 |             # Rotate the local coordinates back; this has no effect on the
66 |             # returned z and is kept only for symmetry with the forward
67 |             # rotation. Tuple assignment avoids reusing the updated x.
68 |             x, y = x * cos_a + y * sin_a, -x * sin_a + y * cos_a
69 | 
70 |         return z
71 | 
72 | 
73 |
74 | def _dfdxy(self, x, y):
75 | """Compute surface height derivatives to x and y."""
76 |         if self.rotate_angle != 0:
77 |             # Rotate (x, y) into the surface frame. Tuple assignment keeps
78 |             # the pair consistent: y must be computed from the original x,
79 |             # not the already-rotated one.
80 |             cos_a = float(np.cos(self.rotate_angle))
81 |             sin_a = float(np.sin(self.rotate_angle))
82 |             x, y = x * cos_a - y * sin_a, x * sin_a + y * cos_a
83 |
84 | if self.b_degree == 1:
85 | sx = 3 * self.b3 * x**2
86 | sy = 3 * self.b3 * y**2
87 | elif self.b_degree == 2:
88 | sx = 3 * self.b3 * x**2 + 5 * self.b5 * x**4
89 | sy = 3 * self.b3 * y**2 + 5 * self.b5 * y**4
90 | elif self.b_degree == 3:
91 | sx = 3 * self.b3 * x**2 + 5 * self.b5 * x**4 + 7 * self.b7 * x**6
92 | sy = 3 * self.b3 * y**2 + 5 * self.b5 * y**4 + 7 * self.b7 * y**6
93 | else:
94 | raise ValueError("Unsupported cubic degree!")
95 |
96 |         if self.rotate_angle != 0:
97 |             # Chain rule: the partial derivatives above are taken in the
98 |             # rotated frame. Rotate the gradient vector back into the
99 |             # original (unrotated) coordinate frame before returning, again
100 |             # via tuple assignment so sy uses the original sx.
101 |             sx, sy = sx * cos_a + sy * sin_a, -sx * sin_a + sy * cos_a
102 | 
103 |
104 | return sx, sy
105 |
106 | def get_optimizer_params(self, lr, optim_mat=False):
107 | """Return parameters for optimizer."""
108 | params = []
109 |
110 | self.d.requires_grad_(True)
111 | params.append({"params": [self.d], "lr": lr})
112 |
113 | if self.b_degree == 1:
114 | self.b3.requires_grad_(True)
115 | params.append({"params": [self.b3], "lr": lr})
116 | elif self.b_degree == 2:
117 | self.b3.requires_grad_(True)
118 | self.b5.requires_grad_(True)
119 | params.append({"params": [self.b3], "lr": lr})
120 | params.append({"params": [self.b5], "lr": lr * 0.1})
121 | elif self.b_degree == 3:
122 | self.b3.requires_grad_(True)
123 | self.b5.requires_grad_(True)
124 | self.b7.requires_grad_(True)
125 | params.append({"params": [self.b3], "lr": lr})
126 | params.append({"params": [self.b5], "lr": lr * 0.1})
127 | params.append({"params": [self.b7], "lr": lr * 0.01})
128 | else:
129 | raise ValueError("Unsupported cubic degree!")
130 |
131 | if optim_mat and self.mat2.get_name() != "air":
132 | params += self.mat2.get_optimizer_params()
133 |
134 | return params
135 |
136 |
137 |     # =========================================
138 |     # Manufacturing
139 |     # =========================================
140 | def perturb(self, tolerance):
141 | """Perturb the surface"""
142 | self.r_offset = np.random.randn() * tolerance.get("r", 0.001)
143 | if self.d != 0:
144 | self.d_offset = np.random.randn() * tolerance.get("d", 0.0005)
145 |
146 | if self.b_degree == 1:
147 | self.b3_offset = np.random.randn() * tolerance.get("b3", 0.001)
148 | elif self.b_degree == 2:
149 | self.b3_offset = np.random.randn() * tolerance.get("b3", 0.001)
150 | self.b5_offset = np.random.randn() * tolerance.get("b5", 0.001)
151 | elif self.b_degree == 3:
152 | self.b3_offset = np.random.randn() * tolerance.get("b3", 0.001)
153 | self.b5_offset = np.random.randn() * tolerance.get("b5", 0.001)
154 | self.b7_offset = np.random.randn() * tolerance.get("b7", 0.001)
155 |
156 | self.rotate_angle = np.random.randn() * tolerance.get("angle", 0.01)
157 |
158 |
159 |     # =========================================
160 |     # IO
161 |     # =========================================
162 |     def surf_dict(self):
163 |         """Return surface parameters. Only coefficients up to b_degree exist."""
164 |         surf_dict = {"type": "Cubic", "b3": self.b3.item(), "r": self.r}
165 |         if self.b_degree >= 2:
166 |             surf_dict["b5"] = self.b5.item()
167 |         if self.b_degree >= 3:
168 |             surf_dict["b7"] = self.b7.item()
169 |         surf_dict["(d)"] = round(self.d.item(), 4)
170 |         return surf_dict
171 | 
172 |
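173 | if __name__ == "__main__":
174 |     # Minimal usage sketch (run as: python -m
175 |     # deeplens.optics.geometric_surface.cubic): a cubic phase plate
176 |     # z = b3 * (x^3 + y^3), as used in extended-depth-of-field designs.
177 |     surf = Cubic(r=2.0, d=0.0, b=[1e-3], mat2="air")
178 |     x, y = torch.tensor([0.5]), torch.tensor([1.0])
179 |     print(surf._sag(x, y))  # 1e-3 * (0.5**3 + 1.0**3) = 1.125e-3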
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/mirror.py:
--------------------------------------------------------------------------------
1 | """Mirror surface."""
2 | import numpy as np
3 | import torch
4 |
5 | from .base import Surface
6 |
7 |
8 | class Mirror(Surface):
9 | def __init__(self, l, d, device="cpu"):
10 | """Mirror surface."""
11 | Surface.__init__(
12 | self, l / np.sqrt(2), d, mat2="air", is_square=True, device=device
13 | )
14 | self.l = l
15 |
16 | @classmethod
17 | def init_from_dict(cls, surf_dict):
18 | return cls(surf_dict["l"], surf_dict["d"], surf_dict["mat2"])
19 |
20 | def intersect(self, ray, **kwargs):
21 | # Solve intersection
22 | t = (self.d - ray.o[..., 2]) / ray.d[..., 2]
23 | new_o = ray.o + t.unsqueeze(-1) * ray.d
24 | valid = (
25 | (torch.abs(new_o[..., 0]) < self.w / 2)
26 | & (torch.abs(new_o[..., 1]) < self.h / 2)
27 | & (ray.valid > 0)
28 | )
29 |
30 | # Update ray position
31 | new_o = ray.o + ray.d * t.unsqueeze(-1)
32 |
33 | new_o[~valid] = ray.o[~valid]
34 | ray.o = new_o
35 | ray.valid = ray.valid * valid
36 |
37 | if ray.coherent:
38 | new_opl = ray.opl + 1.0 * t
39 | new_opl[~valid] = ray.opl[~valid]
40 | ray.opl = new_opl
41 |
42 | return ray
43 |
44 | def ray_reaction(self, ray, **kwargs):
45 | """Compute output ray after intersection and refraction with the mirror surface."""
46 | # Intersection
47 | ray = self.intersect(ray)
48 |
49 | # Reflection
50 | ray = self.reflect(ray)
51 |
52 | return ray
53 |
54 | # =========================================
55 | # IO
56 | # =========================================
57 | def surf_dict(self):
58 | """Return surface parameters."""
59 | surf_dict = {
60 | "type": self.__class__.__name__,
61 | "l": self.l,
62 | "d": self.d,
63 | "mat2": self.mat2.get_name(),
64 | }
65 | return surf_dict
66 |
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/plane.py:
--------------------------------------------------------------------------------
1 | """Plane surface, typically rectangle. Working as IR filter, lens cover glass or DOE base."""
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from .base import Surface
7 |
8 |
9 | class Plane(Surface):
10 | def __init__(self, r, d, mat2, is_square=False, device="cpu"):
11 | """Plane surface, typically rectangle. Working as IR filter, lens cover glass or DOE base."""
12 | Surface.__init__(self, r, d, mat2=mat2, is_square=is_square, device=device)
13 | self.l = r * np.sqrt(2)
14 |
15 | @classmethod
16 | def init_from_dict(cls, surf_dict):
17 | return cls(surf_dict["r"], surf_dict["d"], surf_dict["mat2"])
18 |
19 | def intersect(self, ray, n=1.0):
20 | """Solve ray-surface intersection and update ray data."""
21 | # Solve intersection
22 | t = (self.d - ray.o[..., 2]) / ray.d[..., 2]
23 | new_o = ray.o + t.unsqueeze(-1) * ray.d
24 | if self.is_square:
25 | valid = (
26 | (torch.abs(new_o[..., 0]) < self.w / 2)
27 | & (torch.abs(new_o[..., 1]) < self.h / 2)
28 | & (ray.valid > 0)
29 | )
30 | else:
31 | valid = (torch.sqrt(new_o[..., 0] ** 2 + new_o[..., 1] ** 2) < self.r) & (
32 | ray.valid > 0
33 | )
34 |
35 | # Update rays
36 | new_o = ray.o + ray.d * t.unsqueeze(-1)
37 |
38 | new_o[~valid] = ray.o[~valid]
39 | ray.o = new_o
40 | ray.valid = ray.valid * valid
41 |
42 | if ray.coherent:
43 | new_opl = ray.opl + n * t
44 | new_opl[~valid] = ray.opl[~valid]
45 | ray.opl = new_opl
46 |
47 | return ray
48 |
49 | def normal_vec(self, ray):
50 | """Calculate surface normal vector at intersection points.
51 |
52 | Normal vector points from the surface toward the side where the light is coming from.
53 | """
54 | normal_vec = torch.zeros_like(ray.d)
55 | normal_vec[..., 2] = -1
56 | normal_vec = torch.where(ray.is_forward, normal_vec, -normal_vec)
57 | return normal_vec
58 |
59 | def _sag(self, x, y):
60 | return torch.zeros_like(x)
61 |
62 | def _dfdxy(self, x, y):
63 | return torch.zeros_like(x), torch.zeros_like(x)
64 |
65 | def _d2fdxy(self, x, y):
66 | return torch.zeros_like(x), torch.zeros_like(x), torch.zeros_like(x)
67 |
68 | # =========================================
69 | # IO
70 | # =========================================
71 | def surf_dict(self):
72 | surf_dict = {
73 | "type": "Plane",
74 | "(l)": self.l,
75 | "r": self.r,
76 | "(d)": round(self.d.item(), 4),
77 | "is_square": True,
78 | "mat2": self.mat2.get_name(),
79 | }
80 |
81 | return surf_dict
82 |
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/spheric.py:
--------------------------------------------------------------------------------
1 | """Spheric surface."""
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from .base import EPSILON, Surface
7 |
8 |
9 | class Spheric(Surface):
10 | def __init__(self, c, r, d, mat2, device="cpu"):
11 | super(Spheric, self).__init__(r, d, mat2, is_square=False, device=device)
12 | self.c = torch.tensor(c)
13 |
14 | self.c_perturb = 0.0
15 | self.d_perturb = 0.0
16 | self.to(device)
17 |
18 | @classmethod
19 | def init_from_dict(cls, surf_dict):
20 | if "roc" in surf_dict and surf_dict["roc"] != 0:
21 | c = 1 / surf_dict["roc"]
22 | else:
23 | c = surf_dict["c"]
24 | return cls(c, surf_dict["r"], surf_dict["d"], surf_dict["mat2"])
25 |
26 | def _sag(self, x, y):
27 | """Compute surfaces sag z = r**2 * c / (1 - sqrt(1 - r**2 * c**2))"""
28 | c = self.c + self.c_perturb
29 |
30 | r2 = x**2 + y**2
31 | sag = c * r2 / (1 + torch.sqrt(1 - r2 * c**2))
32 | return sag
33 |
34 | def _dfdxy(self, x, y):
35 | """Compute surface sag derivatives to x and y: dz / dx, dz / dy."""
36 | c = self.c + self.c_perturb
37 |
38 | r2 = x**2 + y**2
39 | sf = torch.sqrt(1 - r2 * c**2 + EPSILON)
40 | dfdr2 = c / (2 * sf)
41 |
42 | dfdx = dfdr2 * 2 * x
43 | dfdy = dfdr2 * 2 * y
44 |
45 | return dfdx, dfdy
46 |
47 | def _d2fdxy(self, x, y):
48 | """Compute second-order derivatives of the surface sag z = sag(x, y).
49 |
50 | Args:
51 | x (tensor): x coordinate
52 | y (tensor): y coordinate
53 |
54 | Returns:
55 | d2f_dx2 (tensor): ∂²f / ∂x²
56 | d2f_dxdy (tensor): ∂²f / ∂x∂y
57 | d2f_dy2 (tensor): ∂²f / ∂y²
58 | """
59 | c = self.c + self.c_perturb
60 | r2 = x**2 + y**2
61 | sf = torch.sqrt(1 - r2 * c**2 + EPSILON)
62 |
63 | # First derivative (df/dr2)
64 | dfdr2 = c / (2 * sf)
65 |
66 | # Second derivative (d²f/dr2²)
67 | d2f_dr2_dr2 = (c**3) / (4 * sf**3)
68 |
69 | # Compute second-order partial derivatives using the chain rule
70 | d2f_dx2 = 4 * x**2 * d2f_dr2_dr2 + 2 * dfdr2
71 | d2f_dxdy = 4 * x * y * d2f_dr2_dr2
72 | d2f_dy2 = 4 * y**2 * d2f_dr2_dr2 + 2 * dfdr2
73 |
74 | return d2f_dx2, d2f_dxdy, d2f_dy2
75 |
76 | def is_within_data_range(self, x, y):
77 | """Invalid when shape is non-defined."""
78 | c = self.c + self.c_perturb
79 |
80 | valid = (x**2 + y**2) < 1 / c**2
81 | return valid
82 |
83 | def max_height(self):
84 | """Maximum valid height."""
85 | c = self.c + self.c_perturb
86 |
87 | max_height = torch.sqrt(1 / c**2).item() - 0.01
88 | return max_height
89 |
90 | # =========================================
91 | # Manufacturing
92 | # =========================================
93 | def perturb(self, tolerance):
94 | """Randomly perturb surface parameters to simulate manufacturing errors."""
95 | self.r_offset = np.random.randn() * tolerance.get("r", 0.001)
96 | self.d_offset = np.random.randn() * tolerance.get("d", 0.001)
97 |
98 | # =========================================
99 | # Optimization
100 | # =========================================
101 | def get_optimizer_params(self, lr=[0.001, 0.001], optim_mat=False):
102 | """Activate gradient computation for c and d and return optimizer parameters."""
103 | self.c.requires_grad_(True)
104 | self.d.requires_grad_(True)
105 |
106 | params = []
107 | params.append({"params": [self.c], "lr": lr[0]})
108 | params.append({"params": [self.d], "lr": lr[1]})
109 |
110 | if optim_mat and self.mat2.get_name() != "air":
111 | params += self.mat2.get_optimizer_params()
112 |
113 | return params
114 |
115 | # =========================================
116 | # IO
117 | # =========================================
118 | def surf_dict(self):
119 | """Return surface parameters."""
120 | roc = 1 / self.c.item() if self.c.item() != 0 else 0.0
121 | surf_dict = {
122 | "type": "Spheric",
123 | "r": round(self.r, 4),
124 | "(c)": round(self.c.item(), 4),
125 | "roc": round(roc, 4),
126 | "(d)": round(self.d.item(), 4),
127 | "mat2": self.mat2.get_name(),
128 | }
129 |
130 | return surf_dict
131 |
132 | def zmx_str(self, surf_idx, d_next):
133 | """Return Zemax surface string."""
134 | if self.mat2.get_name() == "air":
135 | zmx_str = f"""SURF {surf_idx}
136 | TYPE STANDARD
137 | CURV {self.c.item()}
138 | DISZ {d_next.item()}
139 | DIAM {self.r} 1 0 0 1 ""
140 | """
141 | else:
142 | zmx_str = f"""SURF {surf_idx}
143 | TYPE STANDARD
144 | CURV {self.c.item()}
145 | DISZ {d_next.item()}
146 | GLAS ___BLANK 1 0 {self.mat2.n} {self.mat2.V}
147 | DIAM {self.r} 1 0 0 1 ""
148 | """
149 | return zmx_str
150 |
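151 | 
152 | if __name__ == "__main__":
153 |     # Minimal usage sketch (run as: python -m
154 |     # deeplens.optics.geometric_surface.spheric): sag of a roc = 50 mm
155 |     # sphere, with the analytic derivative checked against autograd.
156 |     surf = Spheric(c=0.02, r=10.0, d=0.0, mat2="air")
157 |     x = torch.tensor([3.0], requires_grad=True)
158 |     y = torch.tensor([4.0])
159 |     surf._sag(x, y).sum().backward()
160 |     print(x.grad.item(), surf._dfdxy(x.detach(), y)[0].item())  # ~equal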
--------------------------------------------------------------------------------
/deeplens/optics/geometric_surface/thinlens.py:
--------------------------------------------------------------------------------
1 | """Thin lens element. Both sides are in air."""
2 |
3 | import torch
4 | import torch.nn.functional as F
5 |
6 | from .base import Surface
7 |
8 |
9 | class ThinLens(Surface):
10 | def __init__(self, r, d, f=100.0, device="cpu"):
11 | """Thin lens surface."""
12 | Surface.__init__(self, r, d, mat2="air", is_square=False, device=device)
13 | self.f = torch.tensor(f)
14 |
15 |     def set_f(self, f):
16 | self.f = torch.tensor(f).to(self.device)
17 |
18 | @classmethod
19 | def init_from_dict(cls, surf_dict):
20 |         return cls(surf_dict["r"], surf_dict["d"], surf_dict["f"])
21 |
22 | # =========================================
23 | # Optimization
24 | # =========================================
25 | def get_optimizer_params(self, lr=[0.001, 0.001], optim_mat=False):
26 | """Activate gradient computation for f and d and return optimizer parameters."""
27 | self.f.requires_grad_(True)
28 | self.d.requires_grad_(True)
29 |
30 | params = []
31 | params.append({"params": [self.f], "lr": lr[0]})
32 | params.append({"params": [self.d], "lr": lr[1]})
33 |
34 | return params
35 |
36 | def intersect(self, ray, n=1.0):
37 | """Solve ray-surface intersection and update rays."""
38 | # Solve intersection
39 | t = (self.d - ray.o[..., 2]) / ray.d[..., 2]
40 | new_o = ray.o + t.unsqueeze(-1) * ray.d
41 | valid = (torch.sqrt(new_o[..., 0] ** 2 + new_o[..., 1] ** 2) < self.r) & (
42 | ray.valid > 0
43 | )
44 |
45 | # Update ray position
46 | new_o = ray.o + ray.d * t.unsqueeze(-1)
47 | ray.o = torch.where(valid.unsqueeze(-1), new_o, ray.o)
48 | ray.valid = ray.valid * valid
49 |
50 | if ray.coherent:
51 | new_opl = ray.opl + t
52 | new_opl[~valid] = ray.opl[~valid]
53 | ray.opl = new_opl
54 |
55 | return ray
56 |
57 | def refract(self, ray, n=1.0):
58 | """For a thin lens, all rays will converge to z = f plane. Therefore we trace the chief-ray (parallel-shift to surface center) to find the final convergence point for each ray.
59 |
60 |         For coherent ray tracing, we can think of it as a Fresnel lens with an infinite refractive index.
61 | (1) Lens maker's equation
62 | (2) Spherical lens function
63 | """
64 | forward = (ray.d * ray.valid.unsqueeze(-1))[..., 2].sum() > 0
65 |
66 | # Calculate convergence point
67 | if forward:
68 | t0 = self.f / ray.d[..., 2]
69 | xy_final = ray.d[..., :2] * t0.unsqueeze(-1)
70 | z_final = (self.d + self.f).view(1).expand_as(xy_final[..., 0].unsqueeze(-1))
71 | o_final = torch.cat([xy_final, z_final], dim=-1)
72 | else:
73 | t0 = -self.f / ray.d[..., 2]
74 | xy_final = ray.d[..., :2] * t0.unsqueeze(-1)
75 | z_final = (self.d - self.f).view(1).expand_as(xy_final[..., 0].unsqueeze(-1))
76 | o_final = torch.cat([xy_final, z_final], dim=-1)
77 |
78 | # New ray direction
79 | if self.f > 0:
80 | new_d = o_final - ray.o
81 | else:
82 | new_d = ray.o - o_final
83 | new_d = F.normalize(new_d, p=2, dim=-1)
84 | ray.d = new_d
85 |
86 | # Optical path length change
87 | if ray.coherent:
88 | if forward:
89 | ray.opl = (
90 | ray.opl
91 | - (ray.o[..., 0] ** 2 + ray.o[..., 1] ** 2)
92 | / self.f
93 | / 2
94 | / ray.d[..., 2]
95 | )
96 | else:
97 | ray.opl = (
98 | ray.opl
99 | + (ray.o[..., 0] ** 2 + ray.o[..., 1] ** 2)
100 | / self.f
101 | / 2
102 | / ray.d[..., 2]
103 | )
104 |
105 | return ray
106 |
107 | def _sag(self, x, y):
108 | return torch.zeros_like(x)
109 |
110 | def _dfdxy(self, x, y):
111 | return torch.zeros_like(x), torch.zeros_like(x)
112 |
113 | # =========================================
114 | # Visualization
115 | # =========================================
116 | def draw_widget(self, ax, color="black", linestyle="-"):
117 | d = self.d.item()
118 | r = self.r
119 |
120 | # Draw a vertical line to represent the thin lens
121 | ax.plot([d, d], [-r, r], color=color, linestyle=linestyle, linewidth=0.75)
122 |
123 | # Draw arrow to indicate the focal length
124 | arrowstyle = '<->' if self.f > 0 else ']-['
125 | ax.annotate(
126 | "",
127 | xy=(d, r),
128 | xytext=(d, -r),
129 | arrowprops=dict(
130 | arrowstyle=arrowstyle, color=color, linestyle=linestyle, linewidth=0.75
131 | ),
132 | )
133 |
134 | # =========================================
135 | # IO
136 | # =========================================
137 | def surf_dict(self):
138 | surf_dict = {
139 | "type": "ThinLens",
140 | "f": round(self.f.item(), 4),
141 | "r": round(self.r, 4),
142 | "(d)": round(self.d.item(), 4),
143 | "mat2": "air",
144 | }
145 |
146 | return surf_dict
147 |
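148 | 
149 | if __name__ == "__main__":
150 |     # Minimal usage sketch (run as: python -m
151 |     # deeplens.optics.geometric_surface.thinlens): parallel rays through
152 |     # an f = 100 mm thin lens converge near the axis at z = d + f.
153 |     from deeplens.optics.ray import Ray
154 | 
155 |     lens = ThinLens(r=5.0, d=0.0, f=100.0)
156 |     o = torch.tensor([[1.0, 0.0, -10.0], [-2.0, 0.0, -10.0]])
157 |     d = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
158 |     ray = lens.refract(lens.intersect(Ray(o, d)))
159 |     ray.prop_to(100.0)
160 |     print(ray.o[:, :2])  # both rays land close to (0, 0)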
--------------------------------------------------------------------------------
/deeplens/optics/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class PSFLoss(nn.Module):
6 | def __init__(self, w_achromatic=1.0, w_psf_size=1.0):
7 | super(PSFLoss, self).__init__()
8 | self.w_achromatic = w_achromatic
9 | self.w_psf_size = w_psf_size
10 |
11 | def forward(self, psf):
12 | # Ensure psf has shape [batch, channels, height, width]
13 | if psf.dim() == 3:
14 | psf = psf.unsqueeze(0) # Add batch dimension
15 | elif psf.dim() == 2:
16 | psf = (
17 | psf.unsqueeze(0).unsqueeze(0).repeat(1, 3, 1, 1)
18 | ) # Add batch and channel dimensions
19 |
20 | batch, channels, height, width = psf.shape
21 |
22 | # Normalize PSF across spatial dimensions
23 | psf_normalized = psf / psf.view(batch, channels, -1).sum(
24 | dim=2, keepdim=True
25 | ).view(batch, channels, 1, 1)
26 |
27 | # Concentration Loss: Minimize the spatial variance
28 | # Compute coordinates
29 | x = torch.linspace(-1, 1, steps=width, device=psf.device, dtype=torch.float32)
30 | y = torch.linspace(-1, 1, steps=height, device=psf.device, dtype=torch.float32)
31 | xv, yv = torch.meshgrid(x, y, indexing="ij")
32 | xv = xv.unsqueeze(0).unsqueeze(0) # Shape [1, 1, H, W]
33 | yv = yv.unsqueeze(0).unsqueeze(0)
34 |
35 | # Calculate mean positions
36 | mean_x = (psf_normalized * xv).sum(dim=(2, 3))
37 | mean_y = (psf_normalized * yv).sum(dim=(2, 3))
38 |
39 | # Calculate variance
40 | var_x = ((xv - mean_x.view(batch, channels, 1, 1)) ** 2 * psf_normalized).sum(
41 | dim=(2, 3)
42 | )
43 | var_y = ((yv - mean_y.view(batch, channels, 1, 1)) ** 2 * psf_normalized).sum(
44 | dim=(2, 3)
45 | )
46 | concentration_loss = var_x + var_y
47 | concentration_loss = concentration_loss.mean()
48 |
49 | # Achromatic Loss: Minimize differences between channels
50 | channel_diff = 0
51 | for i in range(channels):
52 | for j in range(i + 1, channels):
53 | channel_diff += torch.mean((psf[:, i, :, :] - psf[:, j, :, :]) ** 2)
54 | channel_diff = channel_diff / (channels * (channels - 1) / 2)
55 |
56 | total_loss = (
57 | self.w_psf_size * concentration_loss + self.w_achromatic * channel_diff
58 | )
59 | return total_loss
60 |
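61 | 
62 | if __name__ == "__main__":
63 |     # Minimal usage sketch: the loss rewards compact PSFs (low spatial
64 |     # variance) and similar PSFs across color channels (achromatic term).
65 |     criterion = PSFLoss(w_achromatic=1.0, w_psf_size=1.0)
66 |     psf = torch.rand(2, 3, 64, 64)
67 |     psf = psf / psf.sum(dim=(-2, -1), keepdim=True)
68 |     print(criterion(psf).item())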
--------------------------------------------------------------------------------
/deeplens/optics/material/CDGM.AGF:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/deeplens/optics/material/CDGM.AGF
--------------------------------------------------------------------------------
/deeplens/optics/material/PLASTIC2022.AGF:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/deeplens/optics/material/PLASTIC2022.AGF
--------------------------------------------------------------------------------
/deeplens/optics/ray.py:
--------------------------------------------------------------------------------
1 | """Optical ray class.
2 |
3 | Copyright (c) 2025 Xinge Yang (xinge.yang@kaust.edu.sa)
4 |
5 | This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
6 | # The license is only for non-commercial use (commercial licenses can be obtained from authors).
7 | # The material is provided as-is, with no warranties whatsoever.
8 | # If you publish any code, data, or scientific work based on this, please cite our work.
9 | """
10 |
11 | import copy
12 | import warnings
13 | import torch
14 | import torch.nn.functional as F
15 |
16 | from .basics import DEFAULT_WAVE, EPSILON, DeepObj
17 |
18 |
19 | class Ray(DeepObj):
20 | def __init__(self, o, d, wvln=DEFAULT_WAVE, coherent=False, device="cpu"):
21 | """Optical ray class."""
22 |         wvln_min = wvln.min().item() if torch.is_tensor(wvln) else wvln
23 |         assert 0.1 < wvln_min < 1, "Ray wavelength unit should be [um]"
24 | # Ray parameters
25 | self.o = o.to(device) if torch.is_tensor(o) else torch.tensor(o, device=device)
26 | self.d = d.to(device) if torch.is_tensor(d) else torch.tensor(d, device=device)
27 | self.d = F.normalize(self.d, p=2, dim=-1)
28 | if isinstance(wvln, float):
29 | self.wvln = torch.full_like(self.o[..., 0].unsqueeze(-1), wvln, device=device)
30 | else:
31 | self.wvln = wvln.to(device)
32 |
33 | # Auxiliary parameters
34 | self.valid = torch.ones(o.shape[:-1], device=device)
35 | self.en = torch.ones_like(self.valid).unsqueeze(-1)
36 | self.obliq = torch.ones_like(self.en)
37 | self.is_forward = self.d[..., 2].unsqueeze(-1) > 0
38 |
39 | # Coherent ray tracing (initialize coherent light)
40 | self.coherent = coherent # bool
41 | self.opl = torch.zeros_like(self.en)
42 |
43 | self.device = device
44 |
45 | def prop_to(self, z, n=1):
46 | """Ray propagates to a given depth plane.
47 |
48 | Args:
49 | z (float): depth.
50 | n (float, optional): refractive index. Defaults to 1.
51 | """
52 | t = (z - self.o[..., 2]) / self.d[..., 2]
53 | new_o = self.o + self.d * t.unsqueeze(-1)
54 |
55 | is_valid = (self.valid > 0) & (torch.abs(t) >= 0)
56 | new_o[~is_valid] = self.o[~is_valid]
57 | self.o = new_o
58 |
59 |         if self.coherent:
60 |             if t.min().abs() > 100 and torch.get_default_dtype() == torch.float32:
61 |                 warnings.warn(
62 |                     "Should use float64 in coherent ray tracing for precision."
63 |                 )
64 | 
65 |             self.opl = self.opl + n * t
66 |
67 | return self
68 |
69 | def centroid(self):
70 | """Calculate the centroid of the ray, shape (..., num_rays, 3)
71 |
72 | Returns:
73 | torch.Tensor: Centroid of the ray, shape (..., 3)
74 | """
75 | return (self.o * self.valid.unsqueeze(-1)).sum(-2) / self.valid.sum(-1).add(
76 | EPSILON
77 | ).unsqueeze(-1)
78 |
79 | def rms_error(self, center_ref=None):
80 | """Calculate the RMS error of the ray.
81 |
82 | Args:
83 | center_ref (torch.Tensor): Reference center of the ray, shape (..., 3). If None, use the centroid of the ray as reference.
84 | Returns:
85 | torch.Tensor: average RMS error of the ray
86 | """
87 | # Calculate the centroid of the ray as reference
88 | if center_ref is None:
89 | with torch.no_grad():
90 | center_ref = self.centroid()
91 |
92 | center_ref = center_ref.unsqueeze(-2)
93 |
94 | # Calculate RMS error for each region
95 | rms_error = ((self.o[..., :2] - center_ref[..., :2])**2).sum(-1)
96 | rms_error = (rms_error * self.valid).sum(-1) / (self.valid.sum(-1) + EPSILON)
97 | rms_error = rms_error.sqrt()
98 |
99 | # Average RMS error
100 | return rms_error.mean()
101 |
102 | def clone(self, device=None):
103 | """Clone the ray.
104 |
105 |         Can specify the device to clone to. Sometimes we want to store all rays on the CPU and move them to the GPU only when used.
106 | """
107 | if device is None:
108 | return copy.deepcopy(self).to(self.device)
109 | else:
110 | return copy.deepcopy(self).to(device)
111 |
112 | def squeeze(self, dim=None):
113 | """Squeeze the ray.
114 |
115 | Args:
116 | dim (int, optional): dimension to squeeze. Defaults to None.
117 | """
118 | self.o = self.o.squeeze(dim)
119 | self.d = self.d.squeeze(dim)
120 | self.wvln = self.wvln.squeeze(dim)
121 | self.valid = self.valid.squeeze(dim)
122 | self.en = self.en.squeeze(dim)
123 | self.opl = self.opl.squeeze(dim)
124 | self.obliq = self.obliq.squeeze(dim)
125 | self.is_forward = self.is_forward.squeeze(dim)
126 | return self
127 |
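128 | 
129 | if __name__ == "__main__":
130 |     # Minimal usage sketch (run as: python -m deeplens.optics.ray): trace
131 |     # a narrow forward bundle to z = 10 and read off its centroid and RMS
132 |     # spot error.
133 |     o = torch.zeros(100, 3)
134 |     d = torch.tensor([0.0, 0.0, 1.0]) + 0.01 * torch.randn(100, 3)
135 |     ray = Ray(o, d, wvln=0.55)
136 |     ray.prop_to(z=10.0)
137 |     print(ray.centroid(), ray.rms_error())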
--------------------------------------------------------------------------------
/deeplens/optics/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class DiffFloat(torch.autograd.Function):
5 | """Convert double precision tensor to float precision with gradient calculation.
6 |
7 | Args:
8 | input (tensor): Double precision tensor.
9 | """
10 |
11 | @staticmethod
12 | def forward(ctx, x):
13 | ctx.save_for_backward(x)
14 | return x.float()
15 |
16 | @staticmethod
17 | def backward(ctx, grad_output):
18 | (x,) = ctx.saved_tensors
19 | grad_input = grad_output.double()
20 | return grad_input
21 |
22 |
23 | def diff_float(input):
24 | return DiffFloat.apply(input)
25 |
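26 | 
27 | if __name__ == "__main__":
28 |     # Minimal usage sketch: compute in float64 upstream, hand float32 to
29 |     # the network downstream; backward restores a float64 gradient.
30 |     x = torch.rand(4, dtype=torch.float64, requires_grad=True)
31 |     y = diff_float(x)
32 |     y.sum().backward()
33 |     print(y.dtype, x.grad.dtype)  # torch.float32 torch.float64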
--------------------------------------------------------------------------------
/deeplens/sensor/__init__.py:
--------------------------------------------------------------------------------
1 | from .sensor import Sensor
2 | from .rgb_sensor import RGBSensor
3 | from .mono_sensor import MonoSensor
4 | from .event_sensor import EventSensor
5 |
--------------------------------------------------------------------------------
/deeplens/sensor/event_sensor.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .sensor import Sensor
4 |
5 | class EventSensor(Sensor):
6 | """Event sensor"""
7 |
8 | def __init__(self, bit=10, black_level=64):
9 | super().__init__(bit, black_level)
10 |
11 | def forward(self, I_t, I_t_1):
12 | """Converts light illuminance to event stream.
13 |
14 | Args:
15 | I_t: Current frame
16 | I_t_1: Previous frame
17 |
18 | Returns:
19 | Event stream
20 | """
21 |         # TODO: convert light illuminance to an event stream.
22 |         raise NotImplementedError
23 |
24 | def forward_video(self, frames):
25 | """Simulate sensor output from a video.
26 |
27 | Args:
28 | frames: Tensor of shape (B, T, 3, H, W), range [0, 1]
29 |
30 | Returns:
31 | Event stream for the video sequence
32 | """
33 |         raise NotImplementedError
34 |
--------------------------------------------------------------------------------
/deeplens/sensor/isp.py:
--------------------------------------------------------------------------------
1 | """Image Signal Processing (ISP) pipeline converts RAW bayer images to RGB images.
2 |
3 | Reference:
4 | [1] Architectural Analysis of a Baseline ISP Pipeline. https://link.springer.com/chapter/10.1007/978-94-017-9987-4_2.
5 | """
6 |
7 | import torch
8 | import torch.nn as nn
9 |
10 | from .isp_modules import (
11 | AntiAliasingFilter,
12 | AutoWhiteBalance,
13 | BlackLevelCompensation,
14 | ColorCorrectionMatrix,
15 | DeadPixelCorrection,
16 | Demosaic,
17 | Denoise,
18 | GammaCorrection,
19 | LensShadingCorrection,
20 | )
21 |
22 |
23 | class SimpleISP(nn.Module):
24 | """Simple ISP pipeline with the most basic modules."""
25 |
26 | def __init__(self, bit=10, black_level=64, bayer_pattern="rggb"):
27 | super().__init__()
28 |
29 | self.bit = bit
30 | self.black_level = black_level
31 | self.bayer_pattern = bayer_pattern
32 |
33 | self.isp = nn.Sequential(
34 | BlackLevelCompensation(bit=bit, black_level=black_level),
35 | Demosaic(bayer_pattern=bayer_pattern, method="bilinear"),
36 | AutoWhiteBalance(awb_method="gray_world"),
37 | ColorCorrectionMatrix(ccm_matrix=None),
38 | GammaCorrection(gamma_param=2.2),
39 | )
40 |
41 | def forward(self, bayer_nbit):
42 | """Simulate sensor output.
43 |
44 | Args:
45 | bayer_nbit: Input bayer pattern tensor.
46 |
47 | Returns:
48 | Processed RGB image.
49 | """
50 | return self.isp(bayer_nbit)
51 |
52 |
53 | class InvertibleISP(nn.Module):
54 | """Invertible and differentiable ISP pipeline.
55 |
56 |     Reference:
57 | [1] Architectural Analysis of a Baseline ISP Pipeline. https://link.springer.com/chapter/10.1007/978-94-017-9987-4_2. (page 23, 50)
58 | """
59 |
60 | def __init__(self, bit=10, black_level=64, bayer_pattern="rggb"):
61 | super().__init__()
62 |
63 | self.bit = bit
64 | self.black_level = black_level
65 | self.bayer_pattern = bayer_pattern
66 |
67 | self.blc = BlackLevelCompensation(bit=bit, black_level=black_level)
68 | self.demosaic = Demosaic(bayer_pattern=bayer_pattern, method="3x3")
69 | self.awb = AutoWhiteBalance(awb_method="manual", gains=(2.0, 1.0, 1.8))
70 | self.ccm = ColorCorrectionMatrix(ccm_matrix=None)
71 | self.gamma = GammaCorrection(gamma_param=2.2)
72 |
73 | self.isp = nn.Sequential(
74 | self.blc,
75 | self.demosaic,
76 | self.awb,
77 | self.ccm,
78 | self.gamma,
79 | )
80 |
81 | def forward(self, bayer_nbit):
82 |         """Forward pass of the differentiable and invertible ISP pipeline.
83 |
84 | Args:
85 |             bayer_nbit: Input tensor of shape [B, 1, H, W], data range [~black_level, 2^bit-1].
86 |
87 | Returns:
88 | rgb: Output tensor of shape [B, 3, H, W], data range [0, 1].
89 | """
90 | img = self.isp(bayer_nbit)
91 | return img
92 |
93 | def reverse(self, img):
94 | """Inverse ISP.
95 |
96 | Args:
97 | img: Input tensor of shape [B, 3, H, W], data range [0, 1].
98 |
99 | Returns:
100 |             bayer_nbit: Output tensor of shape [B, 1, H, W], data range [~black_level, 2^bit-1].
101 | """
102 | img = self.gamma.reverse(img)
103 | img = self.ccm.reverse(img)
104 | img = self.awb.reverse(img)
105 | bayer = self.demosaic.reverse(img)
106 | bayer = self.blc.reverse(bayer)
107 | return bayer
108 |
109 |
110 | class OpenISP(nn.Module):
111 | """Image Signal Processing (ISP).
112 |
113 | Reference:
114 | [1] Architectural Analysis of a Baseline ISP Pipeline. https://link.springer.com/chapter/10.1007/978-94-017-9987-4_2.
115 | [2] https://github.com/QiuJueqin/fast-openISP/tree/master
116 | """
117 |
118 | def __init__(self, bit=10, black_level=64, bayer_pattern="rggb"):
119 |         super().__init__()
120 |         self.bit = bit
121 |         self.black_level = black_level
122 |
123 | # DPC
124 | self.dead_pixel_threshold = 30
125 |
126 | # AAF
127 | self.raw_denoise_method = "none" # "bilateral"
128 |
129 | # CFA
130 | self.bayer_pattern = bayer_pattern
131 | self.demosaic_method = "bilinear" # "malvar"
132 |
133 | # AWB
134 | self.awb_method = "naive"
135 | self.awb_gains = [2.0, 1.0, 1.8]
136 |
137 | # CCM
138 | # Reference data from https://github.com/QiuJueqin/fast-openISP/blob/master/configs/nikon_d3200.yaml#L57
139 | # Alternative data from https://github.com/timothybrooks/hdr-plus/blob/master/src/finish.cpp#L626
140 | self.ccm_matrix = torch.tensor(
141 | [
142 | [1.8506, -0.7920, -0.0605],
143 | [-0.1562, 1.6455, -0.4912],
144 | [0.0176, -0.5439, 1.5254],
145 | [0.0, 0.0, 0.0],
146 | ]
147 | )
148 |
149 | # GC
150 | self.gamma_param = 2.2
151 |
152 | self.isp_pipeline = nn.Sequential(
153 | # 1. Sensor correction
154 | DeadPixelCorrection(threshold=30),
155 | BlackLevelCompensation(bit=bit, black_level=black_level),
156 | LensShadingCorrection(shading_map=None),
157 | # 2. Before demosaic, remove moiré pattern, denoise, and deblur
158 | AntiAliasingFilter(method=None),
159 | Denoise(method=None),
160 | # 3. Demosaic, process in rgb space
161 | Demosaic(bayer_pattern=bayer_pattern, method="bilinear"),
162 | AutoWhiteBalance(awb_method="gray_world"),
163 | ColorCorrectionMatrix(ccm_matrix=self.ccm_matrix),
164 | GammaCorrection(gamma_param=self.gamma_param),
165 | # 4. Convert to ycrcb space and do image enhancement
166 | )
167 |
168 | def __call__(self, *args, **kwds):
169 | return self.forward(*args, **kwds)
170 |
171 | def forward(self, bayer_nbit):
172 |         """Process a single RAW bayer image with a naive ISP pipeline.
173 |
174 | Args:
175 | bayer_nbit: Input tensor of shape [B, 1, H, W], data range [~black_level, 2^bit-1]
176 |
177 | Returns:
178 | rgb: RGB image, shape (B, 3, H, W), data range [0, 1].
179 |
180 | Reference:
181 | [1] https://github.com/QiuJueqin/fast-openISP/tree/master
182 | """
183 | return self.isp_pipeline(bayer_nbit)
184 |
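A minimal round-trip sketch for `InvertibleISP` on synthetic data (how exactly the round trip closes depends on the `Demosaic` and `AutoWhiteBalance` settings above):

    import torch
    from deeplens.sensor.isp import InvertibleISP

    isp = InvertibleISP(bit=10, black_level=64, bayer_pattern="rggb")
    bayer = torch.randint(64, 1024, (1, 1, 64, 64)).float()  # n-bit RAW
    rgb = isp(bayer)              # [1, 3, 64, 64], range [0, 1]
    bayer_rec = isp.reverse(rgb)  # back to the n-bit bayer domain
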
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/__init__.py:
--------------------------------------------------------------------------------
1 | """ISP modules for image processing."""
2 |
3 | from .anti_alising import AntiAliasingFilter
4 | from .black_level import BlackLevelCompensation
5 | from .color_matrix import ColorCorrectionMatrix
6 | from .color_space import ColorSpaceConversion
7 | from .dead_pixel import DeadPixelCorrection
8 | from .demosaic import Demosaic
9 | from .denoise import Denoise
10 | from .gamma_correction import GammaCorrection
11 | from .lens_shading import LensShadingCorrection
12 | from .white_balance import AutoWhiteBalance
13 |
14 | __all__ = [
15 | "AntiAliasingFilter",
16 | "AutoWhiteBalance",
17 | "BlackLevelCompensation",
18 | "ColorCorrectionMatrix",
19 | "ColorSpaceConversion",
20 | "DeadPixelCorrection",
21 | "Demosaic",
22 | "Denoise",
23 | "GammaCorrection",
24 | "LensShadingCorrection",
25 | ]
26 |
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/anti_alising.py:
--------------------------------------------------------------------------------
1 | """Anti-aliasing filter (AAF)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | class AntiAliasingFilter(nn.Module):
9 | """Anti-Aliasing Filter (AAF)."""
10 |
11 | def __init__(self, method="bilateral"):
12 | """Initialize the Anti-Aliasing Filter.
13 |
14 | Args:
15 | method (str): Denoising method to use. Options: "bilateral", "none", or None.
16 | If "none" or None, no filtering is applied.
17 | """
18 | super(AntiAliasingFilter, self).__init__()
19 | self.method = method
20 |
21 | def forward(self, bayer_nbit):
22 | """Apply anti-aliasing filter to remove moiré pattern.
23 |
24 | Args:
25 | bayer_nbit: Input tensor of shape [B, 1, H, W], data range [0, 1]
26 |
27 | Returns:
28 | Filtered bayer tensor of same shape as input
29 |
30 | Reference:
31 | [1] https://github.com/QiuJueqin/fast-openISP/blob/master/modules/aaf.py
32 | """
33 |         if self.method is None or self.method == "none":
34 |             return bayer_nbit
35 |
36 |         elif self.method == "bilateral":
37 |             raise NotImplementedError("Bilateral anti-aliasing filter is not tested yet.")
38 |             # Convert to int32 for calculations
39 |             bayer = bayer_nbit.to(torch.int32)
40 |
41 |             # Pad by 1 on each side for a centered 3x3 window
42 |             padded = F.pad(bayer, (1, 1, 1, 1), mode="reflect")
43 |
44 | # Get all 9 shifted versions for 3x3 window
45 | shifts = []
46 | for i in range(3):
47 | for j in range(3):
48 | shifts.append(
49 | padded[:, :, i : i + bayer.shape[2], j : j + bayer.shape[3]]
50 | )
51 |
52 | # Initialize result tensor
53 | result = torch.zeros_like(shifts[0], dtype=torch.int32)
54 |
55 | # Apply weights: center pixel (index 4) gets weight 8, others get weight 1
56 | for i, shifted in enumerate(shifts):
57 | weight = 8 if i == 4 else 1
58 | result += weight * shifted
59 |
60 | # Right shift by 4 (divide by 16)
61 | result = result >> 4
62 |
63 | return result.to(torch.uint16)
64 |
65 | else:
66 |             raise ValueError(f"Unknown anti-aliasing method: {self.method}")
67 |
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/black_level.py:
--------------------------------------------------------------------------------
1 | """Black level compensation (BLC)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 |
7 | class BlackLevelCompensation(nn.Module):
8 | """Black level compensation (BLC)."""
9 |
10 | def __init__(self, bit=10, black_level=64):
11 | """Initialize black level compensation.
12 |
13 | Args:
14 | bit: Bit depth of the input image.
15 | black_level: Black level value.
16 | """
17 | super().__init__()
18 | self.bit = bit
19 | self.black_level = black_level
20 |
21 | def forward(self, bayer):
22 | """Black Level Compensation.
23 |
24 | Args:
25 | bayer (torch.Tensor): Input n-bit bayer image [B, 1, H, W], data range [~black_level, 2**bit - 1].
26 |
27 | Returns:
28 | bayer_float (torch.Tensor): Output float bayer image [B, 1, H, W], data range [0, 1].
29 | """
30 | # Subtract black level
31 | bayer_float = (bayer - self.black_level) / (2**self.bit - 1 - self.black_level)
32 |
33 |         # Clamp to [0, 1] (defensive; values should already be in range)
34 | bayer_float = torch.clamp(bayer_float, 0.0, 1.0)
35 |
36 | return bayer_float
37 |
38 | def reverse(self, bayer):
39 | """Inverse black level compensation.
40 |
41 | Args:
42 | bayer: Input tensor of shape [B, 1, H, W], data range [0, 1].
43 |
44 | Returns:
45 | bayer_nbit: Output tensor of shape [B, 1, H, W], data range [0, 2^bit-1].
46 | """
47 | max_value = 2**self.bit - 1
48 | bayer_nbit = bayer * (max_value - self.black_level) + self.black_level
49 | bayer_nbit = torch.round(bayer_nbit)
50 | return bayer_nbit
51 |
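Worked example of the normalization above: with bit=10 and black_level=64, a raw value of 543 maps to (543 - 64) / (1023 - 64) ≈ 0.4995, and `reverse` restores it exactly after rounding:

    import torch
    from deeplens.sensor.isp_modules import BlackLevelCompensation

    blc = BlackLevelCompensation(bit=10, black_level=64)
    raw = torch.full((1, 1, 2, 2), 543.0)
    print(blc(raw))               # ~0.4995
    print(blc.reverse(blc(raw)))  # 543.0
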
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/color_matrix.py:
--------------------------------------------------------------------------------
1 | """Color correction matrix (CCM)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | class ColorCorrectionMatrix(nn.Module):
7 | """Color correction matrix (CCM)."""
8 |
9 | def __init__(self, ccm_matrix=None):
10 | """Initialize color correction matrix.
11 |
12 | Args:
13 | ccm_matrix: Color correction matrix of shape [4, 3].
14 |
15 | Reference:
16 | [1] https://github.com/QiuJueqin/fast-openISP/blob/master/configs/nikon_d3200.yaml#L57
17 | [2] https://github.com/timothybrooks/hdr-plus/blob/master/src/finish.cpp#L626
18 | ccm_matrix = torch.tensor(
19 | [
20 | [1.8506, -0.7920, -0.0605],
21 | [-0.1562, 1.6455, -0.4912],
22 | [0.0176, -0.5439, 1.5254],
23 | [0.0, 0.0, 0.0],
24 | ]
25 | )
26 | """
27 | super().__init__()
28 | if ccm_matrix is None:
29 | ccm_matrix = torch.tensor([
30 | [1.0, 0.0, 0.0],
31 | [0.0, 1.0, 0.0],
32 | [0.0, 0.0, 1.0],
33 | [0.0, 0.0, 0.0]
34 | ])
35 | self.register_buffer('ccm_matrix', ccm_matrix)
36 |
37 | def sample_augmentation(self):
38 | if not hasattr(self, "ccm_org"):
39 | self.ccm_org = self.ccm_matrix
40 | self.ccm_matrix = self.ccm_org + torch.randn_like(self.ccm_org) * 0.01
41 |
42 | def reset_augmentation(self):
43 | self.ccm_matrix = self.ccm_org
44 |
45 | def forward(self, rgb_image):
46 | """Color Correction Matrix. Convert RGB image to sensor color space.
47 |
48 | Args:
49 | rgb_image: Input tensor of shape [B, 3, H, W] in RGB format.
50 |
51 | Returns:
52 | rgb_corrected: Corrected RGB image in sensor color space.
53 | """
54 | # Extract matrix and bias
55 | matrix = self.ccm_matrix[:3, :] # Shape: (3, 3)
56 | bias = self.ccm_matrix[3, :].view(1, 3, 1, 1) # Shape: (1, 3, 1, 1)
57 |
58 | # Apply CCM
59 | # Reshape rgb_image to [B, H, W, 3] for matrix multiplication
60 | rgb_image_perm = rgb_image.permute(0, 2, 3, 1) # [B, H, W, 3]
61 | rgb_corrected = torch.matmul(rgb_image_perm, matrix.T) + bias.squeeze()
62 | rgb_corrected = rgb_corrected.permute(0, 3, 1, 2) # [B, 3, H, W]
63 |
64 | return rgb_corrected
65 |
66 | def reverse(self, img):
67 | """Inverse color correction matrix. Convert sensor color space to RGB image.
68 |
69 | Args:
70 | rgb_image: Input tensor of shape [B, 3, H, W] in sensor color space.
71 | """
72 | ccm_matrix = self.ccm_matrix
73 |
74 | # Extract matrix and bias from CCM
75 | matrix = ccm_matrix[:3, :] # Shape: (3, 3)
76 | bias = ccm_matrix[3, :].view(1, 3, 1, 1) # Shape: (1, 3, 1, 1)
77 |
78 | # Compute the inverse of the CCM matrix
79 | inv_matrix = torch.inverse(matrix) # Shape: (3, 3)
80 |
81 | # Prepare rgb_corrected for matrix multiplication
82 | img_perm = img.permute(0, 2, 3, 1) # [B, H, W, 3]
83 |
84 | # Subtract bias
85 | img_minus_bias = img_perm - bias.squeeze()
86 |
87 | # Apply Inverse CCM
88 | img_original = torch.matmul(img_minus_bias, inv_matrix.T) # [B, H, W, 3]
89 | img_original = img_original.permute(0, 3, 1, 2) # [B, 3, H, W]
90 |
91 | # Clip the values to ensure they are within the valid range
92 | img_original = torch.clamp(img_original, 0.0, 1.0)
93 |
94 | return img_original
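
Self-consistency sketch: with the default identity CCM (zero bias), `reverse` undoes `forward` for inputs in [0, 1]:

    import torch
    from deeplens.sensor.isp_modules import ColorCorrectionMatrix

    ccm = ColorCorrectionMatrix()  # identity matrix, zero bias
    x = torch.rand(1, 3, 8, 8)
    assert torch.allclose(ccm.reverse(ccm(x)), x, atol=1e-6)
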
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/color_space.py:
--------------------------------------------------------------------------------
1 | """Color space conversion (CSC)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | class ColorSpaceConversion(nn.Module):
7 | """Color space conversion (CSC)."""
8 |
9 | def __init__(self):
10 | """Initialize color space conversion module."""
11 | super().__init__()
12 |
13 | # RGB to YCrCb conversion matrix
14 | self.register_buffer("rgb_to_ycrcb_matrix", torch.tensor([
15 | [0.299, 0.587, 0.114],
16 | [0.5, -0.4187, -0.0813],
17 | [-0.1687, -0.3313, 0.5]
18 | ]))
19 |
20 |         # YCrCb to RGB conversion matrix (channel order Y, Cr, Cb, matching the forward matrix)
21 |         self.register_buffer("ycrcb_to_rgb_matrix", torch.tensor([
22 |             [1.0, 1.402, 0.0],
23 |             [1.0, -0.714136, -0.344136],
24 |             [1.0, 0.0, 1.772]
25 |         ]))
26 |
27 | def rgb_to_ycrcb(self, rgb_image):
28 | """Convert RGB to YCrCb.
29 |
30 | Args:
31 | rgb_image: Input tensor of shape [B, 3, H, W] in RGB format.
32 |
33 | Returns:
34 | ycrcb_image: Output tensor of shape [B, 3, H, W] in YCrCb format.
35 |
36 | Reference:
37 | [1] https://github.com/QiuJueqin/fast-openISP/blob/master/modules/csc.py
38 | """
39 | # Reshape for matrix multiplication
40 | rgb_reshaped = rgb_image.permute(0, 2, 3, 1) # [B, H, W, 3]
41 |
42 | # Apply transformation
43 | ycrcb = torch.matmul(rgb_reshaped, self.rgb_to_ycrcb_matrix.T)
44 |
45 | # Add offset to Cr and Cb
46 | ycrcb[..., 1:] += 0.5
47 |
48 | # Reshape back
49 | ycrcb_image = ycrcb.permute(0, 3, 1, 2) # [B, 3, H, W]
50 |
51 | return ycrcb_image
52 |
53 | def ycrcb_to_rgb(self, ycrcb_image):
54 | """Convert YCrCb to RGB.
55 |
56 | Args:
57 | ycrcb_image: Input tensor of shape [B, 3, H, W] in YCrCb format.
58 |
59 | Returns:
60 | rgb_image: Output tensor of shape [B, 3, H, W] in RGB format.
61 | """
62 | # Reshape for matrix multiplication
63 | ycrcb = ycrcb_image.permute(0, 2, 3, 1) # [B, H, W, 3]
64 |
65 | # Subtract offset from Cr and Cb
66 | ycrcb_adj = ycrcb.clone()
67 | ycrcb_adj[..., 1:] -= 0.5
68 |
69 | # Apply transformation
70 | rgb = torch.matmul(ycrcb_adj, self.ycrcb_to_rgb_matrix.T)
71 |
72 | # Clamp values to [0, 1]
73 | rgb = torch.clamp(rgb, 0.0, 1.0)
74 |
75 | # Reshape back
76 | rgb_image = rgb.permute(0, 3, 1, 2) # [B, 3, H, W]
77 |
78 | return rgb_image
79 |
80 | def forward(self, image, conversion="rgb_to_ycrcb"):
81 | """Convert between color spaces.
82 |
83 | Args:
84 | image: Input tensor of shape [B, 3, H, W].
85 | conversion: Conversion direction, "rgb_to_ycrcb" or "ycrcb_to_rgb".
86 |
87 | Returns:
88 | converted_image: Output tensor of shape [B, 3, H, W].
89 | """
90 | if conversion == "rgb_to_ycrcb":
91 | return self.rgb_to_ycrcb(image)
92 | elif conversion == "ycrcb_to_rgb":
93 | return self.ycrcb_to_rgb(image)
94 | else:
95 | raise ValueError(f"Unknown conversion: {conversion}")
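
Round-trip sketch: with both matrices in (Y, Cr, Cb) channel order, rgb -> ycrcb -> rgb agrees up to the truncated matrix coefficients (errors on the order of 1e-3):

    import torch
    from deeplens.sensor.isp_modules import ColorSpaceConversion

    csc = ColorSpaceConversion()
    rgb = torch.rand(1, 3, 8, 8)
    ycrcb = csc(rgb, conversion="rgb_to_ycrcb")
    rgb_rec = csc(ycrcb, conversion="ycrcb_to_rgb")
    print((rgb - rgb_rec).abs().max())  # ~1e-3
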
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/dead_pixel.py:
--------------------------------------------------------------------------------
1 | """Dead pixel correction (DPC)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | class DeadPixelCorrection(nn.Module):
8 | """Dead pixel correction (DPC)."""
9 |
10 | def __init__(self, threshold=30, kernel_size=3):
11 | """Initialize dead pixel correction.
12 |
13 | Args:
14 | threshold: Threshold for detecting dead pixels.
15 | kernel_size: Size of the kernel for correction.
16 | """
17 | super().__init__()
18 | self.threshold = threshold
19 | self.kernel_size = kernel_size if kernel_size % 2 == 1 else kernel_size + 1
20 |
21 | def forward(self, bayer_nbit):
22 | """Dead Pixel Correction.
23 |
24 | Args:
25 | bayer_nbit (torch.Tensor): Input n-bit bayer image [B, 1, H, W].
26 |
27 | Returns:
28 | bayer_corrected (torch.Tensor): Corrected n-bit bayer image [B, 1, H, W].
29 |
30 | Reference:
31 | [1] https://github.com/QiuJueqin/fast-openISP/blob/master/modules/dpc.py
32 | """
33 | padding = self.kernel_size // 2
34 |
35 | # Pad the input
36 | bayer_padded = F.pad(bayer_nbit, (padding, padding, padding, padding), mode='reflect')
37 |
38 | # Extract center pixels
39 | center = bayer_nbit
40 |
41 |         # Median filter over each kernel_size x kernel_size window,
42 |         # vectorized with F.unfold instead of per-pixel Python loops
43 |         B, C, H, W = bayer_nbit.shape
44 |
45 |         # patches: [B, kernel_size**2, H*W]
46 |         patches = F.unfold(bayer_padded.float(), kernel_size=self.kernel_size)
47 |
48 |         # Per-window median, reshaped back to [B, 1, H, W]
49 |         corrected = patches.median(dim=1).values.view(B, C, H, W).to(bayer_nbit.dtype)
50 |
51 | # Detect dead pixels (pixels that differ significantly from their neighbors)
52 | diff = torch.abs(center - corrected)
53 | mask = diff > self.threshold
54 |
55 | # Combine original and corrected values using mask
56 | result = torch.where(mask, corrected, center)
57 |
58 | return result.to(torch.uint16)
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/denoise.py:
--------------------------------------------------------------------------------
1 | """Denoise.
2 |
3 | Reference:
4 | [1] It is important to remove sensor noise before applying demosaic/CFA, otherwise the CFA will produce zipper noise which is harder to remove. "Thus, it is desirable to suppress the zipper noise in the interpolation stage instead of using noise reduction filter after color interpolation", page 31, Architectural Analysis of a Baseline ISP Pipeline. https://link.springer.com/chapter/10.1007/978-94-017-9987-4_2.
5 | [2] Denoise can also be implemented with deep learning methods, replacing the classical denoise filter.
6 | """
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 |
12 | class Denoise(nn.Module):
13 | """Noise reduction."""
14 |
15 | def __init__(self, method="gaussian", kernel_size=3, sigma=0.5):
16 | """Initialize denoise.
17 |
18 | Args:
19 |             method: Noise reduction method, "gaussian", "median", or "none"/None (no-op).
20 | kernel_size: Size of the kernel.
21 | sigma: Standard deviation for Gaussian kernel.
22 | """
23 | super().__init__()
24 | self.method = method
25 | self.kernel_size = kernel_size if kernel_size % 2 == 1 else kernel_size + 1
26 | self.sigma = sigma
27 |
28 | def forward(self, img):
29 | """Apply denoise.
30 |
31 | Args:
32 | img (torch.Tensor): Input tensor of shape [B, C, H, W], data range [0, 1].
33 |
34 | Returns:
35 | img_filtered (torch.Tensor): Denoised image, data range [0, 1].
36 | """
37 | if self.method == "gaussian":
38 | # Create Gaussian kernel
39 | kernel = self._create_gaussian_kernel(self.kernel_size, self.sigma, img.device)
40 |
41 | # Apply Gaussian filter
42 | padding = self.kernel_size // 2
43 | img_filtered = F.conv2d(
44 | img,
45 | kernel.expand(img.shape[1], 1, self.kernel_size, self.kernel_size),
46 | padding=padding,
47 | groups=img.shape[1]
48 | )
49 |
50 |         elif self.method == "median":
51 |             # Apply median filter, vectorized with F.unfold instead of
52 |             # per-pixel Python loops
53 |             padding = self.kernel_size // 2
54 |             B, C, H, W = img.shape
55 |             img_padded = F.pad(img, (padding, padding, padding, padding), mode='reflect')
56 |
57 |             # [B, C*k*k, H*W] -> [B, C, k*k, H*W]
58 |             patches = F.unfold(img_padded, kernel_size=self.kernel_size)
59 |             patches = patches.view(B, C, self.kernel_size**2, H * W)
60 |
61 |             # Per-window median, reshaped back to [B, C, H, W]
62 |             img_filtered = patches.median(dim=2).values.view(B, C, H, W)
63 |
64 |         elif self.method is None or self.method == "none":
65 | # No denoising
66 | img_filtered = img
67 |
68 | else:
69 | raise ValueError(f"Unknown noise reduction method: {self.method}")
70 |
71 | return img_filtered
72 |
73 | def _create_gaussian_kernel(self, kernel_size, sigma, device):
74 | """Create a Gaussian kernel."""
75 | x = torch.arange(kernel_size, device=device) - kernel_size // 2
76 | x = x.float()
77 |
78 | # Create 1D Gaussian kernel
79 | kernel_1d = torch.exp(-0.5 * (x / sigma) ** 2)
80 | kernel_1d = kernel_1d / kernel_1d.sum()
81 |
82 | # Create 2D Gaussian kernel
83 | kernel_2d = torch.outer(kernel_1d, kernel_1d)
84 | kernel_2d = kernel_2d.view(1, 1, kernel_size, kernel_size)
85 |
86 | return kernel_2d
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/gamma_correction.py:
--------------------------------------------------------------------------------
1 | """Gamma correction (GC)."""
2 |
3 | import random
4 |
5 | import torch
6 | import torch.nn as nn
7 |
8 |
9 | class GammaCorrection(nn.Module):
10 | """Gamma correction (GC)."""
11 |
12 | def __init__(self, gamma_param=2.2):
13 | """Initialize gamma correction.
14 |
15 | Args:
16 | gamma_param: Gamma parameter.
17 | """
18 | super().__init__()
19 | self.register_buffer('gamma_param', torch.tensor(gamma_param))
20 |
21 | def sample_augmentation(self):
22 | if not hasattr(self, "gamma_param_org"):
23 | self.gamma_param_org = self.gamma_param
24 | self.gamma_param = self.gamma_param_org + torch.randn_like(self.gamma_param_org) * 0.01
25 |
26 | def reset_augmentation(self):
27 | self.gamma_param = self.gamma_param_org
28 |
29 | def forward(self, img, quantize=False):
30 | """Gamma Correction.
31 |
32 | Args:
33 | img (tensor): Input image. Shape of [B, C, H, W].
34 | quantize (bool): Whether to quantize the image to 8-bit.
35 |
36 | Returns:
37 | img_gamma (tensor): Gamma corrected image. Shape of [B, C, H, W].
38 |
39 | Reference:
40 | [1] "There is no restriction as to where stage gamma correction is placed," page 35, Architectural Analysis of a Baseline ISP Pipeline.
41 | """
42 | img_gamma = torch.pow(torch.clamp(img, min=1e-8), 1 / self.gamma_param)
43 | if quantize:
44 | img_gamma = torch.round(img_gamma * 255) / 255
45 | return img_gamma
46 |
47 | def reverse(self, img):
48 | """Inverse gamma correction.
49 |
50 | Args:
51 | img (tensor): Input image. Shape of [B, C, H, W].
52 |
53 | Returns:
54 | img (tensor): Inverse gamma corrected image. Shape of [B, C, H, W].
55 |
56 | Reference:
57 | [1] https://github.com/google-research/google-research/blob/master/unprocessing/unprocess.py#L78
58 | """
59 | gamma_param = self.gamma_param
60 | img = torch.clip(img, 1e-8) ** gamma_param
61 | return img
62 |
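Round-trip sketch for gamma correction (exact up to the 1e-8 clamp and float precision):

    import torch
    from deeplens.sensor.isp_modules import GammaCorrection

    gc = GammaCorrection(gamma_param=2.2)
    x = torch.rand(1, 3, 8, 8)
    assert torch.allclose(gc.reverse(gc(x)), x, atol=1e-5)
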
--------------------------------------------------------------------------------
/deeplens/sensor/isp_modules/lens_shading.py:
--------------------------------------------------------------------------------
1 | """Lens shading correction (LSC)."""
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 | class LensShadingCorrection(nn.Module):
7 | """Lens shading correction (LSC)."""
8 |
9 | def __init__(self, shading_map=None):
10 | super().__init__()
11 | self.shading_map = shading_map # [H, W]
12 |
13 | def forward(self, x):
14 | """Apply lens shading correction to remove vignetting.
15 |
16 | Args:
17 | x: Input tensor of shape [B, C, H, W].
18 |
19 | Returns:
20 | x: Output tensor of shape [B, C, H, W].
21 | """
22 | return x
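
`LensShadingCorrection.forward` is currently a pass-through. For illustration only, a sketch of what a gain-map correction could look like (the standalone function and the [H', W'] `shading_map` gain layout are hypothetical, not this repository's implementation):

    import torch
    import torch.nn.functional as F

    def apply_lens_shading(x, shading_map):
        # x: [B, C, H, W]; shading_map: [H', W'] gain map (>1 toward corners)
        gain = shading_map[None, None]  # [1, 1, H', W']
        gain = F.interpolate(gain, size=x.shape[-2:], mode="bilinear",
                             align_corners=False)
        return x * gain
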
--------------------------------------------------------------------------------
/deeplens/sensor/mono_sensor.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | from .sensor import Sensor
4 | from .isp import BlackLevelCompensation
5 |
6 | class MonoSensor(Sensor):
7 | """Monochrome sensor"""
8 |
9 | def __init__(
10 | self,
11 | bit=10,
12 | black_level=64,
13 | res=(4000, 3000),
14 | size=(8.0, 6.0),
15 | iso_base=100,
16 | read_noise_std=0.5,
17 | shot_noise_std_alpha=0.4,
18 | shot_noise_std_beta=0.0,
19 | ):
20 | super().__init__(
21 | bit=bit,
22 | black_level=black_level,
23 | res=res,
24 | size=size,
25 | iso_base=iso_base,
26 | read_noise_std=read_noise_std,
27 | shot_noise_std_alpha=shot_noise_std_alpha,
28 | shot_noise_std_beta=shot_noise_std_beta,
29 | )
30 | self.isp = nn.Sequential(
31 | BlackLevelCompensation(bit, black_level),
32 | )
33 |
34 | def forward(self, img_nbit, iso=100.0):
35 | """Converts light illuminance to monochrome image.
36 |
37 | Args:
38 | img_nbit: Tensor of shape (B, 1, H, W), range [~black_level, 2**bit - 1]
39 | iso: ISO value, default 100.0
40 |
41 | Returns:
42 | img_noisy: Processed monochrome image with noise
43 | """
44 | img_noisy = self.simu_noise(img_nbit, iso)
45 | img_noisy = self.isp(img_noisy)
46 | return img_noisy
47 |
48 |
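Usage sketch for `MonoSensor` (the synthetic input is illustrative; `simu_noise` comes from the `Sensor` base class, assumed here to accept an n-bit image as in `forward` above):

    import torch
    from deeplens.sensor import MonoSensor

    sensor = MonoSensor(bit=10, black_level=64)
    img_nbit = torch.randint(64, 1024, (1, 1, 128, 128)).float()
    img = sensor(img_nbit, iso=100.0)  # noisy, normalized to [0, 1]
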
--------------------------------------------------------------------------------
/deeplens/version.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.1.0'
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: deeplens
2 | channels:
3 | - pytorch
4 | - defaults
5 | dependencies:
6 | - _libgcc_mutex=0.1=main
7 | - _openmp_mutex=5.1=1_gnu
8 | - blas=1.0=mkl
9 | - brotli-python=1.0.9=py310h6a678d5_9
10 | - bzip2=1.0.8=h5eee18b_6
11 | - c-ares=1.19.1=h5eee18b_0
12 | - ca-certificates=2025.2.25=h06a4308_0
13 | - certifi=2025.1.31=py310h06a4308_0
14 | - charset-normalizer=3.3.2=pyhd3eb1b0_0
15 | - contourpy=1.3.1=py310hdb19cb5_0
16 | - cuda-cudart=12.4.127=h99ab3db_0
17 | - cuda-cudart_linux-64=12.4.127=hd681fbe_0
18 | - cuda-cupti=12.4.127=h6a678d5_1
19 | - cuda-libraries=12.4.1=h06a4308_1
20 | - cuda-nvrtc=12.4.127=h99ab3db_1
21 | - cuda-nvtx=12.4.127=h6a678d5_1
22 | - cuda-opencl=12.4.127=h6a678d5_0
23 | - cuda-runtime=12.4.1=hb982923_0
24 | - cuda-version=12.4=hbda6634_3
25 | - cycler=0.11.0=pyhd3eb1b0_0
26 | - cyrus-sasl=2.1.28=h52b45da_1
27 | - expat=2.6.4=h6a678d5_0
28 | - ffmpeg=4.3=hf484d3e_0
29 | - filelock=3.13.1=py310h06a4308_0
30 | - fontconfig=2.14.1=h55d465d_3
31 | - fonttools=4.55.3=py310h5eee18b_0
32 | - freetype=2.13.3=h4a9f257_0
33 | - giflib=5.2.2=h5eee18b_0
34 | - gmp=6.3.0=h6a678d5_0
35 | - gmpy2=2.2.1=py310h5eee18b_0
36 | - gnutls=3.6.15=he1e5248_0
37 | - icu=73.1=h6a678d5_0
38 | - idna=3.7=py310h06a4308_0
39 | - imageio=2.37.0=py310h06a4308_0
40 | - intel-openmp=2023.1.0=hdb19cb5_46306
41 | - jinja2=3.1.6=py310h06a4308_0
42 | - jpeg=9e=h5eee18b_3
43 | - kiwisolver=1.4.8=py310h6a678d5_0
44 | - krb5=1.20.1=h143b758_1
45 | - lame=3.100=h7b6447c_0
46 | - lazy_loader=0.4=py310h06a4308_0
47 | - lcms2=2.16=hb9589c4_0
48 | - ld_impl_linux-64=2.40=h12ee557_0
49 | - lerc=4.0.0=h6a678d5_0
50 | - libabseil=20250127.0=cxx17_h6a678d5_0
51 | - libcublas=12.4.5.8=h99ab3db_1
52 | - libcufft=11.2.1.3=h99ab3db_1
53 | - libcufile=1.9.1.3=h99ab3db_1
54 | - libcups=2.4.2=h2d74bed_1
55 | - libcurand=10.3.5.147=h99ab3db_1
56 | - libcurl=8.12.1=hc9e6f67_0
57 | - libcusolver=11.6.1.9=h99ab3db_1
58 | - libcusparse=12.3.1.170=h99ab3db_1
59 | - libdeflate=1.22=h5eee18b_0
60 | - libedit=3.1.20230828=h5eee18b_0
61 | - libev=4.33=h7f8727e_1
62 | - libffi=3.4.4=h6a678d5_1
63 | - libgcc-ng=11.2.0=h1234567_1
64 | - libgfortran-ng=11.2.0=h00389a5_1
65 | - libgfortran5=11.2.0=h1234567_1
66 | - libglib=2.78.4=hdc74915_0
67 | - libgomp=11.2.0=h1234567_1
68 | - libiconv=1.16=h5eee18b_3
69 | - libidn2=2.3.4=h5eee18b_0
70 | - libjpeg-turbo=2.0.0=h9bf148f_0
71 | - libnghttp2=1.57.0=h2d74bed_0
72 | - libnpp=12.2.5.30=h99ab3db_1
73 | - libnvfatbin=12.4.127=h7934f7d_2
74 | - libnvjitlink=12.4.127=h99ab3db_1
75 | - libnvjpeg=12.3.1.117=h6a678d5_1
76 | - libpng=1.6.39=h5eee18b_0
77 | - libpq=17.4=hdbd6064_0
78 | - libprotobuf=5.29.3=hc99497a_0
79 | - libssh2=1.11.1=h251f7ec_0
80 | - libstdcxx-ng=11.2.0=h1234567_1
81 | - libtasn1=4.19.0=h5eee18b_0
82 | - libtiff=4.5.1=hffd6297_1
83 | - libunistring=0.9.10=h27cfd23_0
84 | - libuuid=1.41.5=h5eee18b_0
85 | - libwebp=1.3.2=h11a3e52_0
86 | - libwebp-base=1.3.2=h5eee18b_1
87 | - libxcb=1.15=h7f8727e_0
88 | - libxkbcommon=1.0.1=h097e994_2
89 | - libxml2=2.13.5=hfdd30dd_0
90 | - llvm-openmp=14.0.6=h9e868ea_0
91 | - lz4-c=1.9.4=h6a678d5_1
92 | - markupsafe=3.0.2=py310h5eee18b_0
93 | - matplotlib=3.10.0=py310h06a4308_0
94 | - matplotlib-base=3.10.0=py310hbfdbfaf_0
95 | - mkl=2023.1.0=h213fc3f_46344
96 | - mkl-service=2.4.0=py310h5eee18b_2
97 | - mkl_fft=1.3.11=py310h5eee18b_0
98 | - mkl_random=1.2.8=py310h1128e8f_0
99 | - mpc=1.3.1=h5eee18b_0
100 | - mpfr=4.2.1=h5eee18b_0
101 | - mpmath=1.3.0=py310h06a4308_0
102 | - mysql=8.4.0=h721767e_2
103 | - ncurses=6.4=h6a678d5_0
104 | - nettle=3.7.3=hbbd107a_1
105 | - networkx=3.4.2=py310h06a4308_0
106 | - numpy=2.0.1=py310h5f9d8c6_1
107 | - numpy-base=2.0.1=py310hb5e798b_1
108 | - ocl-icd=2.3.2=h5eee18b_1
109 | - openh264=2.1.1=h4ff587b_0
110 | - openjpeg=2.5.2=he7f1fd0_0
111 | - openldap=2.6.4=h42fbc30_0
112 | - openssl=3.0.16=h5eee18b_0
113 | - packaging=24.2=py310h06a4308_0
114 | - pcre2=10.42=hebb0a14_1
115 | - pillow=11.1.0=py310hcea889d_0
116 | - pip=25.0=py310h06a4308_0
117 | - pyparsing=3.2.0=py310h06a4308_0
118 | - pyqt=6.7.1=py310h6a678d5_0
119 | - pyqt6-sip=13.9.1=py310h5eee18b_0
120 | - pysocks=1.7.1=py310h06a4308_0
121 | - python=3.10.16=he870216_1
122 | - python-dateutil=2.9.0post0=py310h06a4308_2
123 | - pytorch=2.5.1=py3.10_cuda12.4_cudnn9.1.0_0
124 | - pytorch-cuda=12.4=hc786d27_7
125 | - pytorch-mutex=1.0=cuda
126 | - pyyaml=6.0.2=py310h5eee18b_0
127 | - qtbase=6.7.3=hdaa5aa8_0
128 | - qtdeclarative=6.7.3=h6a678d5_0
129 | - qtsvg=6.7.3=he621ea3_0
130 | - qttools=6.7.3=h80c7b02_0
131 | - qtwebchannel=6.7.3=h6a678d5_0
132 | - qtwebsockets=6.7.3=h6a678d5_0
133 | - readline=8.2=h5eee18b_0
134 | - requests=2.32.3=py310h06a4308_1
135 | - scikit-image=0.25.0=py310h6a678d5_0
136 | - scipy=1.15.2=py310h23a989f_1
137 | - setuptools=75.8.0=py310h06a4308_0
138 | - sip=6.10.0=py310h6a678d5_0
139 | - six=1.16.0=pyhd3eb1b0_1
140 | - sqlite=3.45.3=h5eee18b_0
141 | - tbb=2021.8.0=hdb19cb5_0
142 | - tifffile=2025.2.18=py310h06a4308_0
143 | - tk=8.6.14=h39e8969_0
144 | - tomli=2.0.1=py310h06a4308_0
145 | - torchaudio=2.5.1=py310_cu124
146 | - torchtriton=3.1.0=py310
147 | - torchvision=0.20.1=py310_cu124
148 | - tornado=6.4.2=py310h5eee18b_0
149 | - tqdm=4.67.1=py310h2f386ee_0
150 | - typing_extensions=4.12.2=py310h06a4308_0
151 | - tzdata=2025a=h04d1e81_0
152 | - unicodedata2=15.1.0=py310h5eee18b_1
153 | - urllib3=2.3.0=py310h06a4308_0
154 | - wheel=0.45.1=py310h06a4308_0
155 | - xcb-util-cursor=0.1.4=h5eee18b_0
156 | - xz=5.6.4=h5eee18b_1
157 | - yaml=0.2.5=h7b6447c_0
158 | - zlib=1.2.13=h5eee18b_1
159 | - zstd=1.5.6=hc292b87_0
160 | - pip:
161 | - annotated-types==0.7.0
162 | - click==8.1.8
163 | - docker-pycreds==0.4.0
164 | - einops==0.8.1
165 | - fsspec==2025.3.2
166 | - gitdb==4.0.12
167 | - gitpython==3.1.44
168 | - huggingface-hub==0.30.1
169 | - lpips==0.1.4
170 | - opencv-python==4.11.0.86
171 | - platformdirs==4.3.7
172 | - protobuf==5.29.4
173 | - psutil==7.0.0
174 | - pydantic==2.11.2
175 | - pydantic-core==2.33.1
176 | - regex==2024.11.6
177 | - safetensors==0.5.3
178 | - sentry-sdk==2.25.1
179 | - setproctitle==1.3.5
180 | - smmap==5.0.2
181 | - sympy==1.13.1
182 | - tokenizers==0.21.1
183 | - transformers==4.51.0
184 | - typing-inspection==0.4.0
185 | - wandb==0.19.9
186 | # additional packages for 3d view
187 | - pyvista==0.44.2
188 | - pyvistaqt==0.11.2
189 |
190 | prefix: /home/yangx0i/anaconda3/envs/deeplens
191 |
--------------------------------------------------------------------------------
/imgs/autolens1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/autolens1.gif
--------------------------------------------------------------------------------
/imgs/autolens2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/autolens2.gif
--------------------------------------------------------------------------------
/imgs/end2end.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/end2end.gif
--------------------------------------------------------------------------------
/imgs/hybridlens.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/hybridlens.png
--------------------------------------------------------------------------------
/imgs/implicit_net.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/implicit_net.png
--------------------------------------------------------------------------------
/imgs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/logo.png
--------------------------------------------------------------------------------
/imgs/paper_deeplens.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/paper_deeplens.png
--------------------------------------------------------------------------------
/imgs/paper_dff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/imgs/paper_dff.png
--------------------------------------------------------------------------------
/lenses/camera/ef100mm_f2.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/ef100mm_f2.8.png
--------------------------------------------------------------------------------
/lenses/camera/ef35mm_f2.0.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "JP 1992-015611 Example 1 (Canon EF35mm f2)",
3 | "foclen": 34.825179762620515,
4 | "fnum": 2.3096660274122307,
5 | "r_sensor": 21.6,
6 | "d_sensor": 85.2300033569336,
7 | "sensor_size": [
8 | 30.547012947258853,
9 | 30.547012947258853
10 | ],
11 | "surfaces": [
12 | {
13 | "idx": 1,
14 | "type": "Spheric",
15 | "r": 15.015,
16 | "c": 0.03539822995662689,
17 | "roc": 28.25000010523945,
18 | "d": 0.0,
19 | "mat1": "air",
20 | "mat2": "1.58913/61.2",
21 | "d_next": 1.5
22 | },
23 | {
24 | "idx": 2,
25 | "type": "Spheric",
26 | "r": 12.47,
27 | "c": 0.0633312240242958,
28 | "roc": 15.789999568244081,
29 | "d": 1.5,
30 | "mat1": "1.58913/61.2",
31 | "mat2": "air",
32 | "d_next": 15.280000686645508
33 | },
34 | {
35 | "idx": 3,
36 | "type": "Spheric",
37 | "r": 12.29,
38 | "c": 0.02642706222832203,
39 | "roc": 37.839998686206386,
40 | "d": 16.780000686645508,
41 | "mat1": "air",
42 | "mat2": "1.80610/40.9",
43 | "d_next": 10.539999008178711
44 | },
45 | {
46 | "idx": 4,
47 | "type": "Spheric",
48 | "r": 12.29,
49 | "c": -0.050581689924001694,
50 | "roc": -19.76999980630316,
51 | "d": 27.31999969482422,
52 | "mat1": "1.80610/40.9",
53 | "mat2": "1.64769/33.8",
54 | "d_next": 1.5300006866455078
55 | },
56 | {
57 | "idx": 5,
58 | "type": "Spheric",
59 | "r": 12.29,
60 | "c": -0.007039774674922228,
61 | "roc": -142.05000105504763,
62 | "d": 28.850000381469727,
63 | "mat1": "1.64769/33.8",
64 | "mat2": "air",
65 | "d_next": 3.0900001525878906
66 | },
67 | {
68 | "idx": 6,
69 | "type": "Aperture",
70 | "r": 9.8795,
71 | "d": 31.940000534057617,
72 | "is_square": false,
73 | "diffraction": false,
74 | "d_next": 3.539999008178711
75 | },
76 | {
77 | "idx": 7,
78 | "type": "Spheric",
79 | "r": 9.965,
80 | "c": -0.036310821771621704,
81 | "roc": -27.539999129998712,
82 | "d": 35.47999954223633,
83 | "mat1": "air",
84 | "mat2": "1.67270/32.1",
85 | "d_next": 1.9399986267089844
86 | },
87 | {
88 | "idx": 8,
89 | "type": "Spheric",
90 | "r": 9.525,
91 | "c": 0.02182929404079914,
92 | "roc": 45.810001831987385,
93 | "d": 37.41999816894531,
94 | "mat1": "1.67270/32.1",
95 | "mat2": "air",
96 | "d_next": 2.0
97 | },
98 | {
99 | "idx": 9,
100 | "type": "Spheric",
101 | "r": 9.525,
102 | "c": -0.02166377753019333,
103 | "roc": -46.160001348161735,
104 | "d": 39.41999816894531,
105 | "mat1": "air",
106 | "mat2": "1.78472/25.7",
107 | "d_next": 1.1000022888183594
108 | },
109 | {
110 | "idx": 10,
111 | "type": "Spheric",
112 | "r": 9.965,
113 | "c": 0.003612325293943286,
114 | "roc": 276.8299969210082,
115 | "d": 40.52000045776367,
116 | "mat1": "1.78472/25.7",
117 | "mat2": "1.77250/49.6",
118 | "d_next": 3.579998016357422
119 | },
120 | {
121 | "idx": 11,
122 | "type": "Spheric",
123 | "r": 10.175,
124 | "c": -0.04140786826610565,
125 | "roc": -24.149999550171206,
126 | "d": 44.099998474121094,
127 | "mat1": "1.77250/49.6",
128 | "mat2": "air",
129 | "d_next": 0.10000228881835938
130 | },
131 | {
132 | "idx": 12,
133 | "type": "Spheric",
134 | "r": 11.255,
135 | "c": 0.007425558753311634,
136 | "roc": 134.6700003624673,
137 | "d": 44.20000076293945,
138 | "mat1": "air",
139 | "mat2": "1.77250/49.6",
140 | "d_next": 3.200000762939453
141 | },
142 | {
143 | "idx": 13,
144 | "type": "Spheric",
145 | "r": 11.255,
146 | "c": -0.021649707108736038,
147 | "roc": -46.19000132322725,
148 | "d": 47.400001525878906,
149 | "mat1": "1.77250/49.6",
150 | "mat2": "air",
151 | "d_next": 37.83000183105469
152 | }
153 | ]
154 | }
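
In these lens files, `c` and `roc` are redundant reciprocals (curvature vs. radius of curvature), and each surface's `d + d_next` equals the next surface's `d`. A small consistency check (hypothetical snippet, assuming the repository layout above):

    import json

    with open("lenses/camera/ef35mm_f2.0.json") as f:
        lens = json.load(f)
    for s in lens["surfaces"]:
        if s.get("c"):  # apertures carry no curvature entry
            assert abs(s["roc"] - 1.0 / s["c"]) < 1e-4 * abs(s["roc"])
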
--------------------------------------------------------------------------------
/lenses/camera/ef35mm_f2.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/ef35mm_f2.0.png
--------------------------------------------------------------------------------
/lenses/camera/ef35mm_f2.0.zmx:
--------------------------------------------------------------------------------
1 | VERS 190513 80 123457 L123457
2 | MODE SEQ
3 | NAME
4 | PFIL 0 0 0
5 | LANG 0
6 | UNIT MM X W X CM MR CPMM
7 | FLOA
8 | ENVD 2.0E+1 1 0
9 | GFAC 0 0
10 | GCAT OSAKAGASCHEMICAL MISC
11 | XFLN 0. 0. 0.
12 | YFLN 0.0 22.070704979145525 30.90523045170307
13 | WAVL 0.4861327 0.5875618 0.6562725
14 | RAIM 0 0 1 1 0 0 0 0 0
15 | PUSH 0 0 0 0 0 0
16 | SDMA 0 1 0
17 | FTYP 0 0 3 3 0 0 0
18 | ROPD 2
19 | PICB 1
20 | PWAV 2
21 | POLS 1 0 1 0 0 1 0
22 | GLRS 1 0
23 | GSTD 0 100.000 100.000 100.000 100.000 100.000 100.000 0 1 1 0 0 1 1 1 1 1 1
24 | NSCD 100 500 0 1.0E-3 5 1.0E-6 0 0 0 0 0 0 1000000 0 2
25 | COFN QF "COATING.DAT" "SCATTER_PROFILE.DAT" "ABG_DATA.DAT" "PROFILE.GRD"
26 | COFN COATING.DAT SCATTER_PROFILE.DAT ABG_DATA.DAT PROFILE.GRD
27 | SURF 0
28 | TYPE STANDARD
29 | CURV 0.0
30 | DISZ INFINITY
31 | SURF 1
32 | TYPE STANDARD
33 | CURV 0.03539822995662689
34 | DISZ 1.5
35 | GLAS ___BLANK 1 0 1.58913 61.2
36 | DIAM 18.031727235645135 1 0 0 1 ""
37 | SURF 2
38 | TYPE STANDARD
39 | CURV 0.0633312240242958
40 | DISZ 15.280000686645508
41 | DIAM 14.663525141163387 1 0 0 1 ""
42 | SURF 3
43 | TYPE STANDARD
44 | CURV 0.02642706222832203
45 | DISZ 10.539999008178711
46 | GLAS ___BLANK 1 0 1.8061 40.9
47 | DIAM 13.890377611901716 1 0 0 1 ""
48 | SURF 4
49 | TYPE STANDARD
50 | CURV -0.050581689924001694
51 | DISZ 1.5300006866455078
52 | GLAS ___BLANK 1 0 1.64769 33.8
53 | DIAM 13.25413870635828 1 0 0 1 ""
54 | SURF 5
55 | TYPE STANDARD
56 | CURV -0.007039774674922228
57 | DISZ 3.0900001525878906
58 | DIAM 11.30567251320589 1 0 0 1 ""
59 | SURF 6
60 | STOP
61 | TYPE STANDARD
62 | CURV 0.0
63 | DISZ 3.539999008178711
64 | SURF 7
65 | TYPE STANDARD
66 | CURV -0.036310821771621704
67 | DISZ 1.9399986267089844
68 | GLAS ___BLANK 1 0 1.6727 32.1
69 | DIAM 9.84615764740822 1 0 0 1 ""
70 | SURF 8
71 | TYPE STANDARD
72 | CURV 0.02182929404079914
73 | DISZ 2.0
74 | DIAM 10.747041874306118 1 0 0 1 ""
75 | SURF 9
76 | TYPE STANDARD
77 | CURV -0.02166377753019333
78 | DISZ 1.1000022888183594
79 | GLAS ___BLANK 1 0 1.78472 25.7
80 | DIAM 10.44458516400841 1 0 0 1 ""
81 | SURF 10
82 | TYPE STANDARD
83 | CURV 0.003612325293943286
84 | DISZ 3.579998016357422
85 | GLAS ___BLANK 1 0 1.7725 49.6
86 | DIAM 11.748965463271194 1 0 0 1 ""
87 | SURF 11
88 | TYPE STANDARD
89 | CURV -0.04140786826610565
90 | DISZ 0.10000228881835938
91 | DIAM 11.869804041682217 1 0 0 1 ""
92 | SURF 12
93 | TYPE STANDARD
94 | CURV 0.007425558753311634
95 | DISZ 3.200000762939453
96 | GLAS ___BLANK 1 0 1.7725 49.6
97 | DIAM 14.141745527292915 1 0 0 1 ""
98 | SURF 13
99 | TYPE STANDARD
100 | CURV -0.021649707108736038
101 | DISZ 37.83000183105469
102 | DIAM 14.193953218619235 1 0 0 1 ""
103 | SURF 14
104 | TYPE STANDARD
105 | CURV 0.
106 | DISZ 0.0
107 | DIAM 21.106820931884467
108 |
--------------------------------------------------------------------------------
/lenses/camera/ef40mm_f2.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 39.53853478898897,
3 | "fnum": 3.233449383315725,
4 | "r_sensor": 21.6,
5 | "d_sensor": 62.46411895751953,
6 | "sensor_size": [
7 | 30.547012947258853,
8 | 30.547012947258853
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Spheric",
13 | "r": 9.30,
14 | "c": 0.02618829347193241,
15 | "roc": 38.18500052597016,
16 | "d": 0.0,
17 | "mat1": "air",
18 | "mat2": "1.83481/42.7",
19 | "d_next": 3.069999933242798
20 | },
21 | {
22 | "type": "Spheric",
23 | "r": 8.70,
24 | "c": -0.010647586546838284,
25 | "roc": -93.91799687197116,
26 | "d": 3.069999933242798,
27 | "mat1": "1.83481/42.7",
28 | "mat2": "1.53172/48.8",
29 | "d_next": 1.2000000476837158
30 | },
31 | {
32 | "type": "Spheric",
33 | "r": 6.80,
34 | "c": 0.07580926269292831,
35 | "roc": 13.191000208649735,
36 | "d": 4.269999980926514,
37 | "mat1": "1.53172/48.8",
38 | "mat2": "air",
39 | "d_next": 2.1999998092651367
40 | },
41 | {
42 | "type": "Spheric",
43 | "r": 6.70,
44 | "c": 0.0657418966293335,
45 | "roc": 15.21100015775645,
46 | "d": 6.46999979019165,
47 | "mat1": "air",
48 | "mat2": "1.69680/55.5",
49 | "d_next": 2.0099997520446777
50 | },
51 | {
52 | "type": "Spheric",
53 | "r": 6.50,
54 | "c": 0.03523608297109604,
55 | "roc": 28.37999901465479,
56 | "d": 8.479999542236328,
57 | "mat1": "1.69680/55.5",
58 | "mat2": "air",
59 | "d_next": 2.330000877380371
60 | },
61 | {
62 | "type": "Aperture",
63 | "r": 6.42,
64 | "d": 10.8100004196167,
65 | "is_square": false,
66 | "diffraction": false,
67 | "d_next": 5.119999885559082,
68 | "mat1": "air",
69 | "mat2": "air"
70 | },
71 | {
72 | "type": "Spheric",
73 | "r": 6.25,
74 | "c": -0.08593279868364334,
75 | "roc": -11.63700025273752,
76 | "d": 15.930000305175781,
77 | "mat1": "air",
78 | "mat2": "1.69895/30.1",
79 | "d_next": 1.0
80 | },
81 | {
82 | "type": "Spheric",
83 | "r": 6.90,
84 | "c": 0.007555380929261446,
85 | "roc": 132.3560002285354,
86 | "d": 16.93000030517578,
87 | "mat1": "1.69895/30.1",
88 | "mat2": "1.83481/42.7",
89 | "d_next": 3.3500003814697266
90 | },
91 | {
92 | "type": "Spheric",
93 | "r": 7.22,
94 | "c": -0.05528832972049713,
95 | "roc": -18.086999644506687,
96 | "d": 20.280000686645508,
97 | "mat1": "1.83481/42.7",
98 | "mat2": "air",
99 | "d_next": 0.14999961853027344
100 | },
101 | {
102 | "type": "Aspheric",
103 | "r": 7.60,
104 | "c": -0.025309408083558083,
105 | "roc": -39.51099909956553,
106 | "d": 20.43000030517578,
107 | "k": 0.0,
108 | "ai": [
109 | 0.0,
110 | -3.4255001082783565e-05,
111 | 4.6012001320150375e-08,
112 | -2.0635000375079926e-09,
113 | 1.3484999575319456e-11
114 | ],
115 | "mat1": "air",
116 | "mat2": "1.58313/59.4",
117 | "ai2": 0.0,
118 | "ai4": -3.4255001082783565e-05,
119 | "ai6": 4.6012001320150375e-08,
120 | "ai8": -2.0635000375079926e-09,
121 | "ai10": 1.3484999575319456e-11,
122 | "d_next": 3.0799999237060547
123 | },
124 | {
125 | "type": "Spheric",
126 | "r": 8.25,
127 | "c": -0.06505757570266724,
128 | "roc": -15.371000059551895,
129 | "d": 23.510000228881836,
130 | "mat1": "1.58313/59.4",
131 | "mat2": "air",
132 | "d_next": 38.954118728637695
133 | }
134 | ]
135 | }
--------------------------------------------------------------------------------
/lenses/camera/ef40mm_f2.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/ef40mm_f2.8.png
--------------------------------------------------------------------------------
/lenses/camera/ef50mm_f1.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "JP 1987-087922 Example 1 (Canon EF50mm f1.8)",
3 | "foclen": 50.520361337010705,
4 | "fnum": 1.7867431649971506,
5 | "r_sensor": 21.6,
6 | "d_sensor": 72.78326416015625,
7 | "sensor_size": [
8 | 30.547012947258853,
9 | 30.547012947258853
10 | ],
11 | "surfaces": [
12 | {
13 | "type": "Spheric",
14 | "r": 15.495,
15 | "c": 0.029154518619179726,
16 | "roc": 34.30000038972125,
17 | "d": 0.0,
18 | "mat1": "air",
19 | "mat2": "1.6280/57.0",
20 | "d_next": 4.5
21 | },
22 | {
23 | "type": "Spheric",
24 | "r": 15.495,
25 | "c": 0.004037956707179546,
26 | "roc": 247.65000531629903,
27 | "d": 4.5,
28 | "mat1": "1.6280/57.0",
29 | "mat2": "air",
30 | "d_next": 2.4000000953674316
31 | },
32 | {
33 | "type": "Spheric",
34 | "r": 12.805,
35 | "c": 0.046490006148815155,
36 | "roc": 21.509999306065613,
37 | "d": 6.900000095367432,
38 | "mat1": "air",
39 | "mat2": "1.70154/41.2",
40 | "d_next": 4.349999904632568
41 | },
42 | {
43 | "type": "Spheric",
44 | "r": 12.075,
45 | "c": 0.02480774000287056,
46 | "roc": 40.31000001952164,
47 | "d": 11.25,
48 | "mat1": "1.70154/41.2",
49 | "mat2": "air",
50 | "d_next": 0.9200000762939453
51 | },
52 | {
53 | "type": "Spheric",
54 | "r": 12.805,
55 | "c": 0.01274859718978405,
56 | "roc": 78.44000285783123,
57 | "d": 12.170000076293945,
58 | "mat1": "air",
59 | "mat2": "1.6727/25.6",
60 | "d_next": 1.3999996185302734
61 | },
62 | {
63 | "type": "Spheric",
64 | "r": 9.985,
65 | "c": 0.06527414917945862,
66 | "roc": 15.32000052962305,
67 | "d": 13.569999694824219,
68 | "mat1": "1.6727/25.6",
69 | "mat2": "air",
70 | "d_next": 6.75
71 | },
72 | {
73 | "type": "Stop",
74 | "r": 9.645,
75 | "c": 0.0,
76 | "d": 20.31999969482422,
77 | "mat1": "air",
78 | "mat2": "air",
79 | "d_next": 4.950000762939453
80 | },
81 | {
82 | "type": "Spheric",
83 | "r": 9.685,
84 | "c": -0.06317119300365448,
85 | "roc": -15.830000233526532,
86 | "d": 25.270000457763672,
87 | "mat1": "air",
88 | "mat2": "1.6990/30.1",
89 | "d_next": 1.1000003814697266
90 | },
91 | {
92 | "type": "Spheric",
93 | "r": 11.88,
94 | "c": 0.007354563567787409,
95 | "roc": 135.96999886981004,
96 | "d": 26.3700008392334,
97 | "mat1": "1.6990/30.1",
98 | "mat2": "1.7995/42.2",
99 | "d_next": 5.69999885559082
100 | },
101 | {
102 | "type": "Spheric",
103 | "r": 11.88,
104 | "c": -0.047080978751182556,
105 | "roc": -21.240000240540507,
106 | "d": 32.06999969482422,
107 | "mat1": "1.7995/42.2",
108 | "mat2": "air",
109 | "d_next": 0.15000152587890625
110 | },
111 | {
112 | "type": "Spheric",
113 | "r": 11.8,
114 | "c": 0.006167129147797823,
115 | "roc": 162.15000140820516,
116 | "d": 32.220001220703125,
117 | "mat1": "air",
118 | "mat2": "1.7200/55.2",
119 | "d_next": 3.0
120 | },
121 | {
122 | "type": "Spheric",
123 | "r": 11.8,
124 | "c": -0.020012008026242256,
125 | "roc": -49.969997947665945,
126 | "d": 35.220001220703125,
127 | "mat1": "1.7200/55.2",
128 | "mat2": "air",
129 | "d_next": 37.563262939453125
130 | }
131 | ]
132 | }
--------------------------------------------------------------------------------
/lenses/camera/ef50mm_f1.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/ef50mm_f1.8.png
--------------------------------------------------------------------------------
/lenses/camera/ef85mm_f1.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "JP 1993-157964 Example 1 (Canon EF85mm f1.8 USM)",
3 | "foclen": 83.87447467479473,
4 | "fnum": 1.6915526808585557,
5 | "r_sensor": 21.6,
6 | "d_sensor": 112.43000030517578,
7 | "sensor_size": [
8 | 30.547012947258853,
9 | 30.547012947258853
10 | ],
11 | "surfaces": [
12 | {
13 | "idx": 1,
14 | "type": "Spheric",
15 | "r": 26.6,
16 | "c": 0.0053949072025716305,
17 | "roc": 185.36000017262995,
18 | "d": 0.0,
19 | "mat1": "air",
20 | "mat2": "1.65160/58.5",
21 | "d_next": 4.400000095367432
22 | },
23 | {
24 | "idx": 2,
25 | "type": "Spheric",
26 | "r": 26.6,
27 | "c": -0.0023383060470223427,
28 | "roc": -427.66001536600606,
29 | "d": 4.400000095367432,
30 | "mat1": "1.65160/58.5",
31 | "mat2": "air",
32 | "d_next": 0.19999980926513672
33 | },
34 | {
35 | "idx": 3,
36 | "type": "Spheric",
37 | "r": 23.76,
38 | "c": 0.015728216618299484,
39 | "roc": 63.57999919943364,
40 | "d": 4.599999904632568,
41 | "mat1": "air",
42 | "mat2": "1.77250/49.6",
43 | "d_next": 4.599999904632568
44 | },
45 | {
46 | "idx": 4,
47 | "type": "Spheric",
48 | "r": 23.76,
49 | "c": 0.006397134158760309,
50 | "roc": 156.31999817146064,
51 | "d": 9.199999809265137,
52 | "mat1": "1.77250/49.6",
53 | "mat2": "air",
54 | "d_next": 0.15000057220458984
55 | },
56 | {
57 | "idx": 5,
58 | "type": "Spheric",
59 | "r": 21.19,
60 | "c": 0.02942041866481304,
61 | "roc": 33.989998966126365,
62 | "d": 9.350000381469727,
63 | "mat1": "air",
64 | "mat2": "1.71300/53.8",
65 | "d_next": 8.19999885559082
66 | },
67 | {
68 | "idx": 6,
69 | "type": "Spheric",
70 | "r": 20.21,
71 | "c": 0.004132573027163744,
72 | "roc": 241.97999489105635,
73 | "d": 17.549999237060547,
74 | "mat1": "1.71300/53.8",
75 | "mat2": "air",
76 | "d_next": 0.15999984741210938
77 | },
78 | {
79 | "idx": 7,
80 | "type": "Spheric",
81 | "r": 20.21,
82 | "c": 0.0035959582310169935,
83 | "roc": 278.08999319694107,
84 | "d": 17.709999084472656,
85 | "mat1": "air",
86 | "mat2": "1.71736/29.5",
87 | "d_next": 3.200000762939453
88 | },
89 | {
90 | "idx": 8,
91 | "type": "Spheric",
92 | "r": 20.21,
93 | "c": 0.03786444664001465,
94 | "roc": 26.409999055504834,
95 | "d": 20.90999984741211,
96 | "mat1": "1.71736/29.5",
97 | "mat2": "air",
98 | "d_next": 8.819999694824219
99 | },
100 | {
101 | "idx": 9,
102 | "type": "Aperture",
103 | "r": 16.16,
104 | "d": 29.729999542236328,
105 | "is_square": false,
106 | "diffraction": false,
107 | "d_next": 3.5900001525878906
108 | },
109 | {
110 | "idx": 10,
111 | "type": "Spheric",
112 | "r": 14.9945,
113 | "c": -0.009854158386588097,
114 | "roc": -101.48000070315898,
115 | "d": 33.31999969482422,
116 | "mat1": "air",
117 | "mat2": "1.80518/25.4",
118 | "d_next": 2.4000015258789062
119 | },
120 | {
121 | "idx": 11,
122 | "type": "Spheric",
123 | "r": 14.74,
124 | "c": -0.018903592601418495,
125 | "roc": -52.89999742826459,
126 | "d": 35.720001220703125,
127 | "mat1": "1.80518/25.4",
128 | "mat2": "1.51633/64.2",
129 | "d_next": 1.5
130 | },
131 | {
132 | "idx": 12,
133 | "type": "Spheric",
134 | "r": 14.25,
135 | "c": 0.027502750977873802,
136 | "roc": 36.35999907080235,
137 | "d": 37.220001220703125,
138 | "mat1": "1.51633/64.2",
139 | "mat2": "air",
140 | "d_next": 16.299999237060547
141 | },
142 | {
143 | "idx": 13,
144 | "type": "Spheric",
145 | "r": 13.49,
146 | "c": -0.00011039245873689651,
147 | "roc": -9058.589793559599,
148 | "d": 53.52000045776367,
149 | "mat1": "air",
150 | "mat2": "1.80518/25.4",
151 | "d_next": 2.0
152 | },
153 | {
154 | "idx": 14,
155 | "type": "Spheric",
156 | "r": 14.64,
157 | "c": 0.023894863203167915,
158 | "roc": 41.849998951549665,
159 | "d": 55.52000045776367,
160 | "mat1": "1.80518/25.4",
161 | "mat2": "1.80610/41.0",
162 | "d_next": 12.39999771118164
163 | },
164 | {
165 | "idx": 15,
166 | "type": "Spheric",
167 | "r": 17.48,
168 | "c": -0.00243445229716599,
169 | "roc": -410.7700122791999,
170 | "d": 67.91999816894531,
171 | "mat1": "1.80610/41.0",
172 | "mat2": "air",
173 | "d_next": 0.20000457763671875
174 | },
175 | {
176 | "idx": 16,
177 | "type": "Spheric",
178 | "r": 17.48,
179 | "c": 0.013687380589544773,
180 | "roc": 73.05999810978142,
181 | "d": 68.12000274658203,
182 | "mat1": "air",
183 | "mat2": "1.77250/49.6",
184 | "d_next": 5.399993896484375
185 | },
186 | {
187 | "idx": 17,
188 | "type": "Spheric",
189 | "r": 18.62,
190 | "c": -0.008193363435566425,
191 | "roc": -122.04999910770684,
192 | "d": 73.5199966430664,
193 | "mat1": "1.77250/49.6",
194 | "mat2": "air",
195 | "d_next": 38.910003662109375
196 | }
197 | ]
198 | }
--------------------------------------------------------------------------------
/lenses/camera/ef85mm_f1.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/ef85mm_f1.8.png
--------------------------------------------------------------------------------
/lenses/camera/rf16mm_f2.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/rf16mm_f2.8.png
--------------------------------------------------------------------------------
/lenses/camera/rf24mm_f1.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/rf24mm_f1.8.png
--------------------------------------------------------------------------------
/lenses/camera/rf35mm_f1.8.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/rf35mm_f1.8.json
--------------------------------------------------------------------------------
/lenses/camera/rf50mm_f1.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "US 2021/0263286 Example 1 (Canon RF50mm F1.8 STM)",
3 | "foclen": 49.8176600996622,
4 | "fnum": 1.855399138024343,
5 | "r_sensor": 21.6,
6 | "d_sensor": 59.58000183105469,
7 | "sensor_size": [
8 | 30.547012947258853,
9 | 30.547012947258853
10 | ],
11 | "surfaces": [
12 | {
13 | "type": "Spheric",
14 | "r": 15.0,
15 | "c": 0.03493937849998474,
16 | "roc": 28.621001372432445,
17 | "d": 0.0,
18 | "mat1": "air",
19 | "mat2": "1.83481/42.7",
20 | "d_next": 4.199999809265137
21 | },
22 | {
23 | "type": "Spheric",
24 | "r": 14.24,
25 | "c": 0.014676528982818127,
26 | "roc": 68.13600144630274,
27 | "d": 4.199999809265137,
28 | "mat1": "1.83481/42.7",
29 | "mat2": "air",
30 | "d_next": 0.18000030517578125
31 | },
32 | {
33 | "type": "Spheric",
34 | "r": 12.0,
35 | "c": 0.05626828595995903,
36 | "roc": 17.772000389555284,
37 | "d": 4.380000114440918,
38 | "mat1": "air",
39 | "mat2": "1.79952/42.2",
40 | "d_next": 6.699999809265137
41 | },
42 | {
43 | "type": "Spheric",
44 | "r": 10.35,
45 | "c": 0.01679966412484646,
46 | "roc": 59.524999581450835,
47 | "d": 11.079999923706055,
48 | "mat1": "1.79952/42.2",
49 | "mat2": "1.80518/25.4",
50 | "d_next": 1.1000003814697266
51 | },
52 | {
53 | "type": "Spheric",
54 | "r": 8.36,
55 | "c": 0.08751203119754791,
56 | "roc": 11.427000222890724,
57 | "d": 12.180000305175781,
58 | "mat1": "1.80518/25.4",
59 | "mat2": "air",
60 | "d_next": 5.270000457763672
61 | },
62 | {
63 | "type": "Aperture",
64 | "r": 8.12,
65 | "d": 17.450000762939453,
66 | "is_square": false,
67 | "diffraction": false,
68 | "d_next": 6.19999885559082
69 | },
70 | {
71 | "type": "Spheric",
72 | "r": 7.5,
73 | "c": -0.05978715792298317,
74 | "roc": -16.72599994280015,
75 | "d": 23.649999618530273,
76 | "mat1": "air",
77 | "mat2": "1.67270/32.1",
78 | "d_next": 0.8999996185302734
79 | },
80 | {
81 | "type": "Spheric",
82 | "r": 7.73,
83 | "c": -0.03352442383766174,
84 | "roc": -29.828998847001447,
85 | "d": 24.549999237060547,
86 | "mat1": "1.67270/32.1",
87 | "mat2": "air",
88 | "d_next": 0.8299999237060547
89 | },
90 | {
91 | "type": "Aspheric",
92 | "r": 7.76,
93 | "c": -0.03999999910593033,
94 | "roc": -25.00000055879356,
95 | "d": 25.3799991607666,
96 | "k": 0.0,
97 | "ai": [
98 | 0.0,
99 | -4.120319863432087e-05,
100 | -2.9001500934100477e-07,
101 | -4.671190101390721e-09,
102 | 7.906460208761956e-11,
103 | -9.284699745149005e-13
104 | ],
105 | "mat1": "air",
106 | "mat2": "1.53110/55.9",
107 | "ai2": 0.0,
108 | "ai4": -4.120319863432087e-05,
109 | "ai6": -2.9001500934100477e-07,
110 | "ai8": -4.671190101390721e-09,
111 | "ai10": 7.906460208761956e-11,
112 | "ai12": -9.284699745149005e-13,
113 | "d_next": 2.950000762939453
114 | },
115 | {
116 | "type": "Aspheric",
117 | "r": 9.7,
118 | "c": -0.05442769452929497,
119 | "roc": -18.37299941965691,
120 | "d": 28.329999923706055,
121 | "k": 0.0,
122 | "ai": [
123 | 0.0,
124 | -2.4161900000763126e-05,
125 | -3.291459904630756e-07,
126 | 1.9109799853644915e-10,
127 | -9.285930314614776e-13,
128 | -2.2919299379371705e-13
129 | ],
130 | "mat1": "1.53110/55.9",
131 | "mat2": "air",
132 | "ai2": 0.0,
133 | "ai4": -2.4161900000763126e-05,
134 | "ai6": -3.291459904630756e-07,
135 | "ai8": 1.9109799853644915e-10,
136 | "ai10": -9.285930314614776e-13,
137 | "ai12": -2.2919299379371705e-13,
138 | "d_next": 0.9799995422363281
139 | },
140 | {
141 | "type": "Spheric",
142 | "r": 12.23,
143 | "c": 0.0035713776014745235,
144 | "roc": 280.0039961014281,
145 | "d": 29.309999465942383,
146 | "mat1": "air",
147 | "mat2": "1.7340/51.5",
148 | "d_next": 4.600000381469727
149 | },
150 | {
151 | "type": "Spheric",
152 | "r": 12.85,
153 | "c": -0.02941003441810608,
154 | "roc": -34.00200033034837,
155 | "d": 33.90999984741211,
156 | "mat1": "1.7340/51.5",
157 | "mat2": "air",
158 | "d_next": 25.670001983642578
159 | }
160 | ]
161 | }
--------------------------------------------------------------------------------
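A prescription like the one above loads directly into a differentiable lens object. A minimal sketch reusing the `GeoLens` constructor and `draw_layout` call that appear in `visualization_demo.py` later in this repo; exposing `foclen`/`fnum` as attributes after loading is an assumption:

```python
from deeplens import GeoLens

# Load the Canon RF50mm F1.8 prescription above (path as in this repo).
lens = GeoLens("./lenses/camera/rf50mm_f1.8.json")

# Assumption: top-level JSON keys such as "foclen" and "fnum" are exposed
# as attributes after loading.
print(lens.foclen, lens.fnum)

# Save a 2D cross-section of the lens layout, as in visualization_demo.py.
lens.draw_layout("./rf50mm_f1.8_layout.png")
```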
/lenses/camera/rf50mm_f1.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/rf50mm_f1.8.png
--------------------------------------------------------------------------------
/lenses/camera/sigma70mm_f2.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "JP 2008-020656 Example 1 (Sigma Macro 70mm F2.8 EX DG)",
3 | "foclen": 69.168,
4 | "fnum": 2.912,
5 | "r_sensor": 21.6,
6 | "d_sensor": 124.856,
7 | "(sensor_size)": [
8 | 23.476,
9 | 23.476
10 | ],
11 | "surfaces": [
12 | {
13 | "idx": 1,
14 | "type": "Spheric",
15 | "r": 19.21,
16 | "(c)": 0.003,
17 | "roc": 369.55,
18 | "(d)": 0.0,
19 | "mat2": "1.54072/47.2",
20 | "d_next": 1.8
21 | },
22 | {
23 | "idx": 2,
24 | "type": "Spheric",
25 | "r": 18.0,
26 | "(c)": 0.029,
27 | "roc": 34.98,
28 | "(d)": 1.8,
29 | "mat2": "air",
30 | "d_next": 13.45
31 | },
32 | {
33 | "idx": 3,
34 | "type": "Spheric",
35 | "r": 16.5,
36 | "(c)": 0.022,
37 | "roc": 46.16,
38 | "(d)": 15.25,
39 | "mat2": "1.77250/49.6",
40 | "d_next": 5.65
41 | },
42 | {
43 | "idx": 4,
44 | "type": "Spheric",
45 | "r": 16.5,
46 | "(c)": -0.007,
47 | "roc": -135.9,
48 | "(d)": 20.9,
49 | "mat2": "air",
50 | "d_next": 0.15
51 | },
52 | {
53 | "idx": 5,
54 | "type": "Spheric",
55 | "r": 14.4,
56 | "(c)": 0.032,
57 | "roc": 31.73,
58 | "(d)": 21.05,
59 | "mat2": "1.69680/55.5",
60 | "d_next": 3.5
61 | },
62 | {
63 | "idx": 6,
64 | "type": "Spheric",
65 | "r": 14.4,
66 | "(c)": 0.014,
67 | "roc": 69.5,
68 | "(d)": 24.55,
69 | "mat2": "air",
70 | "d_next": 5.79
71 | },
72 | {
73 | "idx": 7,
74 | "type": "Spheric",
75 | "r": 15.0,
76 | "(c)": 0.001,
77 | "roc": 1000.0,
78 | "(d)": 30.34,
79 | "mat2": "1.60342/38.0",
80 | "d_next": 1.0
81 | },
82 | {
83 | "idx": 8,
84 | "type": "Spheric",
85 | "r": 13.5,
86 | "(c)": 0.044,
87 | "roc": 22.74,
88 | "(d)": 31.34,
89 | "mat2": "air",
90 | "d_next": 12.1
91 | },
92 | {
93 | "idx": 9,
94 | "type": "Aperture",
95 | "r": 10.5,
96 | "(d)": 43.44,
97 | "is_square": false,
98 | "diffraction": false,
99 | "d_next": 4.2
100 | },
101 | {
102 | "idx": 10,
103 | "type": "Spheric",
104 | "r": 11.2,
105 | "(c)": -0.037,
106 | "roc": -27.01,
107 | "(d)": 47.64,
108 | "mat2": "1.58144/40.9",
109 | "d_next": 1.0
110 | },
111 | {
112 | "idx": 11,
113 | "type": "Spheric",
114 | "r": 12.5,
115 | "(c)": 0.01,
116 | "roc": 101.88,
117 | "(d)": 48.64,
118 | "mat2": "1.56045/71.6",
119 | "d_next": 4.7
120 | },
121 | {
122 | "idx": 12,
123 | "type": "Spheric",
124 | "r": 12.5,
125 | "(c)": -0.028,
126 | "roc": -35.28,
127 | "(d)": 53.34,
128 | "mat2": "air",
129 | "d_next": 1.85
130 | },
131 | {
132 | "idx": 13,
133 | "type": "Spheric",
134 | "r": 13.0,
135 | "(c)": 0.001,
136 | "roc": 1000.0,
137 | "(d)": 55.19,
138 | "mat2": "1.56045/71.6",
139 | "d_next": 3.05
140 | },
141 | {
142 | "idx": 14,
143 | "type": "Spheric",
144 | "r": 13.0,
145 | "(c)": -0.019,
146 | "roc": -53.2,
147 | "(d)": 58.24,
148 | "mat2": "air",
149 | "d_next": 0.15
150 | },
151 | {
152 | "idx": 15,
153 | "type": "Spheric",
154 | "r": 13.3,
155 | "(c)": 0.008,
156 | "roc": 125.46,
157 | "(d)": 58.39,
158 | "mat2": "1.49700/81.6",
159 | "d_next": 3.0
160 | },
161 | {
162 | "idx": 16,
163 | "type": "Spheric",
164 | "r": 13.3,
165 | "(c)": -0.011,
166 | "roc": -89.7,
167 | "(d)": 61.39,
168 | "mat2": "air",
169 | "d_next": 1.5
170 | },
171 | {
172 | "idx": 17,
173 | "type": "Spheric",
174 | "r": 14.4,
175 | "(c)": 0.0,
176 | "roc": 0.0,
177 | "(d)": 62.89,
178 | "mat2": "1.64000/60.2",
179 | "d_next": 1.2
180 | },
181 | {
182 | "idx": 18,
183 | "type": "Spheric",
184 | "r": 13.0,
185 | "(c)": 0.019,
186 | "roc": 52.82,
187 | "(d)": 64.09,
188 | "mat2": "air",
189 | "d_next": 2.6
190 | },
191 | {
192 | "idx": 19,
193 | "type": "Spheric",
194 | "r": 15.0,
195 | "(c)": 0.001,
196 | "roc": 931.0,
197 | "(d)": 66.69,
198 | "mat2": "1.83481/42.7",
199 | "d_next": 2.35
200 | },
201 | {
202 | "idx": 20,
203 | "type": "Spheric",
204 | "r": 15.0,
205 | "(c)": -0.009,
206 | "roc": -113.6,
207 | "(d)": 69.04,
208 | "mat2": "air",
209 | "d_next": 55.816
210 | }
211 | ]
212 | }
--------------------------------------------------------------------------------
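In this prescription each surface records its curvature `c` (the reciprocal of `roc`), its cumulative axial position `d`, and the gap `d_next` to the next surface; some keys are stored in parentheses (e.g. `"(c)"`, `"(d)"`), apparently as informational duplicates. A minimal sanity check over that schema, standard library only (the parenthesized-key handling is an assumption about the file convention):

```python
import json

with open("./lenses/camera/sigma70mm_f2.8.json") as f:
    lens = json.load(f)

def get(surf, key):
    """Read a key whether it is stored plainly or in parentheses."""
    return surf.get(key, surf.get(f"({key})"))

prev_z = None
for surf in lens["surfaces"]:
    c, roc = get(surf, "c"), surf.get("roc")
    if c and roc:
        # c is the curvature, i.e. 1/roc (coarse tolerance: c is stored
        # rounded to three decimals in this file).
        assert abs(c - 1.0 / roc) < 1e-2
    # Each surface's axial position d advances by the previous d_next.
    z = get(surf, "d")
    if prev_z is not None:
        assert abs(z - prev_z) < 1e-6
    prev_z = z + surf["d_next"]
```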
/lenses/camera/sigma70mm_f2.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/sigma70mm_f2.8.png
--------------------------------------------------------------------------------
/lenses/camera/yongnuo_50mm_f1.8.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 48.90925059746878,
3 | "fnum": 1.7575576898499428,
4 | "r_sensor": 21.6,
5 | "d_sensor": 70.22348022460938,
6 | "sensor_size": [
7 | 30.547012947258853,
8 | 30.547012947258853
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Spheric",
13 | "r": 16.34,
14 | "c": 0.02137208729982376,
15 | "roc": 46.79000164893797,
16 | "d": 0.0,
17 | "mat1": "air",
18 | "mat2": "1.68854/59.43",
19 | "d_next": 4.480000019073486
20 | },
21 | {
22 | "type": "Spheric",
23 | "r": 16.34,
24 | "c": 0.0009328357991762459,
25 | "roc": 1072.0000249594457,
26 | "d": 4.480000019073486,
27 | "mat1": "1.68854/59.43",
28 | "mat2": "air",
29 | "d_next": 2.4499998092651367
30 | },
31 | {
32 | "type": "Spheric",
33 | "r": 13.5,
34 | "c": 0.04818812757730484,
35 | "roc": 20.75199951265527,
36 | "d": 6.929999828338623,
37 | "mat1": "air",
38 | "mat2": "1.8061/40.61",
39 | "d_next": 4.349999904632568
40 | },
41 | {
42 | "type": "Spheric",
43 | "r": 12.2,
44 | "c": 0.025728104636073112,
45 | "roc": 38.86800112737066,
46 | "d": 11.279999732971191,
47 | "mat1": "1.8061/40.61",
48 | "mat2": "air",
49 | "d_next": 0.8850002288818359
50 | },
51 | {
52 | "type": "Spheric",
53 | "r": 13.5,
54 | "c": 0.014383315108716488,
55 | "roc": 69.52500118654747,
56 | "d": 12.164999961853027,
57 | "mat1": "air",
58 | "mat2": "1.68893/31.18",
59 | "d_next": 1.3299999237060547
60 | },
61 | {
62 | "type": "Spheric",
63 | "r": 10.95,
64 | "c": 0.06522306054830551,
65 | "roc": 15.33200054694428,
66 | "d": 13.494999885559082,
67 | "mat1": "1.68893/31.18",
68 | "mat2": "air",
69 | "d_next": 6.199999809265137
70 | },
71 | {
72 | "type": "Aperture",
73 | "r": 9.65,
74 | "d": 19.69499969482422,
75 | "is_square": false,
76 | "diffraction": false,
77 | "mat1": "air",
78 | "mat2": "air",
79 | "d_next": 5.450000762939453
80 | },
81 | {
82 | "type": "Spheric",
83 | "r": 10.45,
84 | "c": -0.06146281585097313,
85 | "roc": -16.269999773922937,
86 | "d": 25.145000457763672,
87 | "mat1": "air",
88 | "mat2": "1.67271/32.25",
89 | "d_next": 1.279998779296875
90 | },
91 | {
92 | "type": "Spheric",
93 | "r": 12.48,
94 | "c": 0,
95 | "roc": 0.0,
96 | "d": 26.424999237060547,
97 | "mat1": "1.67271/32.25",
98 | "mat2": "1.7433/49.4",
99 | "d_next": 5.5
100 | },
101 | {
102 | "type": "Spheric",
103 | "r": 12.48,
104 | "c": -0.04748789221048355,
105 | "roc": -21.057999280482644,
106 | "d": 31.924999237060547,
107 | "mat1": "1.7433/49.4",
108 | "mat2": "air",
109 | "d_next": 0.24000167846679688
110 | },
111 | {
112 | "type": "Spheric",
113 | "r": 12.48,
114 | "c": 0.00492426473647356,
115 | "roc": 203.07600291939934,
116 | "d": 32.165000915527344,
117 | "mat1": "air",
118 | "mat2": "1.7433/49.4",
119 | "d_next": 3.1699981689453125
120 | },
121 | {
122 | "type": "Spheric",
123 | "r": 12.48,
124 | "c": -0.02147950790822506,
125 | "roc": -46.55600138851757,
126 | "d": 35.334999084472656,
127 | "mat1": "1.7433/49.4",
128 | "mat2": "air",
129 | "d_next": 34.88848114013672
130 | }
131 | ]
132 | }
--------------------------------------------------------------------------------
/lenses/camera/yongnuo_50mm_f1.8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/camera/yongnuo_50mm_f1.8.png
--------------------------------------------------------------------------------
/lenses/cellphone/3P_blank.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 6.959019600531343,
3 | "fnum": 3.4795098002656717,
4 | "r_sensor": 3.5649999999999995,
5 | "d_sensor": 6.093751907348633,
6 | "sensor_size": [
7 | 5.041671349860083,
8 | 5.041671349860083
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Aperture",
13 | "r": 1.0,
14 | "d": 0.0,
15 | "is_square": false,
16 | "diffraction": false,
17 | "mat1": "air",
18 | "mat2": "air",
19 | "d_next": 0.10000000149011612
20 | },
21 | {
22 | "type": "Aspheric",
23 | "r": 3.0,
24 | "c": 0.0010000000474974513,
25 | "roc": 999.999952502551,
26 | "d": 0.10000000149011612,
27 | "k": 1.0,
28 | "ai": [
29 | 0.0,
30 | 0.0,
31 | 0.0,
32 | 0.0,
33 | 0.0,
34 | 0.0
35 | ],
36 | "mat1": "air",
37 | "mat2": "pmma",
38 | "ai2": 0.0,
39 | "ai4": 0.0,
40 | "ai6": 0.0,
41 | "ai8": 0.0,
42 | "ai10": 0.0,
43 | "ai12": 0.0,
44 | "d_next": 1.0000000223517418
45 | },
46 | {
47 | "type": "Aspheric",
48 | "r": 3.0,
49 | "c": 0.0010000000474974513,
50 | "roc": 999.999952502551,
51 | "d": 1.100000023841858,
52 | "k": 1.0,
53 | "ai": [
54 | 0.0,
55 | 0.0,
56 | 0.0,
57 | 0.0,
58 | 0.0,
59 | 0.0
60 | ],
61 | "mat1": "pmma",
62 | "mat2": "air",
63 | "ai2": 0.0,
64 | "ai4": 0.0,
65 | "ai6": 0.0,
66 | "ai8": 0.0,
67 | "ai10": 0.0,
68 | "ai12": 0.0,
69 | "d_next": 0.9999998807907104
70 | },
71 | {
72 | "type": "Aspheric",
73 | "r": 3.0,
74 | "c": 0.0010000000474974513,
75 | "roc": 999.999952502551,
76 | "d": 2.0999999046325684,
77 | "k": 3.119999885559082,
78 | "ai": [
79 | 0.0,
80 | 0.0,
81 | 0.0,
82 | 0.0,
83 | 0.0,
84 | 0.0
85 | ],
86 | "mat1": "air",
87 | "mat2": "okp4",
88 | "ai2": 0.0,
89 | "ai4": 0.0,
90 | "ai6": 0.0,
91 | "ai8": 0.0,
92 | "ai10": 0.0,
93 | "ai12": 0.0,
94 | "d_next": 1.0
95 | },
96 | {
97 | "type": "Aspheric",
98 | "r": 3.0,
99 | "c": 0.0010000000474974513,
100 | "roc": 999.999952502551,
101 | "d": 3.0999999046325684,
102 | "k": -10.029999732971191,
103 | "ai": [
104 | 0.0,
105 | 0.0,
106 | 0.0,
107 | 0.0,
108 | 0.0,
109 | 0.0
110 | ],
111 | "mat1": "okp4",
112 | "mat2": "air",
113 | "ai2": 0.0,
114 | "ai4": 0.0,
115 | "ai6": 0.0,
116 | "ai8": 0.0,
117 | "ai10": 0.0,
118 | "ai12": 0.0,
119 | "d_next": 1.0
120 | },
121 | {
122 | "type": "Aspheric",
123 | "r": 3.0,
124 | "c": 0.0010000000474974513,
125 | "roc": 999.999952502551,
126 | "d": 4.099999904632568,
127 | "k": 0.6499999761581421,
128 | "ai": [
129 | 0.0,
130 | 0.0,
131 | 0.0,
132 | 0.0,
133 | 0.0,
134 | 0.0
135 | ],
136 | "mat1": "air",
137 | "mat2": "coc",
138 | "ai2": 0.0,
139 | "ai4": 0.0,
140 | "ai6": 0.0,
141 | "ai8": 0.0,
142 | "ai10": 0.0,
143 | "ai12": 0.0,
144 | "d_next": 1.0
145 | },
146 | {
147 | "type": "Aspheric",
148 | "r": 3.0,
149 | "c": 0.0010000000474974513,
150 | "roc": 999.999952502551,
151 | "d": 5.099999904632568,
152 | "k": -2.8499999046325684,
153 | "ai": [
154 | 0.0,
155 | 0.0,
156 | 0.0,
157 | 0.0,
158 | 0.0,
159 | 0.0
160 | ],
161 | "mat1": "coc",
162 | "mat2": "air",
163 | "ai2": 0.0,
164 | "ai4": 0.0,
165 | "ai6": 0.0,
166 | "ai8": 0.0,
167 | "ai10": 0.0,
168 | "ai12": 0.0,
169 | "d_next": 1.4937520027160645
170 | }
171 | ]
172 | }
--------------------------------------------------------------------------------
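`3P_blank.json` is a flat starting point for automated design: three plastic elements (pmma, okp4, coc) with near-zero curvatures and only the conic constants pre-set. One relation worth noting is that the stored f-number equals `foclen / (2 * r)` of the front aperture, i.e. the stop radius acts directly as the entrance pupil radius (plausible here because the aperture is the first surface). A quick check, standard library only:

```python
import json

with open("./lenses/cellphone/3P_blank.json") as f:
    lens = json.load(f)

# The front aperture's radius sets the entrance pupil in this blank design.
stop = next(s for s in lens["surfaces"] if s["type"] == "Aperture")
assert abs(lens["fnum"] - lens["foclen"] / (2 * stop["r"])) < 1e-9
```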
/lenses/cellphone/cellphone68deg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/cellphone/cellphone68deg.png
--------------------------------------------------------------------------------
/lenses/cellphone/cellphone80deg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/cellphone/cellphone80deg.png
--------------------------------------------------------------------------------
/lenses/cooke.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 49.805628777394965,
3 | "fnum": 4.005140333600394,
4 | "r_sensor": 18.175,
5 | "d_sensor": 60.43902587890625,
6 | "sensor_size": [
7 | 25.703331496131003,
8 | 25.703331496131003
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Spheric",
13 | "r": 7.5,
14 | "c": 0.04540000110864639,
15 | "roc": 22.02643118018671,
16 | "d": 0.0,
17 | "mat1": "air",
18 | "mat2": "n-sk16",
19 | "d_next": 3.259000062942505
20 | },
21 | {
22 | "type": "Spheric",
23 | "r": 7.5,
24 | "c": -0.002300000051036477,
25 | "roc": -434.78259904792515,
26 | "d": 3.259000062942505,
27 | "mat1": "n-sk16",
28 | "mat2": "air",
29 | "d_next": 6.008000135421753
30 | },
31 | {
32 | "type": "Spheric",
33 | "r": 7.5,
34 | "c": -0.04500000178813934,
35 | "roc": -22.222221339190483,
36 | "d": 9.267000198364258,
37 | "mat1": "air",
38 | "mat2": "f2",
39 | "d_next": 1.0
40 | },
41 | {
42 | "type": "Spheric",
43 | "r": 7.5,
44 | "c": 0.049300000071525574,
45 | "roc": 20.28397562980075,
46 | "d": 10.267000198364258,
47 | "mat1": "f2",
48 | "mat2": "air",
49 | "d_next": 2.118000030517578
50 | },
51 | {
52 | "type": "Stop",
53 | "r": 5.0,
54 | "c": 0.0,
55 | "d": 12.385000228881836,
56 | "mat1": "air",
57 | "mat2": "occluder",
58 | "d_next": 2.75
59 | },
60 | {
61 | "type": "Spheric",
62 | "r": 7.5,
63 | "c": 0.012500000186264515,
64 | "roc": 79.99999880790712,
65 | "d": 15.135000228881836,
66 | "mat1": "occluder",
67 | "mat2": "n-sk16",
68 | "d_next": 2.9519996643066406
69 | },
70 | {
71 | "type": "Spheric",
72 | "r": 7.5,
73 | "c": -0.0544000007212162,
74 | "roc": -18.38235269746966,
75 | "d": 18.086999893188477,
76 | "mat1": "n-sk16",
77 | "mat2": "air",
78 | "d_next": 42.35202598571777
79 | }
80 | ]
81 | }
--------------------------------------------------------------------------------
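Across these files, the square `sensor_size` is the largest square inscribed in the image circle of radius `r_sensor`, so each side equals `r_sensor * sqrt(2)`. A quick verification for `cooke.json` (the same relation holds for the camera lenses above, e.g. 21.6 mm -> 30.547 mm):

```python
import json
import math

with open("./lenses/cooke.json") as f:
    lens = json.load(f)

# Sensor side length = diagonal of the image circle / sqrt(2).
side = lens["r_sensor"] * math.sqrt(2)
assert all(abs(s - side) < 1e-6 for s in lens["sensor_size"])
```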
/lenses/cooke.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/cooke.png
--------------------------------------------------------------------------------
/lenses/cooke40_inferior.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 50.53261492842398,
3 | "fnum": 4.269302818821893,
4 | "r_sensor": 18.175,
5 | "d_sensor": 60.294999893188475,
6 | "sensor_size": [
7 | 25.703331496131003,
8 | 25.703331496131003
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Spheric",
13 | "r": 7.5,
14 | "c": 0.040400002151727676,
15 | "roc": 24.752473929193485,
16 | "d": 0.0,
17 | "mat1": "air",
18 | "mat2": "n-sk16",
19 | "d_next": 3.259000062942505
20 | },
21 | {
22 | "type": "Spheric",
23 | "r": 7.5,
24 | "c": -0.0013000000035390258,
25 | "roc": -769.2307671366711,
26 | "d": 3.259000062942505,
27 | "mat1": "n-sk16",
28 | "mat2": "air",
29 | "d_next": 6.008000135421753
30 | },
31 | {
32 | "type": "Spheric",
33 | "r": 7.5,
34 | "c": -0.04500000178813934,
35 | "roc": -22.222221339190483,
36 | "d": 9.267000198364258,
37 | "mat1": "air",
38 | "mat2": "f2",
39 | "d_next": 1.0
40 | },
41 | {
42 | "type": "Spheric",
43 | "r": 7.5,
44 | "c": 0.049300000071525574,
45 | "roc": 20.28397562980075,
46 | "d": 10.267000198364258,
47 | "mat1": "f2",
48 | "mat2": "air",
49 | "d_next": 2.118000030517578
50 | },
51 | {
52 | "type": "Aperture",
53 | "r": 5.0,
54 | "d": 12.385000228881836,
55 | "is_square": false,
56 | "diffraction": false,
57 | "mat1": "air",
58 | "mat2": "air",
59 | "d_next": 2.75
60 | },
61 | {
62 | "type": "Spheric",
63 | "r": 7.5,
64 | "c": 0.002500000176951289,
65 | "roc": 399.9999716877957,
66 | "d": 15.135000228881836,
67 | "mat1": "air",
68 | "mat2": "n-sk16",
69 | "d_next": 2.9519996643066406
70 | },
71 | {
72 | "type": "Spheric",
73 | "r": 7.5,
74 | "c": -0.0544000007212162,
75 | "roc": -18.38235269746966,
76 | "d": 18.086999893188477,
77 | "mat1": "n-sk16",
78 | "mat2": "air",
79 | "d_next": 42.208
80 | }
81 | ]
82 | }
--------------------------------------------------------------------------------
/lenses/cooke40_inferior.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/cooke40_inferior.png
--------------------------------------------------------------------------------
/lenses/hybridlens/a489_doe.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "A489 with a DOE",
3 | "foclen": 7.7719,
4 | "fnum": 3.8859,
5 | "r_sensor": 2.1213,
6 | "d_sensor": 11.257,
7 | "(sensor_size)": [
8 | 3.0,
9 | 3.0
10 | ],
11 | "sensor_res": [
12 | 3000,
13 | 3000
14 | ],
15 | "surfaces": [
16 | {
17 | "idx": 1,
18 | "type": "Aperture",
19 | "r": 1.0,
20 | "(d)": 0.0,
21 | "mat2": "air",
22 | "is_square": false,
23 | "diffraction": false,
24 | "d_next": 0.1
25 | },
26 | {
27 | "idx": 2,
28 | "type": "Aspheric",
29 | "r": 6.35,
30 | "(c)": 0.2104,
31 | "roc": 4.7531,
32 | "d": 0.1,
33 | "k": -1.2051,
34 | "ai": [
35 | 0.0,
36 | 0.000533,
37 | 1.12e-05,
38 | -3.75e-07,
39 | -7.63e-09,
40 | 1.36e-10
41 | ],
42 | "mat2": "h-k51",
43 | "(ai2)": 0.0,
44 | "(ai4)": 0.000533,
45 | "(ai6)": 1.12e-05,
46 | "(ai8)": -3.75e-07,
47 | "(ai10)": -7.63e-09,
48 | "(ai12)": 1.36e-10,
49 | "d_next": 7.5
50 | },
51 | {
52 | "idx": 3,
53 | "type": "Spheric",
54 | "r": 6.35,
55 | "(c)": -0.0639,
56 | "roc": -15.65,
57 | "(d)": 7.6,
58 | "mat2": "air",
59 | "d_next": 3.657
60 | }
61 | ],
62 | "DOE": {
63 | "type": "DOE",
64 | "(size)": 3.0,
65 | "d": 8.3098,
66 | "res": [
67 | 3000,
68 | 3000
69 | ],
70 | "fab_ps": 0.001,
71 | "is_square": true,
72 | "param_model": "binary2",
73 | "doe_path": null,
74 | "order2": 0.0,
75 | "order4": 0.0,
76 | "order6": 0.0,
77 | "order8": 0.0
78 | }
79 | }
--------------------------------------------------------------------------------
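The `DOE` block above parameterizes the diffractive element with a `binary2` model whose `order2`..`order8` coefficients are all zero in this blank design. A hedged sketch of that phase profile in the usual Zemax Binary 2 convention (whether DeepLens normalizes the radial coordinate to the aperture radius is an assumption):

```python
import numpy as np

# Binary 2 convention: phi(r) = order2*r^2 + order4*r^4 + order6*r^6 + ...
def binary2_phase(r, order2=0.0, order4=0.0, order6=0.0, order8=0.0):
    r2 = r**2
    return order2 * r2 + order4 * r2**2 + order6 * r2**3 + order8 * r2**4

# Sample over the 3 mm square aperture (coarse grid for illustration; the
# file itself specifies a 3000 x 3000 grid with 1 um pixels via "fab_ps").
x = np.linspace(-1.5, 1.5, 301)
xx, yy = np.meshgrid(x, x)
phase = binary2_phase(np.sqrt(xx**2 + yy**2))  # all zeros for this blank DOE
```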
/lenses/paraxiallens/doelens.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "A paraxial DOE lens system.",
3 | "d_sensor": 50.0,
4 | "sensor_size": [4.0, 4.0],
5 | "sensor_res": [2000, 2000],
6 | "surfaces": [
7 | {
8 | "idx": 1,
9 | "type": "Fresnel",
10 | "size": 4.0,
11 | "res": [4000, 4000],
12 | "fab_ps": 0.001,
13 | "(d)": 1.2,
14 | "is_square": true,
15 | "f0": 50.0,
16 | "wvln0": 0.589,
17 | "d_next": 50.0
18 | }
19 | ]
20 | }
--------------------------------------------------------------------------------
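This paraxial system is a single Fresnel DOE that focuses the design wavelength `wvln0` at `f0 = 50 mm`, matching the 50 mm sensor distance. A minimal sketch of the paraxial phase such a diffractive lens imparts (standard formula; how DeepLens discretizes it onto the stated `res` grid is not shown here):

```python
import numpy as np

# Paraxial phase of an ideal diffractive lens: phi(r) = -pi * r^2 / (lambda * f0).
# Units are mm; "wvln0" above is in microns, hence the 1e-3 conversion.
f0 = 50.0          # design focal length [mm]
wvln0 = 0.589e-3   # design wavelength [mm] (0.589 um)

x = np.linspace(-2.0, 2.0, 401)  # 4 mm aperture, coarse grid for illustration
xx, yy = np.meshgrid(x, x)
phase = -np.pi * (xx**2 + yy**2) / (wvln0 * f0)  # unwrapped phase [rad]
phase_wrapped = np.mod(phase, 2 * np.pi)         # fabricated 0..2pi profile
```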
/lenses/paraxiallens/doethinlens.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": "A paraxial hybrid lens system with a diffractive optical element (DOE) and a thin lens.",
3 | "surfaces": [
4 | {
5 | "idx": 1,
6 | "type": "Aperture",
7 | "r": 2.0,
8 | "(d)": 0.0,
9 | "is_square": false,
10 | "diffraction": false,
11 | "d_next": 2.0
12 | },
13 | {
14 | "idx": 2,
15 | "type": "ThinLens",
16 | "foclen": 50.0,
17 | "r": 5.0,
18 | "(d)": 0.2,
19 | "d_next": 2.0
20 | },
21 | {
22 | "idx": 3,
23 | "type": "DOE",
24 | "l": 4.0,
25 | "res": [4000, 4000],
26 | "fab_ps": 0.001,
27 | "(d)": 1.2,
28 | "is_square": true,
29 | "param_model": "pixel2d",
30 | "doe_path": null,
31 | "d_next": 48.0
32 | },
33 | {
34 | "idx": 4,
35 | "type": "Sensor",
36 | "l": 4.0,
37 | "res": [2000, 2000]
38 | }
39 | ]
40 | }
--------------------------------------------------------------------------------
/lenses/readme.md:
--------------------------------------------------------------------------------
1 | # Lens file resources
2 | 
3 | The lens data in this folder is collected from the following resources:
4 |
5 | - https://www.photonstophotos.net/GeneralTopics/Lenses/OpticalBench/OpticalBench.htm
6 | - https://www.lens-designs.com/
--------------------------------------------------------------------------------
/lenses/thorlabs/acl12708u.json:
--------------------------------------------------------------------------------
1 | {
2 | "foclen": 7.508396961682535,
3 | "fnum": 3.7541984808412674,
4 | "r_last": 3.5649999999999995,
5 | "d_sensor": 11.084592819213867,
6 | "sensor_size": [
7 | 5.041671349860083,
8 | 5.041671349860083
9 | ],
10 | "surfaces": [
11 | {
12 | "type": "Stop",
13 | "r": 1.0,
14 | "c": 0.0,
15 | "d": 0.0,
16 | "mat1": "air",
17 | "mat2": "air",
18 | "d_next": 0.0
19 | },
20 | {
21 | "type": "Aspheric",
22 | "r": 6.35,
23 | "c": 0.2103879451751709,
24 | "roc": 4.753124040293235,
25 | "d": 0.0,
26 | "k": -1.205070972442627,
27 | "ai": [
28 | 0.0005332418368197978,
29 | 1.1162886949023232e-05,
30 | -3.7455666301866586e-07,
31 | -7.634201715234212e-09,
32 | 1.3602199921969316e-10
33 | ],
34 | "mat1": "air",
35 | "mat2": "hk51",
36 | "ai2": 0.0005332418368197978,
37 | "ai4": 1.1162886949023232e-05,
38 | "ai6": -3.7455666301866586e-07,
39 | "ai8": -7.634201715234212e-09,
40 | "ai10": 1.3602199921969316e-10,
41 | "d_next": 7.5
42 | },
43 | {
44 | "type": "Spheric",
45 | "r": 6.35,
46 | "c": -0.06390020996332169,
47 | "roc": -15.649400848197425,
48 | "d": 7.5,
49 | "mat1": "hk51",
50 | "mat2": "air",
51 | "d_next": 3.584592819213867
52 | }
53 | ]
54 | }
--------------------------------------------------------------------------------
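The `Aspheric` surface above pairs a base conic (`c`, `k`) with even polynomial terms (`ai`, duplicated as `ai2`, `ai4`, ...). A hedged sketch of the standard even-asphere sag these fields imply; the mapping `ai[0] -> r^2, ai[1] -> r^4, ...` follows the paired keys in this file but is an assumption about the loader:

```python
import numpy as np

# Even asphere: z(r) = c*r^2 / (1 + sqrt(1 - (1+k)*c^2*r^2)) + sum ai[i]*r^(2*(i+1))
def aspheric_sag(r, c, k, ai):
    base = c * r**2 / (1 + np.sqrt(1 - (1 + k) * c**2 * r**2))
    poly = sum(a * r ** (2 * (i + 1)) for i, a in enumerate(ai))
    return base + poly

# Values copied from the Aspheric surface of acl12708u.json.
c, k = 0.2103879451751709, -1.205070972442627
ai = [0.0005332418368197978, 1.1162886949023232e-05,
      -3.7455666301866586e-07, -7.634201715234212e-09,
      1.3602199921969316e-10]

r = np.linspace(0.0, 6.35, 100)  # surface semi-aperture is 6.35 mm
z = aspheric_sag(r, c, k, ai)
```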
/lenses/thorlabs/acl12708u.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/thorlabs/acl12708u.png
--------------------------------------------------------------------------------
/lenses/thorlabs/acl12708u_psf20000mm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/thorlabs/acl12708u_psf20000mm.png
--------------------------------------------------------------------------------
/lenses/zemax_double_gaussian.zmx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vccimaging/DeepLens/76c5b827f8bf3a12c6d196c0d73c7d67f50862ac/lenses/zemax_double_gaussian.zmx
--------------------------------------------------------------------------------
/misc/aatdff_bibtex.txt:
--------------------------------------------------------------------------------
1 | @ARTICLE{10209238,
2 | author={Yang, Xinge and Fu, Qiang and Elhoseiny, Mohamed and Heidrich, Wolfgang},
3 | journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
4 | title={Aberration-Aware Depth-From-Focus},
5 | year={2023},
6 | volume={},
7 | number={},
8 | pages={1-11},
9 | keywords={Lenses;Optical imaging;Training;Estimation;Computational modeling;Optical sensors;Integrated optics;Depth from focus;optical aberration;point spread function;ray tracing},
10 | doi={10.1109/TPAMI.2023.3301931}}
11 |
--------------------------------------------------------------------------------
/misc/deeplens_bibtex.txt:
--------------------------------------------------------------------------------
1 | @article{yang2024curriculum,
2 | abstract = {Deep optical optimization has recently emerged as a new paradigm for designing computational imaging systems using only the output image as the objective. However, it has been limited to either simple optical systems consisting of a single element such as a diffractive optical element or metalens, or the fine-tuning of compound lenses from good initial designs. Here we present a DeepLens design method based on curriculum learning, which is able to learn optical designs of compound lenses ab initio from randomly initialized surfaces without human intervention, therefore overcoming the need for a good initial design. We demonstrate the effectiveness of our approach by fully automatically designing both classical imaging lenses and a large field-of-view extended depth-of-field computational lens in a cellphone-style form factor, with highly aspheric surfaces and a short back focal length.},
3 | author = {Yang, Xinge and Fu, Qiang and Heidrich, Wolfgang},
4 | date = {2024/08/03},
5 | date-added = {2024-08-05 07:44:03 -0700},
6 | date-modified = {2024-08-05 07:44:03 -0700},
7 | doi = {10.1038/s41467-024-50835-7},
8 | id = {Yang2024},
9 | isbn = {2041-1723},
10 | journal = {Nature Communications},
11 | number = {1},
12 | pages = {6572},
13 | title = {Curriculum learning for ab initio deep learned refractive optics},
14 | url = {https://doi.org/10.1038/s41467-024-50835-7},
15 | volume = {15},
16 | year = {2024},
17 | bdsk-url-1 = {https://doi.org/10.1038/s41467-024-50835-7}
18 | }
--------------------------------------------------------------------------------
/misc/do_bibtex.txt:
--------------------------------------------------------------------------------
1 | @article{wang2022dO,
2 | title={{dO: A differentiable engine for Deep Lens design of computational imaging systems}},
3 | author={Wang, Congli and Chen, Ni and Heidrich, Wolfgang},
4 | journal={IEEE Transactions on Computational Imaging},
5 | year={2022},
6 | volume={8},
7 | number={},
8 | pages={905-916},
9 | doi={10.1109/TCI.2022.3212837},
10 | publisher={IEEE}
11 | }
--------------------------------------------------------------------------------
/misc/fluidiclens_bibtext.txt:
--------------------------------------------------------------------------------
1 | @inproceedings{10.1145/3680528.3687584,
2 | author = {Na, Mulun and Jimenez Romero, Hector A. and Yang, Xinge and Klein, Jonathan and Michels, Dominik L. and Heidrich, Wolfgang},
3 | title = {End-to-end Optimization of Fluidic Lenses},
4 | year = {2024},
5 | isbn = {9798400711312},
6 | publisher = {Association for Computing Machinery},
7 | address = {New York, NY, USA},
8 | url = {https://doi.org/10.1145/3680528.3687584},
9 | doi = {10.1145/3680528.3687584},
10 | abstract = {Prototyping and small volume production of custom imaging-grade lenses is difficult and expensive, especially for more complex aspherical shapes. Fluidic shaping has recently been proposed as a potential solution: It makes use of the atomic level smoothness of interfaces between liquids, where the shape of the interface can be carefully controlled by boundary conditions, buoyancy control and other physical parameters. If one of the liquids is a resin, its shape can be “frozen” by curing, thus creating a solid optical element. While fluidic shaping is a promising avenue, the shape space generated by this method is currently only described in the form of partial differential equations, which are incompatible with existing lens design processes. Moreover, we show that the existing PDEs are inaccurate for larger curvatures. In this work, we develop a new formulation of the shape space lenses generated by the fluidic shaping technique. It overcomes the inaccuracies of previous models, and, through a differentiable implementation, can be integrated into recent end-to-end optical design pipelines based on differentiable ray tracing. We extensively evaluate the model and the design pipeline with simulations, as well as initial physical prototypes.},
11 | booktitle = {SIGGRAPH Asia 2024 Conference Papers},
12 | articleno = {55},
13 | numpages = {10},
14 | keywords = {Lens Design, End-to-End Optimization, Fluid Dynamics, Reconstruction Network},
15 | location = {},
16 | series = {SA '24}
17 | }
--------------------------------------------------------------------------------
/misc/hybridlens_bibtex.txt:
--------------------------------------------------------------------------------
1 | @article{yang2024end,
2 | title={End-to-End Hybrid Refractive-Diffractive Lens Design with Differentiable Ray-Wave Model},
3 | author={Yang, Xinge and Souza, Matheus and Wang, Kunyi and Chakravarthula, Praneeth and Fu, Qiang and Heidrich, Wolfgang},
4 | journal={arXiv preprint arXiv:2406.00834},
5 | year={2024}
6 | }
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | 
3 | setup(
4 |     name='deeplens',
5 |     version='1.0',
6 |     author='Xinge Yang',
7 |     author_email='xinge.yang@kaust.edu.sa',
8 |     description='DeepLens: a differentiable ray tracer for computational lens design.',
9 |     long_description=open('README.md').read(),
10 |     long_description_content_type='text/markdown',
11 |     url='https://github.com/singer-yang/DeepLens',
12 |     packages=find_packages(),
13 |     install_requires=[
14 |         'opencv-python',
15 |         'matplotlib',
16 |         'scikit-image',
17 |         'h5py',
18 |         'transformers',
19 |         'lpips',
20 |         'einops',
21 |         'timm',
22 |         'tqdm'
23 |     ],
24 |     classifiers=[
25 |         'License :: CC-BY-4.0 License',
26 |         'Programming Language :: Python :: 3',
27 |         'Programming Language :: Python :: 3.6',
28 |         'Programming Language :: Python :: 3.7',
29 |         'Programming Language :: Python :: 3.8',
30 |         'Programming Language :: Python :: 3.9',
31 |     ],
32 |     python_requires='>=3.6',
33 | )
--------------------------------------------------------------------------------
/visualization_demo.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import numpy as np
4 | import pyvistaqt as pvqt
5 | 
6 | from deeplens import GeoLens
7 | from deeplens.view_3d import *
8 | 
9 | # Basic colors, available for custom ray/surface coloring.
10 | R = np.array([255, 0, 0])
11 | G = np.array([0, 255, 0])
12 | B = np.array([0, 0, 255])
13 | 
14 | SAVE_DIR = "./visualization"
15 | os.makedirs(SAVE_DIR, exist_ok=True)
16 | 
17 | lens_config = os.path.relpath("./lenses/cellphone/cellphone68deg.json")
18 | lens = GeoLens(lens_config)
19 | 
20 | # Save a 2D cross-section layout of the lens.
21 | lens.draw_layout(os.path.join(SAVE_DIR, "lens_2dlayout.png"))
22 | 
23 | # Interactive 3D view in a background Qt window.
24 | plotter = pvqt.BackgroundPlotter()
25 | 
26 | hfov = lens.hfov  # half field of view [rad]
27 | 
28 | draw_lens_3D(
29 |     plotter,
30 |     lens,
31 |     fovs=[0.0, hfov * 0.99 * 57.296],  # 57.296 deg/rad: convert to degrees
32 |     fov_phis=[45.0, 135.0, 225.0, 315.0],
33 |     is_show_bridge=False,
34 |     save_dir=SAVE_DIR,
35 | )
36 | 
37 | plotter.show()
38 | plotter.app.exec_()
--------------------------------------------------------------------------------
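The demo above opens an interactive Qt window via `pyvistaqt`. On a headless machine, an off-screen pyvista plotter may work as a drop-in; whether `draw_lens_3D` accepts a plain `pv.Plotter` rather than a Qt `BackgroundPlotter` is an assumption:

```python
import pyvista as pv
from deeplens import GeoLens
from deeplens.view_3d import draw_lens_3D

# Off-screen variant of visualization_demo.py (assumes draw_lens_3D only
# needs a pyvista-compatible plotter, not specifically a Qt one).
lens = GeoLens("./lenses/cellphone/cellphone68deg.json")
plotter = pv.Plotter(off_screen=True)
draw_lens_3D(
    plotter,
    lens,
    fovs=[0.0, lens.hfov * 0.99 * 57.296],
    fov_phis=[45.0, 135.0, 225.0, 315.0],
    is_show_bridge=False,
    save_dir="./visualization",
)
plotter.screenshot("./visualization/lens_3d.png")
```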