├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── README_RawDataInfo.md ├── bimodal2unimodal_hist_img.py ├── depth_decoding.py ├── download_raw_histogram_data.py ├── download_raw_timestamp_data.py ├── environment.yml ├── hist2timestamps.py ├── io_dirpaths.json ├── pileup_correction.py ├── preprocess_per_scene_irf.py ├── preprocess_raw_hist_img.py ├── process_hist_img.py ├── read_fullscan_hydraharp_t3.py ├── read_hydraharp_outfile_t3.py ├── read_positions_file.py ├── research_utils ├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── environment.yml ├── improc_ops.py ├── io_ops.py ├── np_utils.py ├── plot_utils.py ├── py_utils.py ├── scipy_utils.py ├── shared_constants.py ├── signalproc_ops.py ├── signalproc_ops_2D.py ├── tests │ └── test_signalproc_ops_2D.py ├── timer.py ├── torch_datasets.py └── torch_utils.py ├── scan_data_utils.py └── scan_params.json /.gitignore: -------------------------------------------------------------------------------- 1 | ## 2 | 3 | scan_data_scripts_old/ 4 | data_raw_timestamps/ 5 | data_raw_histograms/ 6 | preprocessed_hist_imgs/ 7 | system_irf/ 8 | 9 | ## Auto-generated 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | pip-wheel-metadata/ 33 | share/python-wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | MANIFEST 38 | 39 | # PyInstaller 40 | # Usually these files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .nox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *.cover 59 | *.py,cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | .python-version 95 | 96 | # pipenv 97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 100 | # install all needed dependencies. 101 | #Pipfile.lock 102 | 103 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
104 | __pypackages__/
105 |
106 | # Celery stuff
107 | celerybeat-schedule
108 | celerybeat.pid
109 |
110 | # SageMath parsed files
111 | *.sage.py
112 |
113 | # Environments
114 | .env
115 | .venv
116 | env/
117 | venv/
118 | ENV/
119 | env.bak/
120 | venv.bak/
121 |
122 | # Spyder project settings
123 | .spyderproject
124 | .spyproject
125 |
126 | # Rope project settings
127 | .ropeproject
128 |
129 | # mkdocs documentation
130 | /site
131 |
132 | # mypy
133 | .mypy_cache/
134 | .dmypy.json
135 | dmypy.json
136 |
137 | # Pyre type checker
138 | .pyre/
139 |
-------------------------------------------------------------------------------- /.gitmodules: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/felipegb94/WISC-SinglePhoton3DData/e0102416ffca8bfa730179a6a1f83a6de9240fc6/.gitmodules
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Felipe Gutierrez-Barragan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # WISC-SinglePhoton3DData
2 |
3 | Raw single-photon timestamp data processing scripts for data obtained from a HydraHarp TCSPC. The data was captured in an experimental scanning single-photon LiDAR prototype built at UW-Madison by the [Computational Optics](http://compoptics.wisc.edu/) and [WISION](https://wisionlab.cs.wisc.edu/) groups.
4 |
5 | - [WISC-SinglePhoton3DData](#wisc-singlephoton3ddata)
6 | - [Setup](#setup)
7 | - [Step 1: Clone repository](#step-1-clone-repository)
8 | - [Step 2: Setup Python Environment](#step-2-setup-python-environment)
9 | - [Step 3: Edit Variables in `io_dirpaths.json`](#step-3-edit-variables-in-io_dirpathsjson)
10 | - [Raw Timestamp Data](#raw-timestamp-data)
11 | - [Computing Histograms from Raw Timestamp Data](#computing-histograms-from-raw-timestamp-data)
12 | - [Raw 3D Histogram Image Data](#raw-3d-histogram-image-data)
13 | - [Pre-processing Raw Histograms](#pre-processing-raw-histograms)
14 | - [Extracting IRF for Depth Estimation](#extracting-irf-for-depth-estimation)
15 | - [Estimating depths](#estimating-depths)
16 | - [Additional Code and Scripts](#additional-code-and-scripts)
17 | - [Citation and Reference](#citation-and-reference)
18 |
19 | ## Setup
20 | ### Step 1: Clone repository
21 |
22 | ```
23 | git clone git@github.com:felipegb94/WISC-SinglePhoton3DData.git
24 | ```
25 | ### Step 2: Setup Python Environment
26 |
27 | The code is tested on Python 3.6. You can set up a conda environment using the `environment.yml` file here or follow these steps:
28 |
29 | 1. Create environment: `conda create --name SP3DDEnv python=3.6`
30 | 2. Activate environment: `conda activate SP3DDEnv`
31 | 3. Install packages: `conda install numpy scipy matplotlib ipython gdown`
32 | 4. Install `rarfile`, needed to unpack the `.rar` data files after download: `pip install rarfile`
33 |
34 | ### Step 3: Edit Variables in `io_dirpaths.json`
35 |
36 | Edit the `io_dirpaths.json` file to set the `*_dirpath` variables to where you want the data downloaded and where the intermediate results should be saved. By default they are all saved within this folder (and ignored by git).
37 | ## Raw Timestamp Data
38 |
39 | **Note:** Instead of the raw timestamp data you can download the raw histogram images obtained by loading the timestamps and assembling them into a single 3D histogram image. If you prefer to directly deal with the histogram images, skip to the next section.
40 |
41 | You can download the raw timestamp data using the `download_raw_timestamp_data.py` script. This script only downloads a single scan at a time. Each scan file is between 1-8GB in size. To change the scan that is downloaded, edit the `scene_id` variable inside `download_raw_timestamp_data.py`.
42 |
43 | For more information about the data folder content that is downloaded for each scan see `README_RawDataInfo.md`.
44 |
45 | ### Computing Histograms from Raw Timestamp Data
46 |
47 | The scripts `read_hydraharp_outfile_t3.py` and `read_fullscan_hydraharp_t3.py` load the raw timestamps and turn them into histograms:
48 |
49 | 1. `read_hydraharp_outfile_t3.py`: Selects a single file from the raw timestamp data, loads it, and creates a histogram from it. Each file has the timestamps for a single point in space. This is a good file to look at to become familiar with loading the timestamps and generating histograms from them.
50 | 2. `read_fullscan_hydraharp_t3.py`: Reads the timestamp files for each point in the scan, builds histograms, and reshapes them into a 3D histogram image. The output image is saved.
51 |
52 | ## Raw 3D Histogram Image Data
53 |
54 | You can download the raw histogram images using the `download_raw_histogram_data.py` script. This script only downloads a single scan at a time. Each scan file is between 100MB-1GB in size. To change the scan that is downloaded, edit the `scene_id` variable inside `download_raw_histogram_data.py`.
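Once a scan is downloaded and extracted, the 3D histogram image can be inspected directly with NumPy. Below is a minimal sketch (the folder and file names assume the default paths in `io_dirpaths.json` and the face scan; the exact filename encodes the scan's rows, columns, bin size, and histogram length, so adjust it to the scan you downloaded):

```python
import os
import numpy as np

base_dirpath = './data_raw_histograms'            # hist_data_base_dirpath in io_dirpaths.json
scene_id = '20190207_face_scanning_low_mu/free'   # <scan>/<operating mode>
fname = 'raw-hist-img_r-102-c-58_tres-8ps_tlen-100000ps.npy'

raw_hist_img = np.load(os.path.join(base_dirpath, scene_id, fname))
print(raw_hist_img.shape)  # (n_rows, n_cols, n_time_bins)

# Quick sanity checks: total photon counts per pixel and a coarse argmax depth map
photon_counts = raw_hist_img.sum(axis=-1)
argmax_depth_bins = np.argmax(raw_hist_img, axis=-1)
```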
55 |
56 | ### Pre-processing Raw Histograms
57 |
58 | The script `preprocess_raw_hist_img.py` preprocesses the raw histograms (crops them and shifts them), and saves them in the folder specified by the `preprocessed_hist_data_base_dirpath` variable in `io_dirpaths.json`.
59 |
60 | The histogram cropping and shift parameters can be set in `scan_params.json` under `hist_preprocessing_params`. The shift parameter can be particularly important for pile-up correction on data acquired in synchronous mode.
61 |
62 | ### Extracting IRF for Depth Estimation
63 |
64 | To estimate depths with a matched filtering algorithm we want to extract the IRF of the system. For each captured scene the alignment of the system may have changed slightly, which can change the overall IRF. *Therefore, we extract the IRF for each scene individually from a high SNR point*.
65 |
66 | The script `preprocess_per_scene_irf.py` extracts the scene IRF and saves it. Before running this script for a given scan you need to have run the `preprocess_raw_hist_img.py` script.
67 |
68 | **Important Note:** For data acquired in synchronous mode, i.e., data that has pile-up, you may need to perform pile-up correction before extracting the IRF.
69 |
70 | ### Estimating depths
71 |
72 | There are two simple methods to estimate depths from the pre-processed histograms:
73 |
74 | 1. *Argmax*: Take the argmax across the time dimension.
75 | 2. *matchfilt*: Use the previously extracted IRF and estimate depths using matched filtering. The script `process_hist_img.py` shows an example of how this can be done. Note that before running this script for a given scene you need to have run `preprocess_raw_hist_img.py` and also `preprocess_per_scene_irf.py` for the given scene.
76 |
77 | ## Additional Code and Scripts
78 |
79 | Here are descriptions for the code files provided:
80 |
81 | * `pileup_correction.py`: Pile-up correction algorithms for timestamp data obtained in synchronous and in free running mode. The free running mode correction does not change the histograms too much. For the synchronous mode you need to make sure that the histogram is correctly shifted (i.e., the 0th time bin actually corresponds to the early time bins).
82 | * `scan_data_utils.py` and `research_utils/`: Some utility functions used by the scripts here.
83 | * `hist2timestamps.py`: Takes the histograms and converts them back to individual timestamps.
84 | * `depth_decoding.py`: Depth estimation for coarse and full-resolution histograms.
85 | * `bimodal2unimodal_hist_img.py`: For some of the free-running mode scenes (face and deer) there is a bi-modal IRF due to inter-reflections. As long as the extracted IRF is also bi-modal, we can still estimate depths effectively with matched filtering. However, if we want to transform the data into purely unimodal signals, this script can do that.
86 |
87 | ## Citation and Reference
88 |
89 | The code and data in this repository come from 3 different projects.
90 |
91 | * Initial Code + Data for Optimal attenuation results:
92 |
93 | ```latex
94 | @inproceedings{gupta2019photon,
95 | title={Photon-flooded single-photon 3D cameras},
96 | author={Gupta, Anant and Ingle, Atul and Velten, Andreas and Gupta, Mohit},
97 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
98 | pages={6770--6779},
99 | year={2019}
100 | }
101 | ```
102 |
103 | * Initial Code + Data for asynchronous acquisition results:
104 |
105 | ```latex
106 | @inproceedings{gupta2019asynchronous,
107 | title={Asynchronous single-photon 3D imaging},
108 | author={Gupta, Anant and Ingle, Atul and Gupta, Mohit},
109 | booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
110 | pages={7909--7918},
111 | year={2019}
112 | }
113 | ```
114 |
115 | * Current version of the code was implemented for:
116 |
117 | ```latex
118 | @inproceedings{gutierrez2022compressive,
119 | title={Compressive Single-Photon 3D Cameras},
120 | author={Gutierrez-Barragan, Felipe and Ingle, Atul and Seets, Trevor and Gupta, Mohit and Velten, Andreas},
121 | booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
122 | year={2022}
123 | }
124 | ```
125 |
-------------------------------------------------------------------------------- /README_RawDataInfo.md: --------------------------------------------------------------------------------
1 | # Information about raw data downloaded
2 |
3 | Each data folder downloaded will have one or more folders in it that describe the operating mode or the amount of filtering used to mitigate pile-up.
4 |
5 | The folder naming convention is as follows:
6 |
7 | * `free`: Free running mode == Photon-driven mode, as described in Gupta et al., ICCV 2019
8 | * `det`: Deterministic shifting == Uniform shifting mode, as described in Gupta et al., ICCV 2019
9 | * `ext`: External triggering (by laser). Synchronous mode. No attenuation
10 | * `ext_opt_filtering`: External triggering (by laser). Synchronous mode. Optimal filtering
11 | * `ext_5%`: External triggering (by laser). Synchronous mode. Filtering 95% of the light (extreme filtering)
12 | * `pulse_waveform_calib`: Single point scan for calibrating the waveform. Some waveforms still had undesired reflections.
13 |
14 | Inside the `notes.txt` of each scan you may find additional info on acquisition parameters.
15 |
16 | ## Scan Data Scene IDs
17 |
18 | There are scans from Gupta et al., CVPR 2019 and from Gupta et al., ICCV 2019. As you will observe in the download scripts (`download_raw_timestamp_data.py` and `download_raw_histogram_data.py`), you can specify the `scene_id` you want to download. Here we list all the scene ids you can select and the paper each scene corresponds to.
19 |
20 | ### Raw data from scans from Gupta et al., CVPR 2019
21 |
22 | * `20181105_face`: Scan of a face with two levels of filtering.
23 | * `20181112_blocks`: Scan of a block structure with multiple levels of filtering.
24 |
25 | ### Raw data from scans from Gupta et al., ICCV 2019
26 |
27 | * `20190207_face_scanning_low_mu`: Scan of a mannequin face with a flat wall background.
28 | * `20190209_deer_high_mu`: Scan of a reindeer wooden structure.
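The per-scene processing scripts in this repository identify a capture by combining one of the scene ids above with an operating-mode folder, e.g. `20190207_face_scanning_low_mu/free`. A minimal sketch of how such an id can be built and validated against `scan_params.json` (the variable names below are only illustrative):

```python
import os
from research_utils.io_ops import load_json

scan_name = '20190207_face_scanning_low_mu'  # one of the scene ids listed above
mode = 'free'                                # one of the operating-mode folders listed above
scene_id = '{}/{}'.format(scan_name, mode)

scan_data_params = load_json('scan_params.json')
assert scene_id in scan_data_params['scene_ids'], '{} not in scene_ids'.format(scene_id)
print('Data for this capture lives under <data dirpath>/{}'.format(scene_id))
```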
29 | 30 | -------------------------------------------------------------------------------- /bimodal2unimodal_hist_img.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Take the histogram images with bimodal signal, and turn them into unimodal signal 3 | The bimodal signal observed in our data is due to imperfect alignment and it is not very common. 4 | ''' 5 | #### Standard Library Imports 6 | import os 7 | import sys 8 | sys.path.append('./tof-lib') 9 | 10 | #### Library imports 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | from scipy.ndimage import gaussian_filter 14 | from IPython.core import debugger 15 | breakpoint = debugger.set_trace 16 | 17 | #### Local imports 18 | from scan_data_utils import * 19 | from research_utils.timer import Timer 20 | from research_utils.plot_utils import * 21 | from depth_decoding import IdentityCoding 22 | from research_utils.io_ops import load_json 23 | 24 | def bimodal2unimodal_crop_inplace(bimodal_hist, unimodal_hist, first_pulse_start_idx, pulse_len, second_pulse_offset): 25 | ''' 26 | In-place bimodal2unimodal crop operation. Crops second peak from bimodal, and then stitches the two remaining arrays together. 27 | ''' 28 | assert(bimodal_hist.ndim==1), "Only works for 1 dim hist" 29 | assert(unimodal_hist.ndim==1), "Only works for 1 dim hist" 30 | nt = bimodal_hist.shape[-1] 31 | unimodal_nt = nt - pulse_len 32 | assert(unimodal_hist.shape[-1] == unimodal_nt), "unimodal_hist should have dims of bimodal_hist.size - pulse_len" 33 | second_pulse_start_idx = (first_pulse_start_idx + second_pulse_offset) % nt 34 | second_pulse_end_idx = (second_pulse_start_idx + pulse_len) % nt 35 | try: 36 | if((first_pulse_start_idx < second_pulse_start_idx) and (first_pulse_start_idx < second_pulse_end_idx)): 37 | unimodal_hist[0:second_pulse_start_idx] = bimodal_hist[0:second_pulse_start_idx] 38 | unimodal_hist[second_pulse_start_idx:] = bimodal_hist[second_pulse_end_idx:] 39 | elif((first_pulse_start_idx < second_pulse_start_idx)): 40 | unimodal_hist[:] = bimodal_hist[second_pulse_end_idx:second_pulse_start_idx] 41 | else: 42 | unimodal_hist[0:second_pulse_start_idx] = bimodal_hist[0:second_pulse_start_idx] 43 | unimodal_hist[second_pulse_start_idx:] = bimodal_hist[second_pulse_end_idx:] 44 | except: 45 | print("Something went wrong. Check logic...") 46 | unimodal_hist[:] = np.nan 47 | return unimodal_hist 48 | 49 | def bimodal2unimodal_crop(bimodal_hist, first_pulse_start_idx, pulse_len, second_pulse_offset): 50 | ''' 51 | bimodal2unimodal crop operation. Crops second peak from bimodal, and then stitches the two remaining arrays together. 
52 | Returns new array 53 | ''' 54 | nt = bimodal_hist.shape[-1] 55 | unimodal_nt = nt - pulse_len 56 | assert(unimodal_nt > 0), "pulse_len cant be larger than nt" 57 | unimodal_hist = np.zeros((unimodal_nt,), dtype=bimodal_hist.dtype) 58 | bimodal2unimodal_crop_inplace(bimodal_hist, unimodal_hist, first_pulse_start_idx, pulse_len, second_pulse_offset) 59 | return unimodal_hist 60 | 61 | 62 | if __name__=='__main__': 63 | 64 | ## Load parameters shared by all 65 | scan_data_params = load_json('scan_params.json') 66 | io_dirpaths = load_json('io_dirpaths.json') 67 | hist_img_base_dirpath = io_dirpaths["preprocessed_hist_data_base_dirpath"] 68 | 69 | ## Load processed scene: 70 | # scene_id = '20190209_deer_high_mu/free' 71 | scene_id = '20190207_face_scanning_low_mu/free' 72 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 73 | assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id) 74 | hist_dirpath = os.path.join(hist_img_base_dirpath, scene_id) 75 | 76 | ## Histogram image params 77 | downsamp_factor = 1 # Spatial downsample factor 78 | hist_tbin_factor = 1.0 # increase tbin size to make histogramming faster 79 | n_rows_fullres = scan_data_params['scene_params'][scene_id]['n_rows_fullres'] 80 | n_cols_fullres = scan_data_params['scene_params'][scene_id]['n_cols_fullres'] 81 | (nr, nc) = (n_rows_fullres // downsamp_factor, n_cols_fullres // downsamp_factor) # dims for face_scanning scene 82 | min_tbin_size = scan_data_params['min_tbin_size'] # Bin size in ps 83 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 84 | 85 | ## Load histogram image 86 | hist_img_tau = scan_data_params['hist_preprocessing_params']['hist_end_time'] - scan_data_params['hist_preprocessing_params']['hist_start_time'] 87 | hist_img_fname = get_hist_img_fname(nr, nc, hist_tbin_size, hist_img_tau) 88 | hist_img_fpath = os.path.join(hist_dirpath, hist_img_fname) 89 | hist_img = np.load(hist_img_fpath) 90 | nt = hist_img.shape[-1] 91 | (tbins, tbin_edges) = get_hist_bins(hist_img_tau, hist_tbin_size) 92 | 93 | ## Load IRF 94 | irf_tres = scan_data_params['min_tbin_size'] # in picosecs 95 | irf = get_scene_irf(scene_id, nt, tlen=hist_img_tau, is_unimodal=False) 96 | 97 | ## Load uni-modal IRF 98 | unimodal_nt = get_unimodal_nt(nt, scan_data_params['irf_params']['pulse_len'], hist_tbin_size) 99 | unimodal_hist_tau = unimodal_nt*hist_tbin_size 100 | unimodal_irf = get_scene_irf(scene_id, nt, tlen=hist_img_tau, is_unimodal=True) 101 | 102 | ## reconstruct depths with irf 103 | coding_obj = IdentityCoding(nt, h_irf=irf, account_irf=True) 104 | 105 | ## Generate uni-modal hist image 106 | pulse_len = time2bin(scan_data_params['irf_params']['pulse_len'], irf_tres) 107 | second_pulse_offset = time2bin(scan_data_params['irf_params']['second_pulse_offset'], irf_tres) 108 | unimodal_hist_img = np.zeros((nr,nc,unimodal_nt)).astype(hist_img.dtype) 109 | denoised_hist_img = gaussian_filter(hist_img, sigma=0.75, mode='wrap', truncate=1) 110 | accurate_shifts = coding_obj.max_peak_decoding(denoised_hist_img, rec_algo_id='matchfilt').squeeze() 111 | for i in range(nr): 112 | for j in range(nc): 113 | first_pulse_start_idx = accurate_shifts[i,j] 114 | bimodal2unimodal_crop_inplace(hist_img[i,j], unimodal_hist_img[i,j], first_pulse_start_idx, pulse_len, second_pulse_offset) 115 | 116 | ## Save output unimodal hist img 117 | unimodal_hist_img_fname = get_hist_img_fname(nr, nc, hist_tbin_size, unimodal_hist_tau, is_unimodal=True) 118 | 
unimodal_hist_img_fpath = os.path.join(hist_dirpath, unimodal_hist_img_fname)
119 | np.save(unimodal_hist_img_fpath, unimodal_hist_img)
120 |
121 |
-------------------------------------------------------------------------------- /depth_decoding.py: --------------------------------------------------------------------------------
1 | '''
2 | @author: Felipe Gutierrez-Barragan
3 |
4 | Depth estimation for coarse (gated) and full-resolution histograms
5 |
6 | '''
7 | ## Standard Library Imports
8 | from abc import ABC, abstractmethod
9 |
10 | ## Library Imports
11 | import numpy as np
12 | from scipy import interpolate
13 | from IPython.core import debugger
14 | breakpoint = debugger.set_trace
15 |
16 | ## Local Imports
17 | from research_utils.shared_constants import *
18 | from research_utils import signalproc_ops, np_utils, py_utils
19 |
20 |
21 | def norm_t(C, axis=-1):
22 | '''
23 | Divide by the L2 norm across the given axis
24 | '''
25 | return C / (np.linalg.norm(C, ord=2, axis=axis, keepdims=True) + EPSILON)
26 |
27 | def zero_norm_t(C, axis=-1):
28 | '''
29 | Apply the zero-norm transform to the given axis
30 | This performs exactly the same as the old zero_norm_t_old, but in the old version the denominator is scaled by a factor of (1/sqrt(K)), which is part of the standard deviation formula
31 | '''
32 | return norm_t(C - C.mean(axis=axis, keepdims=True), axis=axis)
33 |
34 | class Coding(ABC):
35 | '''
36 | Abstract class for linear coding
37 | '''
38 | C = None
39 | h_irf = None
40 | def __init__(self, h_irf=None, account_irf=False):
41 | # Set the coding matrix C if it has not been set yet
42 | if(self.C is None): self.set_coding_mat()
43 | #
44 | (self.n_maxres, self.n_codes) = (self.C.shape[-2], self.C.shape[-1])
45 | # Set the impulse response function (used for accounting for the system band-limit and for matched filter reconstruction)
46 | self.update_irf(h_irf)
47 | # the account_irf flag controls if we want to account for the IRF when estimating shifts.
48 | # This means that the C matrices used during decoding may be different from the encoding ones
49 | self.account_irf = account_irf
50 | # Update all the parameters derived from C
51 | self.update_C_derived_params()
52 | # Get all functions for reconstruction and decoding available
53 | self.rec_algos_avail = py_utils.get_obj_functions(self, filter_str='reconstruction')
54 | # Set if we want to account for IRF when decoding
55 |
56 | @abstractmethod
57 | def set_coding_mat(self):
58 | '''
59 | This method initializes the coding matrix, self.C
60 | '''
61 | pass
62 |
63 | def update_irf(self, h_irf=None):
64 | # If no IRF is given, default to a delta impulse (all weight in the first time bin)
65 | if(h_irf is None):
66 | print("hirf is NONE")
67 | self.h_irf = np.zeros((self.n_maxres,))
68 | self.h_irf[0] = 1.
69 | else: self.h_irf = h_irf.squeeze() 70 | assert(self.h_irf.ndim == 1), "irf should be 1 dim vector" 71 | assert(self.h_irf.shape[-1] == self.n_maxres), "irf size should match n_maxres" 72 | assert(np.all(self.h_irf >= 0.)), "irf should be non-negative" 73 | # normalize 74 | self.h_irf = self.h_irf / self.h_irf.sum() 75 | 76 | def update_C(self, C=None): 77 | if(not (C is None)): self.C = C 78 | # update any C derived params 79 | self.update_C_derived_params() 80 | 81 | def update_C_derived_params(self): 82 | # Store how many codes there are 83 | (self.n_maxres, self.n_codes) = (self.C.shape[-2], self.C.shape[-1]) 84 | assert(self.n_codes <= self.n_maxres), "n_codes ({}) should not be larger than n_maxres ({})".format(self.n_codes, self.n_maxres) 85 | if(self.account_irf): 86 | # self.decoding_C = signalproc_ops.circular_conv(self.C, self.h_irf[:, np.newaxis], axis=0) 87 | # self.decoding_C = signalproc_ops.circular_corr(self.C, self.h_irf[:, np.newaxis], axis=0) 88 | self.decoding_C = signalproc_ops.circular_corr(self.h_irf[:, np.newaxis], self.C, axis=0) 89 | else: 90 | self.decoding_C = self.C 91 | # Pre-initialize some useful variables 92 | self.zero_norm_C = zero_norm_t(self.decoding_C) 93 | self.norm_C = norm_t(self.decoding_C) 94 | # Set domains 95 | self.domain = np.arange(0, self.n_maxres)*(TWOPI / self.n_maxres) 96 | 97 | def get_n_maxres(self): return self.n_maxres 98 | 99 | def get_domain(self): return self.domain 100 | 101 | def get_input_C(self, input_C=None): 102 | if(input_C is None): input_C = self.C 103 | self.verify_input_c_vec(input_C) # Last dim should be the codes 104 | return input_C 105 | 106 | def get_input_zn_C(self, zn_input_C=None): 107 | if(zn_input_C is None): zn_input_C = self.zero_norm_C 108 | self.verify_input_c_vec(zn_input_C) # Last dim should be the codes 109 | return zn_input_C 110 | 111 | def get_input_norm_C(self, norm_input_C=None): 112 | if(norm_input_C is None): norm_input_C = self.norm_C 113 | self.verify_input_c_vec(norm_input_C) # Last dim should be the codes 114 | return norm_input_C 115 | 116 | def encode(self, transient_img): 117 | ''' 118 | Encode the transient image using the n_codes inside the self.C matrix 119 | ''' 120 | assert(transient_img.shape[-1] == self.n_maxres), "Input c_vec does not have the correct dimensions" 121 | return np.matmul(transient_img[..., np.newaxis, :], self.C).squeeze(-2) 122 | 123 | def verify_input_c_vec(self, c_vec): 124 | assert(c_vec.shape[-1] == self.n_codes), "Input c_vec does not have the correct dimensions" 125 | 126 | def get_rec_algo_func(self, rec_algo_id): 127 | # Check if rec algorithm exists 128 | rec_algo_func_name = '{}_reconstruction'.format(rec_algo_id) 129 | rec_algo_function = getattr(self, rec_algo_func_name, None) 130 | assert(rec_algo_function is not None), "Reconstruction algorithm {} is NOT available. 
Please choose from the following algos: {}".format(rec_algo_func_name, self.rec_algos_avail) 131 | # # Apply rec algo 132 | # print("Running reconstruction algorithm {}".format(rec_algo_func_name)) 133 | return rec_algo_function 134 | 135 | def reconstruction(self, c_vec, rec_algo_id='linear', **kwargs): 136 | rec_algo_function = self.get_rec_algo_func(rec_algo_id) 137 | lookup = rec_algo_function(c_vec, **kwargs) 138 | return lookup 139 | 140 | def max_peak_decoding(self, c_vec, rec_algo_id='linear', **kwargs): 141 | ''' 142 | Perform max peak decoding using the specified reconstruction algorithm 143 | kwargs (key-work arguments) will depend on the chosen reconstruction algorithm 144 | ''' 145 | lookup = self.reconstruction(c_vec, rec_algo_id, **kwargs) 146 | return np.argmax(lookup, axis=-1) 147 | 148 | def maxgauss_peak_decoding(self, c_vec, gauss_sigma, rec_algo_id='linear', **kwargs): 149 | lookup = self.reconstruction(c_vec, rec_algo_id, **kwargs) 150 | return signalproc_ops.max_gaussian_center_of_mass_mle(lookup, sigma_tbins = gauss_sigma) 151 | 152 | class GatedCoding(Coding): 153 | ''' 154 | Gated coding class. Coding is applied like a gated camera 155 | In the extreme case that we have a gate for every time bin then the C matrix is an (n_maxres x n_maxres) identity matrix 156 | ''' 157 | def __init__(self, n_maxres, n_gates=None, **kwargs): 158 | if(n_gates is None): n_gates = n_maxres 159 | assert((n_maxres % n_gates) == 0), "Right now GatedCoding required n_maxres to be divisible by n_gates" 160 | assert((n_maxres >= n_gates)), "n_gates should always be smaller than n_maxres" 161 | self.n_gates = n_gates 162 | self.set_coding_mat(n_maxres, n_gates) 163 | super().__init__(**kwargs) 164 | 165 | def set_coding_mat(self, n_maxres, n_gates): 166 | self.gate_len = int(n_maxres / n_gates) 167 | self.C = np.zeros((n_maxres, n_gates)) 168 | for i in range(n_gates): 169 | start_tbin = i*self.gate_len 170 | end_tbin = start_tbin + self.gate_len 171 | self.C[start_tbin:end_tbin, i] = 1. 
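# Worked example: with n_maxres=4 and n_gates=2, set_coding_mat above gives gate_len=2 and
#   C = [[1, 0],
#        [1, 0],
#        [0, 1],
#        [0, 1]]
# i.e., each column simply integrates ("gates") a contiguous block of gate_len time bins.
# When n_gates == n_maxres, gate_len=1 and C reduces to the identity matrix used by IdentityCoding below.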
172 |
173 | def encode(self, transient_img):
174 | '''
175 | Encode the transient image using the n_codes inside the self.C matrix
176 | For GatedCoding with many n_gates, encoding through matmul is quite slow, so we assign it differently
177 | '''
178 | assert(transient_img.shape[-1] == self.n_maxres), "Input transient_img does not have the correct dimensions"
179 | c_vals = np.array(transient_img[..., 0::self.gate_len])
180 | for i in range(self.gate_len-1):
181 | start_idx = i+1
182 | c_vals += transient_img[..., start_idx::self.gate_len]
183 | return c_vals
184 |
185 | def linear_reconstruction(self, c_vals):
186 | if(self.n_gates == self.n_maxres): return c_vals
187 | if(self.account_irf):
188 | print("Warning: Linear Reconstruction in Gated does not account for IRF, so unless the IRF spreads across time bins, this will produce quantized depths")
189 | x_fullres = np.arange(0, self.n_maxres)
190 | # Create a circular x axis by concatenating the first element to the end and the last element to the beginning
191 | circular_x_lres = np.arange((0.5*self.gate_len)-0.5-self.gate_len, self.n_maxres + self.gate_len, self.gate_len)
192 | circular_c_vals = np.concatenate((c_vals[..., -1][...,np.newaxis], c_vals, c_vals[..., 0][...,np.newaxis]), axis=-1)
193 | f = interpolate.interp1d(circular_x_lres, circular_c_vals, axis=-1, kind='linear')
194 | return f(x_fullres)
195 |
196 | def matchfilt_reconstruction(self, c_vals):
197 | template = self.h_irf
198 | self.verify_input_c_vec(c_vals)
199 | zn_template = zero_norm_t(template, axis=-1)
200 | zn_c_vals = zero_norm_t(c_vals, axis=-1)
201 | shifts = signalproc_ops.circular_matched_filter(zn_c_vals, zn_template)
202 | # vectorize tensors
203 | (c_vals, c_vals_shape) = np_utils.vectorize_tensor(c_vals, axis=-1)
204 | shifts = shifts.reshape((c_vals.shape[0],))
205 | h_rec = np.zeros(c_vals.shape, dtype=template.dtype)
206 | for i in range(shifts.size): h_rec[i,:] = np.roll(template, shift=shifts[i], axis=-1)
207 | c_vals = c_vals.reshape(c_vals_shape)
208 | return h_rec.reshape(c_vals_shape)
209 |
210 | class IdentityCoding(GatedCoding):
211 | '''
212 | Identity coding class. GatedCoding in the extreme case where n_maxres == n_gates
213 | '''
214 | def __init__(self, n_maxres, **kwargs):
215 | super().__init__(n_maxres=n_maxres, **kwargs)
-------------------------------------------------------------------------------- /download_raw_histogram_data.py: --------------------------------------------------------------------------------
1 | '''
2 | This script downloads the raw 3D histogram image data for a given scene's scan.
3 | The histogram data folders are roughly 100MB to 1GB in size.
4 | '''
5 |
6 | #### Standard Library Imports
7 | import gdown
8 | import zipfile
9 | import os
10 |
11 | #### Library imports
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | from IPython.core import debugger
15 | breakpoint = debugger.set_trace
16 |
17 | #### Local imports
18 | from research_utils import io_ops
19 |
20 | def download_and_extract_zip(url, data_base_dirpath, scene_id):
21 | zip_fpath = os.path.join(data_base_dirpath, '{}.zip'.format(scene_id))
22 | print("Downloading: {}".format(zip_fpath))
23 | if(os.path.exists(zip_fpath)):
24 | print("{} already exists. Aborting download. If you wish to overwrite delete the file. ".format(zip_fpath))
25 | else:
26 | gdown.download(url, zip_fpath, fuzzy=True)
27 | print('Extracting ...
this may take a few minutes..') 28 | with zipfile.ZipFile(zip_fpath, 'r') as f: 29 | f.extractall(data_base_dirpath) 30 | 31 | if __name__=='__main__': 32 | 33 | ## Set scene ID that we want to download 34 | # See io_dirpaths.json for all options 35 | ## Scans from async shifting paper 36 | # scene_id = "20190207_face_scanning_low_mu" 37 | # scene_id = "20190209_deer_high_mu" 38 | ## Scans from optimal filterins 39 | scene_id = "20181105_face" 40 | # scene_id = "20181112_blocks" 41 | 42 | ## Get dirpath where to download the data 43 | io_dirpaths = io_ops.load_json('io_dirpaths.json') 44 | data_base_dirpath = io_dirpaths['hist_data_base_dirpath'] 45 | # Make folder to save data in. 46 | os.makedirs(data_base_dirpath, exist_ok=True) 47 | 48 | ## Download file 49 | gdrive_urls = io_dirpaths["histogram_gdrive_urls"] 50 | assert(scene_id in gdrive_urls.keys()), "Invalid scene ID" 51 | scene_url = gdrive_urls[scene_id] 52 | 53 | download_and_extract_zip(scene_url, data_base_dirpath, scene_id) 54 | -------------------------------------------------------------------------------- /download_raw_timestamp_data.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This script downloads the raw timestamp data files for a given scenes 3D scan. 3 | The raw data folders can be 1 to 9GB in size. 4 | ''' 5 | 6 | #### Standard Library Imports 7 | import gdown 8 | import rarfile 9 | import os 10 | 11 | #### Library imports 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | from IPython.core import debugger 15 | breakpoint = debugger.set_trace 16 | 17 | #### Local imports 18 | from research_utils import io_ops 19 | 20 | def download_and_extract_rar(url, data_base_dirpath, scene_id): 21 | rar_fpath = os.path.join(data_base_dirpath, '{}.rar'.format(scene_id)) 22 | print("Downloading: {}".format(rar_fpath)) 23 | if(os.path.exists(rar_fpath)): 24 | print("{} already exists. Aborting download. If you wish to overwrite delete the file. ".format(rar_fpath)) 25 | else: 26 | gdown.download(url, rar_fpath, fuzzy=True) 27 | print('Extracting ... this may take a few minutes..') 28 | with rarfile.RarFile(rar_fpath, 'r') as f: 29 | f.extractall(path=data_base_dirpath) 30 | 31 | if __name__=='__main__': 32 | 33 | ## Set scene ID that we want to download 34 | # See io_dirpaths.json for all options 35 | ## Scans from async shifting paper 36 | scene_id = "20190207_face_scanning_low_mu" 37 | # scene_id = "20190209_deer_high_mu" 38 | ## Scans from optimal filterins 39 | scene_id = "20181105_face" 40 | scene_id = "20181112_blocks" 41 | 42 | ## Get dirpath where to download the data 43 | io_dirpaths = io_ops.load_json('io_dirpaths.json') 44 | data_base_dirpath = io_dirpaths['timestamp_data_base_dirpath'] 45 | # Make folder to save data in. 
46 | os.makedirs(data_base_dirpath, exist_ok=True) 47 | 48 | ## Download file 49 | gdrive_urls = io_dirpaths["gdrive_urls"] 50 | assert(scene_id in gdrive_urls.keys()), "Invalid scene ID" 51 | scene_url = gdrive_urls[scene_id] 52 | 53 | download_and_extract_rar(scene_url, data_base_dirpath, scene_id) 54 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: SP3DDEnv 2 | channels: 3 | - conda-forge 4 | - http://conda.anaconda.org/gurobi 5 | - defaults 6 | dependencies: 7 | - _libgcc_mutex=0.1=conda_forge 8 | - _openmp_mutex=4.5=1_gnu 9 | - backports=1.0=py_2 10 | - backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0 11 | - beautifulsoup4=4.10.0=pyha770c72_0 12 | - ca-certificates=2021.10.8=ha878542_0 13 | - certifi=2016.9.26=py36_0 14 | - colorama=0.4.4=pyh9f0ad1d_0 15 | - cycler=0.11.0=pyhd8ed1ab_0 16 | - decorator=5.1.1=pyhd8ed1ab_0 17 | - filelock=3.0.4=py36_0 18 | - freetype=2.10.4=h0708190_1 19 | - gdown=4.4.0=pyhd8ed1ab_0 20 | - icu=67.1=he1b5a44_0 21 | - ipython=5.8.0=py36_1 22 | - ipython_genutils=0.2.0=py_1 23 | - kiwisolver=1.3.1=py36h2531618_0 24 | - ld_impl_linux-64=2.36.1=hea4e1c9_2 25 | - libblas=3.9.0=13_linux64_openblas 26 | - libcblas=3.9.0=13_linux64_openblas 27 | - libffi=3.4.2=h7f98852_5 28 | - libgcc-ng=11.2.0=h1d223b6_14 29 | - libgfortran-ng=11.2.0=h69a702a_14 30 | - libgfortran5=11.2.0=h5c6108e_14 31 | - libgomp=11.2.0=h1d223b6_14 32 | - liblapack=3.9.0=13_linux64_openblas 33 | - libnsl=2.0.0=h7f98852_0 34 | - libopenblas=0.3.18=pthreads_h8fe5266_0 35 | - libpng=1.6.37=h21135ba_2 36 | - libstdcxx-ng=11.2.0=he4da1e4_14 37 | - libzlib=1.2.11=h36c2ea0_1013 38 | - matplotlib=3.2.2=1 39 | - matplotlib-base=3.2.2=py36h5fdd944_1 40 | - ncurses=6.3=h9c3ff4c_0 41 | - numpy=1.19.5=py36hfc0c790_2 42 | - openssl=1.1.1n=h166bdaf_0 43 | - pexpect=4.8.0=pyh9f0ad1d_2 44 | - pickleshare=0.7.5=py_1003 45 | - pip=20.0.2=py36_1 46 | - prompt_toolkit=1.0.15=py_1 47 | - ptyprocess=0.7.0=pyhd3deb0d_0 48 | - pygments=2.11.2=pyhd8ed1ab_0 49 | - pyparsing=3.0.7=pyhd8ed1ab_0 50 | - python=3.6.15=hb7a2778_0_cpython 51 | - python-dateutil=2.8.2=pyhd8ed1ab_0 52 | - python_abi=3.6=2_cp36m 53 | - readline=8.1=h46c0cb4_0 54 | - requests=2.12.5=py36_0 55 | - scipy=1.5.3=py36h81d768a_1 56 | - setuptools=49.6.0=py36h5fab9bb_3 57 | - simplegeneric=0.8.1=py_1 58 | - six=1.16.0=pyh6c4a22f_0 59 | - soupsieve=2.3.1=pyhd8ed1ab_0 60 | - sqlite=3.37.1=h4ff8645_0 61 | - tk=8.6.12=h27826a3_0 62 | - tornado=6.1=py36h8f6f2f9_1 63 | - tqdm=4.63.1=pyhd8ed1ab_0 64 | - traitlets=4.3.3=pyhd8ed1ab_2 65 | - wcwidth=0.2.5=pyh9f0ad1d_2 66 | - wheel=0.37.1=pyhd8ed1ab_0 67 | - xz=5.2.5=h516909a_1 68 | - zlib=1.2.11=h36c2ea0_1013 69 | - pip: 70 | - rarfile==4.0 71 | prefix: /home/felipe/miniconda3/envs/SP3DDEnv 72 | -------------------------------------------------------------------------------- /hist2timestamps.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @author: Felipe Gutierrez-Barragan 3 | Function that loads a histogram and generates the timestamps that created the histogram. 4 | The inversion will only be perfect if the input histogram is the same resolution as the timestamps. 5 | 6 | To run simply set the base_dirpath variable below to the correct path, and then run. 
7 | ''' 8 | ## Standard Library Imports 9 | import os 10 | 11 | ## Library Imports 12 | import numpy as np 13 | 14 | ## Local Imports 15 | 16 | def hist2timestamps(hist_tensor, max_n_timestamps=None): 17 | ''' 18 | Input: 19 | * hist_tensor: Tensor whose last dimension is the histogram dimension. Example a tensor with dimsn n_rows x n_cols x n_hist_bins 20 | * max_n_timestamps: Max number of timestamps that we will accept. If None, then this is derived from the hist with the most timestamps 21 | Output 22 | * timestamps_tensor: tensor whose first K-1 dimensions are equal to the hist_tensor. The last dimension depends on max_n_timestamps 23 | ''' 24 | (hist_tensor, hist_shape) = vectorize_tensor(hist_tensor) 25 | hist_tensor = hist_tensor.astype(int) 26 | n_hists = hist_tensor.shape[0] 27 | n_bins = hist_tensor.shape[-1] 28 | n_timestamps_per_hist = hist_tensor.sum(axis=-1) 29 | if(max_n_timestamps is None): max_n_timestamps = np.max(n_timestamps_per_hist) 30 | timestamp_tensor = -1*np.ones((n_hists, max_n_timestamps)).astype(np.int) 31 | n_timestamp_per_elem = np.zeros((n_hists,)).astype(np.int) 32 | for i in range(n_hists): 33 | curr_hist = hist_tensor[i] 34 | tmp_timestamp_arr = -1*np.ones((n_timestamps_per_hist[i],)) 35 | curr_idx = 0 36 | for j in range(n_bins): 37 | curr_bin_n = curr_hist[j] 38 | if(curr_bin_n > 0): 39 | tmp_timestamp_arr[curr_idx:curr_idx+curr_bin_n] = j 40 | curr_idx = curr_idx+curr_bin_n 41 | # If number of timestamps is larger than max_n_timestamps, randomly sample max_n 42 | if(n_timestamps_per_hist[i] >= max_n_timestamps): 43 | timestamp_tensor[i,:] = np.random.choice(tmp_timestamp_arr, size=(max_n_timestamps,), replace=False) 44 | n_timestamp_per_elem[i] = max_n_timestamps 45 | else: 46 | timestamp_tensor[i,0:n_timestamps_per_hist[i]] = tmp_timestamp_arr 47 | n_timestamp_per_elem[i] = n_timestamps_per_hist[i] 48 | return timestamp_tensor.reshape(hist_shape[0:-1] + (max_n_timestamps,)), n_timestamp_per_elem.reshape(hist_shape[0:-1]) 49 | 50 | def vectorize_tensor(tensor, axis=-1): 51 | ''' 52 | Take an N-Dim Tensor and make it a 2D matrix. Leave the first or last dimension untouched, and basically squeeze the 1st-N-1 53 | dimensions. 54 | This is useful when applying operations on only the first or last dimension of a tensor. Makes it easier to input to different 55 | number of pytorch functions. 
56 | ''' 57 | assert((axis==0) or (axis==-1)), 'Error: Input axis needs to be the first or last axis of tensor' 58 | tensor_shape = tensor.shape 59 | n_untouched_dim = tensor.shape[axis] 60 | n_elems = int(round(tensor.size / n_untouched_dim)) 61 | if(axis == -1): 62 | return (tensor.reshape((n_elems, n_untouched_dim)), tensor_shape) 63 | else: 64 | return (tensor.reshape((n_untouched_dim, n_elems)), tensor_shape) 65 | 66 | if __name__=='__main__': 67 | 68 | ## Path to raw_hist_imgs folder 69 | base_dirpath = './data_raw_histograms' 70 | 71 | ## Scene IDs that we can load from 72 | # scene_id = '20190209_deer_high_mu/free' 73 | scene_id = '20190207_face_scanning_low_mu/free' 74 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 75 | 76 | fname = 'raw-hist-img_r-102-c-58_tres-8ps_tlen-100000ps.npy' 77 | 78 | raw_hist_img = np.load(os.path.join(base_dirpath, scene_id, fname)) 79 | 80 | (tstamps_img, n_timestamp_per_elem) = hist2timestamps(raw_hist_img) 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /io_dirpaths.json: -------------------------------------------------------------------------------- 1 | { 2 | "timestamp_data_base_dirpath": "./data_raw_timestamps" 3 | , "hist_data_base_dirpath": "./data_raw_histograms" 4 | , "preprocessed_hist_data_base_dirpath": "./preprocessed_hist_imgs" 5 | , "system_irfs_dirpath": "./system_irfs" 6 | , "gdrive_urls": { 7 | "20190207_face_scanning_low_mu": "https://drive.google.com/file/d/1qpKLH1NVIikRzZpVLmflzsjjI7dENam4/view?usp=sharing" 8 | , "20190209_deer_high_mu": "https://drive.google.com/file/d/1-BzXPa3be-64diXEvXC6gSPNU0BuL7KL/view?usp=sharing" 9 | , "20181105_face": "https://drive.google.com/file/d/1np33I5O-Vdsh-rL5wNZdVz_qJfh3q-ji/view?usp=sharing" 10 | , "20181105_tajmahal": "https://drive.google.com/file/d/1D2nWaNa9z3Uay5A0WBUmByf5sQtyrCT2/view?usp=sharing" 11 | , "20181112_blocks": "https://drive.google.com/file/d/1JxKVKAnvbKVMeLDsJMnHp34xplm4duHC/view?usp=sharing" 12 | } 13 | , "histogram_gdrive_urls": { 14 | "20190207_face_scanning_low_mu": "https://drive.google.com/file/d/1cD9CK7oaFR9tM0_1sfoqJz1OHbG5JL8r/view?usp=sharing" 15 | , "20190209_deer_high_mu": "https://drive.google.com/file/d/111Nq_1odhs9tSAJ6474s58JcVaXjObfg/view?usp=sharing" 16 | , "20181105_face": "https://drive.google.com/file/d/1Ld7yAKEi-9RRSf_B9K5xPP2QqWPwJu54/view?usp=sharing" 17 | , "20181105_tajmahal": "https://drive.google.com/file/d/1srwvsS1eKGpOboHnNHiiktnHI4GFVW_i/view?usp=sharing" 18 | , "20181112_blocks": "https://drive.google.com/file/d/1AltGet8kxeX6vIBLLUf1zC-delHh8A7B/view?usp=sharing" 19 | } 20 | 21 | } -------------------------------------------------------------------------------- /pileup_correction.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | 3 | #### Library imports 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | from IPython.core import debugger 7 | breakpoint = debugger.set_trace 8 | 9 | #### Local imports 10 | from scan_data_utils import * 11 | 12 | 13 | def coates_correction_sync_mode(counts, n_laser_cycles): 14 | ''' 15 | Coates correction described in Eq. 6 of Gupta et al., CVPR 2019 16 | - counts: Histogram of timestamps 17 | - n_empty_laser_cycles: Number of laser cycles with no photons 18 | - n_laser_cycles: total number of laser cycles 19 | NOTE: Although numer/denom calc could be vectorized, it does not speed up. 
20 | Using for loop is faster than using linear algebra in this case 21 | ''' 22 | print("WARNING: For coates correction to work, the input histogram needs to be correctly shifted such that the 0th time bin actually corresponds to the earlier time bins. ") 23 | assert(counts.ndim==1), 'only implemented for single histogram at a time' 24 | B = counts.shape[-1] # number of bins 25 | N_i = np.array(counts, dtype=np.float32) # histogram with one additional bin 26 | r_i_hat = np.zeros((B,), dtype=N_i.dtype) 27 | for i in range(B): 28 | numer = n_laser_cycles - np.sum(N_i[0:i]) 29 | denom = numer - N_i[i] 30 | if denom<1e-15: continue 31 | r_i_hat[i] = np.log(numer/denom) 32 | return r_i_hat 33 | 34 | def coates_correction_sync_mode_fullimg(counts, n_laser_cycles): 35 | ''' 36 | Coates correction described in Eq. 6 of Gupta et al., CVPR 2019 37 | - counts: Image or list of histograms. Last dimension should be the histogram dimension. First N dimensions will be for the list/image of histograms 38 | - n_empty_laser_cycles: Number of laser cycles with no photons 39 | - n_laser_cycles: total number of laser cycles 40 | ''' 41 | print("WARNING: For coates correction to work, the input histogram needs to be correctly shifted such that the 0th time bin actually corresponds to the earlier time bins. ") 42 | B = counts.shape[-1] # number of bins 43 | N_i = np.array(counts, dtype=np.float32) # histogram with one additional bin 44 | r_i_hat = np.zeros_like(N_i) 45 | for i in range(B): 46 | numer = n_laser_cycles - N_i[..., 0:i].sum(axis=-1) 47 | denom = numer - N_i[..., i] 48 | # if denom<1e-15: continue 49 | nonzero_mask = denom > 1e-15 50 | curr_r_i_hat = r_i_hat[..., i] 51 | curr_r_i_hat[nonzero_mask] = np.log(numer[nonzero_mask]/denom[nonzero_mask]) 52 | # r_i_hat[..., i] = np.log(numer[nonzero_mask]/denom[nonzero_mask]) 53 | return r_i_hat 54 | 55 | def coates_est_free_running(counts, rep_period, hist_tbin_size, dead_time, n_laser_cycles, hist_tbin_factor=1): 56 | ''' 57 | Coates correction as described in Suppl. Note 5 58 | - counts: Histogram of timestamps 59 | - rep_period: laser repetition period 60 | - hist_tbin_size: size of histogram time bin 61 | - dead_time: spad dead time 62 | - n_laser_cycles: number of laser cycles 63 | - hist_tbin_factor: 1 if histogram bin size == time resolution. > 1 if we downsampled the histogram 64 | NOTE: The units of rep_period, hist_tbin_size, and dead_time should match 65 | NOTE: Usually the correction does not change the shape of free running too much 66 | ''' 67 | assert(counts.ndim==1), 'only implemented for single histogram at a time' 68 | ## correct histogram (Coates estimate) 69 | max_n_photons_per_cycle = int(np.ceil(rep_period / dead_time)) 70 | dead_time_bins = int(np.floor(dead_time / hist_tbin_size)) 71 | # Max number of photons that a bin can have detected. This is equal to # of laser cycles. 
If we downsampled histogram then it will be n_laser_cycles*hist_tbin_factor 72 | max_photons_per_bin = int(n_laser_cycles * hist_tbin_factor)*max_n_photons_per_cycle 73 | # init the denominator seq to the max number of photons that could have been detected by a bin 74 | denominator_seq = np.ones_like(counts) * max_photons_per_bin 75 | n_hist_bins = counts.shape[-1] 76 | assert(counts.ndim==1), 'only implemented for single histogram at a time' 77 | for i in range(n_hist_bins): 78 | start_bin = i + 1 79 | end_bin = np.min([start_bin + dead_time_bins, n_hist_bins]) 80 | # Wrapped bin always start at 0 81 | wrapped_start_bin = 0 82 | # If there is no wrapping (start_bin + dead_time_bins - n_hist_bins < 0) so wrapped_end_bin is 0 83 | wrapped_end_bin = np.max([0, start_bin + dead_time_bins - n_hist_bins]) 84 | n_bins = (end_bin - start_bin) + (wrapped_end_bin - wrapped_start_bin) 85 | # print("n_bins: {}, start: {}, end: {}, wrapped_start: {}, wrapped_end: {}".format(n_bins, start_bin, end_bin, wrapped_start_bin, wrapped_end_bin)) 86 | assert(n_bins == dead_time_bins), "Coates correction bins should always be equal to the number of dead time bins" 87 | curr_bin_counts = counts[i] 88 | denominator_seq[start_bin:end_bin] -= curr_bin_counts 89 | denominator_seq[wrapped_start_bin:wrapped_end_bin] -= curr_bin_counts 90 | 91 | corrected_counts = counts / denominator_seq 92 | return corrected_counts -------------------------------------------------------------------------------- /preprocess_per_scene_irf.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This script uses a high SNR pixel from a pre-processed histogram image and extracts the IRF of that scene 3 | The data collected by this setup has a bi-modal IRF due to lens inter-reflections which explains the two peaks. 4 | 5 | NOTE: This script may not work well with data acquired in synchronous mode that has pile-up. 
6 | You may need to correct for pile-up first 7 | 8 | NOTE: The ext_5% when denoised end up with 0 photons everywhere so we need to reduce the amount of denoising 9 | ''' 10 | 11 | #### Standard Library Imports 12 | import os 13 | import sys 14 | sys.path.append('./tof-lib') 15 | 16 | #### Library imports 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | from scipy.ndimage import gaussian_filter 20 | from IPython.core import debugger 21 | breakpoint = debugger.set_trace 22 | 23 | #### Local imports 24 | from scan_data_utils import irf_dirpath 25 | from scan_data_utils import * 26 | from bimodal2unimodal_hist_img import bimodal2unimodal_crop, get_unimodal_nt 27 | from research_utils.timer import Timer 28 | from research_utils.plot_utils import * 29 | from research_utils.io_ops import load_json 30 | from depth_decoding import IdentityCoding 31 | 32 | if __name__=='__main__': 33 | 34 | ## Load parameters shared by all 35 | scan_data_params = load_json('scan_params.json') 36 | io_dirpaths = load_json('io_dirpaths.json') 37 | 38 | hist_img_base_dirpath = io_dirpaths["preprocessed_hist_data_base_dirpath"] 39 | 40 | ## Load processed scene: 41 | ## Set scene that will be processed 42 | scene_id = '20190207_face_scanning_low_mu/free' 43 | # scene_id = '20190207_face_scanning_low_mu/det' 44 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 45 | # scene_id = '20190207_face_scanning_low_mu/ext_opt_filtering' 46 | # scene_id = '20190207_face_scanning_low_mu/ext_5%' 47 | # scene_id = '20190209_deer_high_mu/free' 48 | # scene_id = '20190209_deer_high_mu/det' 49 | # scene_id = '20190209_deer_high_mu/ext' 50 | # scene_id = '20190209_deer_high_mu/ext_5%' 51 | scene_id = '20181105_face/low_flux' 52 | scene_id = '20181105_face/opt_flux' 53 | 54 | assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id) 55 | hist_dirpath = os.path.join(hist_img_base_dirpath, scene_id) 56 | 57 | out_dirpath = os.path.join(irf_dirpath, scene_id) 58 | os.makedirs(out_dirpath, exist_ok=True) 59 | 60 | ## Get params for scene 61 | scan_params = scan_data_params['scene_params'][scene_id] 62 | 63 | ## Set parameters of histogram we want to load 64 | irf_tres = scan_data_params['min_tbin_size'] # in picosecs 65 | hist_img_tau = scan_data_params['hist_preprocessing_params']['hist_end_time'] - scan_data_params['hist_preprocessing_params']['hist_start_time'] 66 | hist_img_fname = get_hist_img_fname(scan_params['n_rows_fullres'], scan_params['n_cols_fullres'], irf_tres, hist_img_tau) 67 | hist_img_fpath = os.path.join(hist_dirpath, hist_img_fname) 68 | 69 | ## Load histogram 70 | assert(os.path.exists(hist_img_fpath)), "{} does not exist. 
Make sure to run preprocess_raw_hist_img.py first".format(hist_img_fpath) 71 | hist_img = np.load(hist_img_fpath) 72 | (nr,nc,nt) = hist_img.shape 73 | (tbins, tbin_edges) = get_hist_bins(hist_img_tau, irf_tres) 74 | 75 | ## Apply denoising 76 | if('ext_5%' in scene_id): 77 | d_hist_img = gaussian_filter(hist_img, sigma=0.1, mode='wrap', truncate=3) 78 | else: 79 | d_hist_img = gaussian_filter(hist_img, sigma=1, mode='wrap', truncate=3) 80 | min_signal_threshold=1.0 81 | 82 | if('20190207_face_scanning_low_mu' in scene_id): 83 | (r,c) = (109, 50) 84 | elif('20190209_deer_high_mu' in scene_id): 85 | (r,c) = (58, 60) 86 | else: 87 | (r,c) = (nr//2, nc//2) 88 | 89 | (r_max,c_max) = np.unravel_index(np.argmax(hist_img.sum(axis=-1)), (nr,nc)) 90 | ## extract selected irf and center it 91 | irf = d_hist_img[r, c, :] 92 | irf = np.roll(irf, -1*irf.argmax()) 93 | 94 | ## 95 | ## Zero out bins with less than scene specific threshold 96 | irf -= np.median(irf) 97 | d_hist_img -= np.median(d_hist_img,axis=-1,keepdims=True) 98 | irf[irf < min_signal_threshold] = 0. 99 | d_hist_img[d_hist_img < min_signal_threshold] = 0. 100 | 101 | ## Save IRF 102 | irf_fname = get_irf_fname(irf_tres, hist_img_tau) 103 | np.save(os.path.join(out_dirpath, irf_fname), irf) 104 | 105 | ## Create uni-modal irf by zero-ing out the second peak OR cropping 106 | pulse_len = time2bin(scan_data_params['irf_params']['pulse_len'], irf_tres) 107 | second_pulse_offset = time2bin(scan_data_params['irf_params']['second_pulse_offset'], irf_tres) 108 | unimodal_nt = get_unimodal_nt(nt, scan_data_params['irf_params']['pulse_len'], irf_tres) 109 | # Generate uni-modal IRF with the same length as original 110 | unimodal_irf_samelen = np.array(irf) 111 | unimodal_irf_samelen[second_pulse_offset:second_pulse_offset+pulse_len] = 0. 
112 | np.save(os.path.join(out_dirpath, "unimodal-"+irf_fname), unimodal_irf_samelen) 113 | # Generate uni-modal IRF where we crop the second pulse and reduce the length 114 | unimodal_irf = bimodal2unimodal_crop(irf, first_pulse_start_idx=0, pulse_len=pulse_len, second_pulse_offset=second_pulse_offset) 115 | unimodal_irf_tau = unimodal_irf.size*irf_tres 116 | unimodal_irf_fname = get_irf_fname(irf_tres, unimodal_irf_tau) 117 | np.save(os.path.join(out_dirpath, "unimodal-"+unimodal_irf_fname), unimodal_irf) 118 | 119 | ## Fit a cubic spline function to be able to generate any 120 | f = fit_irf(irf) 121 | x_fullres = np.arange(0, nt) * (1./nt) 122 | 123 | ## reconstruct depths with irf 124 | coding_obj = IdentityCoding(nt, h_irf=irf, account_irf=True) 125 | decoded_depths = coding_obj.max_peak_decoding(hist_img, rec_algo_id='matchfilt').squeeze() 126 | 127 | ## Plot some results 128 | plt.clf() 129 | plt.pause(0.1) 130 | plt.subplot(3,3,1) 131 | plt.imshow(hist_img.sum(axis=-1)); plt.title('Sum of Hist') 132 | plt.subplot(3,3,2) 133 | plt.imshow(hist_img.argmax(axis=-1)); plt.title('Argmax') 134 | plt.subplot(3,3,3) 135 | plt.imshow(decoded_depths); plt.title('MatchFilt w/ IRF');plt.colorbar() 136 | plt.subplot(3,1,2) 137 | plt.plot(hist_img[r,c], linewidth=2, alpha=0.75, label='Raw IRF: {},{}'.format(r,c)) 138 | plt.plot(irf, linewidth=2, alpha=0.75, label='Processed IRF: {},{}'.format(r,c)) 139 | plt.plot(d_hist_img[r+1,c], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(r+1,c)) 140 | # plt.plot(d_hist_img[r+1,c+1], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(r+1,c+1)) 141 | plt.plot(d_hist_img[93,45], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(93,45)) 142 | plt.legend(fontsize=14) 143 | plt.subplot(3,1,3) 144 | plt.plot(hist_img[r,c], linewidth=2, alpha=0.75, label='Raw IRF: {},{}'.format(r,c)) 145 | plt.plot(unimodal_irf, linewidth=2, alpha=0.75, label='Crop Uni-modal IRF: {},{}'.format(r,c)) 146 | plt.legend(fontsize=14) 147 | 148 | 149 | 150 | # results_dirpath = os.path.join(io_dirpaths['results_dirpath'], 'real_data_results/irf_calib') 151 | 152 | # out_fname = 'irf_{}_r-{}-c-{}_tres-{}ps_tlen-{}ps'.format(scene_id.replace('/','--'), r, c, int(irf_tres), int(hist_img_tau)) 153 | # save_currfig_png(results_dirpath, out_fname) 154 | -------------------------------------------------------------------------------- /preprocess_raw_hist_img.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Preprocess the raw histogram images produced by read_fullscan_hydraharp_t3.py 3 | Preprocessing Steps: 4 | * Crop earlier and later time bins in histogram (some of them have undesired reflections) 5 | * Shift histogram 6 | 7 | NOTE: Make sure to set the hist_preprocessing_params inside scan_params.json correctly. Or tune them until you get what you need. 
8 | The default parameters in the scan_params.json work well for 20190209_deer_high_mu and 20190207_face_scanning_low_mu 9 | ''' 10 | #### Standard Library Imports 11 | import os 12 | 13 | #### Library imports 14 | import numpy as np 15 | import matplotlib.pyplot as plt 16 | from IPython.core import debugger 17 | breakpoint = debugger.set_trace 18 | 19 | #### Local imports 20 | from scan_data_utils import * 21 | from research_utils.plot_utils import * 22 | from research_utils.io_ops import load_json 23 | 24 | if __name__=='__main__': 25 | ## Load parameters shared by all 26 | scan_data_params = load_json('scan_params.json') 27 | io_dirpaths = load_json('io_dirpaths.json') 28 | raw_hist_data_base_dirpath = io_dirpaths['hist_data_base_dirpath'] 29 | preprocessed_hist_data_base_dirpath = io_dirpaths['preprocessed_hist_data_base_dirpath'] 30 | 31 | ## Set scene that will be processed 32 | scene_id = '20190209_deer_high_mu/free' 33 | # scene_id = '20190209_deer_high_mu/det' 34 | # scene_id = '20190209_deer_high_mu/ext' 35 | # scene_id = '20190209_deer_high_mu/ext_5%' 36 | # scene_id = '20190207_face_scanning_low_mu/free' 37 | # scene_id = '20190207_face_scanning_low_mu/det' 38 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 39 | # scene_id = '20190207_face_scanning_low_mu/ext_opt_filtering' 40 | # scene_id = '20190207_face_scanning_low_mu/ext_5%' 41 | # scene_id = '20181112_blocks/extreme_flux' 42 | # scene_id = '20181112_blocks/high_flux' 43 | # scene_id = '20181112_blocks/med_flux' 44 | # scene_id = '20181112_blocks/low_flux' 45 | # scene_id = '20181105_face/low_flux' 46 | scene_id = '20181105_face/opt_flux' 47 | # scene_id = '20181105_tajmahal' 48 | assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id) 49 | 50 | ## Get dirpaths 51 | raw_hist_dirpath = os.path.join(raw_hist_data_base_dirpath, scene_id) 52 | hist_dirpath = os.path.join(preprocessed_hist_data_base_dirpath, scene_id) 53 | os.makedirs(hist_dirpath, exist_ok=True) 54 | 55 | ## Get parameters for raw hist image 56 | (nr, nc) = (scan_data_params["scene_params"][scene_id]["n_rows_fullres"], scan_data_params["scene_params"][scene_id]["n_cols_fullres"]) 57 | 58 | ## Set histogram parameters 59 | laser_rep_freq = scan_data_params['laser_rep_freq'] # most data acquisitions were done with a 10MHz laser rep freq 60 | laser_rep_period = (1. 
/ laser_rep_freq)*1e12 # in picosecs 61 | dead_time = scan_data_params['dead_time'] # In picoseconds 62 | max_n_tstamps = int(1e8) # discard timestamps if needed 63 | max_tbin = laser_rep_period # Period in ps 64 | min_tbin_size = scan_data_params['min_tbin_size'] # Bin size in ps 65 | hist_tbin_factor = 1.0 # increase tbin size to make histogramming faster 66 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 67 | n_hist_bins = get_nt(max_tbin, hist_tbin_size) 68 | 69 | ## Load Raw Hist Image if it exists, otherwise, create it 70 | raw_hist_img_fname = 'raw-' + get_hist_img_fname(nr, nc, hist_tbin_size, max_tbin) 71 | raw_hist_img_fpath = os.path.join(raw_hist_dirpath, raw_hist_img_fname) 72 | raw_hist_img_params_str = raw_hist_img_fpath.split('raw-hist-img_')[-1].split('.npy')[0] 73 | raw_hist_img_dims = raw_hist_img_params_str.split('_tres-')[0] 74 | 75 | ## Load histogram image 76 | raw_hist_img = np.load(raw_hist_img_fpath) 77 | 78 | ##### BEGIN PRE-PROCESSING 79 | 80 | ## Histogram pre-processing parameters 81 | hist_start_time = scan_data_params['hist_preprocessing_params']['hist_start_time'] # in ps. used to crop hist 82 | hist_end_time = scan_data_params['hist_preprocessing_params']['hist_end_time'] # in ps. used to crop hist 83 | hist_shift_time = scan_data_params['hist_preprocessing_params']['hist_shift_time'] # circshift histograms forward so they are not close to boundary 84 | hist_start_bin = time2bin(hist_start_time, hist_tbin_size) 85 | hist_end_bin = time2bin(hist_end_time, hist_tbin_size) 86 | hist_shift_bin = time2bin(hist_shift_time, hist_tbin_size) 87 | hist_img_tau = hist_end_time - hist_start_time 88 | 89 | ## Pre-process and save hist image 90 | # Crop beginning and end to remove system inter-reflections 91 | hist_img = raw_hist_img[..., hist_start_bin:hist_end_bin] 92 | # Circ shift to move peaks away from 0th bin 93 | hist_img = np.roll(hist_img, hist_shift_bin) 94 | hist_img_fname = get_hist_img_fname(nr, nc, int(hist_tbin_size), hist_img_tau) 95 | np.save(os.path.join(hist_dirpath, hist_img_fname), hist_img) 96 | 97 | ## Plot center histogram 98 | plt.clf() 99 | plt.subplot(2,1,1) 100 | plt.plot(hist_img[nr//2, nc//2,:]) 101 | plt.title("Center Pixel Histogram") 102 | plt.subplot(2,1,2) 103 | plt.imshow(np.argmax(hist_img, axis=-1)) 104 | plt.title("Argmax of histogram image") -------------------------------------------------------------------------------- /process_hist_img.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | import os 3 | 4 | #### Library imports 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | from scipy.ndimage import gaussian_filter, median_filter 8 | from IPython.core import debugger 9 | breakpoint = debugger.set_trace 10 | 11 | #### Local imports 12 | from scan_data_utils import * 13 | from research_utils.timer import Timer 14 | from research_utils.plot_utils import * 15 | from depth_decoding import IdentityCoding 16 | from research_utils.io_ops import load_json 17 | from research_utils import np_utils, improc_ops 18 | 19 | depth_offset = 0.0 20 | 21 | def depths2xyz(depths, fov_major_axis=40, mask=None): 22 | (n_rows, n_cols) = depths.shape 23 | (fov_horiz, fov_vert) = improc_ops.calc_fov(n_rows, n_cols, fov_major_axis) 24 | (phi_img, theta_img) = improc_ops.calc_spherical_coords(fov_horiz, fov_vert, n_rows, n_cols, is_deg=True) 25 | depths+=depth_offset 26 | (x,y,z) = improc_ops.spherical2xyz(depths, 
phi_img, theta_img) 27 | zmap = np.array(z) 28 | if(not (mask is None)): 29 | (x,y,z) = (x[mask], y[mask], z[mask]) 30 | zmap[np.logical_not(mask)] = np.nan 31 | xyz = np.concatenate((x.flatten()[...,np.newaxis], y.flatten()[...,np.newaxis], z.flatten()[...,np.newaxis]), axis=-1) 32 | return (xyz, zmap) 33 | 34 | 35 | def compose_output_fname(coding_id, n_codes, rec_algo, account_irf=True): 36 | out_fname = '{}_ncodes-{}_rec-{}'.format(coding_id, n_codes, rec_algo) 37 | if(account_irf): 38 | return out_fname + '-irf' 39 | else: 40 | return out_fname 41 | 42 | 43 | if __name__=='__main__': 44 | 45 | ## Load parameters shared by all 46 | scan_data_params = load_json('scan_params.json') 47 | io_dirpaths = load_json('io_dirpaths.json') 48 | hist_img_base_dirpath = io_dirpaths["preprocessed_hist_data_base_dirpath"] 49 | 50 | ## Load processed scene: 51 | scene_id = '20190209_deer_high_mu/free' 52 | # scene_id = '20190207_face_scanning_low_mu/free' 53 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 54 | assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id) 55 | hist_dirpath = os.path.join(hist_img_base_dirpath, scene_id) 56 | 57 | ## Histogram image params 58 | downsamp_factor = 1 # Spatial downsample factor 59 | hist_tbin_factor = 1.0 # increase tbin size to make histogramming faster 60 | n_rows_fullres = scan_data_params['scene_params'][scene_id]['n_rows_fullres'] 61 | n_cols_fullres = scan_data_params['scene_params'][scene_id]['n_cols_fullres'] 62 | (nr, nc) = (n_rows_fullres // downsamp_factor, n_cols_fullres // downsamp_factor) # dims for face_scanning scene 63 | min_tbin_size = scan_data_params['min_tbin_size'] # Bin size in ps 64 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 65 | hist_img_tau = scan_data_params['hist_preprocessing_params']['hist_end_time'] - scan_data_params['hist_preprocessing_params']['hist_start_time'] 66 | nt = get_nt(hist_img_tau, hist_tbin_size) 67 | 68 | ## Load histogram image 69 | hist_img_fname = get_hist_img_fname(nr, nc, hist_tbin_size, hist_img_tau, is_unimodal=False) 70 | hist_img_fpath = os.path.join(hist_dirpath, hist_img_fname) 71 | hist_img = np.load(hist_img_fpath) 72 | 73 | ## Shift histogram image if needed 74 | global_shift = 0 75 | hist_img = np.roll(hist_img, global_shift, axis=-1) 76 | 77 | 78 | denoised_hist_img = gaussian_filter(hist_img, sigma=0.75, mode='wrap', truncate=1) 79 | (tbins, tbin_edges) = get_hist_bins(hist_img_tau, hist_tbin_size) 80 | 81 | ## Load IRF 82 | irf_tres = scan_data_params['min_tbin_size'] # in picosecs 83 | irf = get_scene_irf(scene_id, nt, tlen=hist_img_tau, is_unimodal=False) 84 | 85 | ## Decode depths 86 | c_obj = IdentityCoding(hist_img.shape[-1], h_irf=irf, account_irf=True) 87 | # Get ground truth depths using a denoised histogram image 88 | matchfilt_tof = c_obj.max_peak_decoding(hist_img, rec_algo_id='matchfilt').squeeze()*hist_tbin_size 89 | matchfilt_depths = time2depth(matchfilt_tof*1e-12) 90 | (matchfilt_xyz, matchfilt_zmap) = depths2xyz(time2depth(matchfilt_tof*1e-12), fov_major_axis=scan_data_params['fov_major_axis'], mask=None) 91 | 92 | argmax_tof = hist_img.argmax(axis=-1)*hist_tbin_size 93 | argmax_depths = time2depth(argmax_tof*1e-12) 94 | (argmax_xyz, argmax_zmap) = depths2xyz(time2depth(argmax_tof*1e-12), fov_major_axis=scan_data_params['fov_major_axis'], mask=None) 95 | 96 | ## estimated signal to background ratio 97 | nphotons = hist_img.sum(axis=-1) 98 | bkg_per_bin = np.median(hist_img, 
axis=-1) 99 | signal = np.sum(hist_img - bkg_per_bin[...,np.newaxis], axis=-1) 100 | signal[signal < 0] = 0 101 | bkg = bkg_per_bin*nt 102 | sbr = signal / (bkg + 1e-3) 103 | 104 | 105 | plt.clf() 106 | plt.subplot(2,2,1) 107 | plt.imshow(matchfilt_tof); plt.title("MatchFilt Depths") 108 | plt.subplot(2,2,2) 109 | plt.imshow(argmax_tof); plt.title("Argmax Depths") 110 | plt.subplot(2,2,3) 111 | plt.imshow(signal); plt.title("Est. Signal Lvl") 112 | plt.subplot(2,2,4) 113 | plt.imshow(bkg); plt.title("Est. Bkg Lvl") -------------------------------------------------------------------------------- /read_fullscan_hydraharp_t3.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This script reads all the raw timestamp files for a scan, and saves a raw histogram image for that file 3 | If the raw histogram image already exists, this script does nothing 4 | ''' 5 | #### Standard Library Imports 6 | import glob 7 | import os 8 | 9 | #### Library imports 10 | import numpy as np 11 | import matplotlib.pyplot as plt 12 | from IPython.core import debugger 13 | breakpoint = debugger.set_trace 14 | 15 | #### Local imports 16 | from scan_data_utils import * 17 | from pileup_correction import * 18 | from read_hydraharp_outfile_t3 import * 19 | from read_positions_file import read_positions_file, get_coords, POSITIONS_FNAME 20 | from research_utils.timer import Timer 21 | from research_utils.plot_utils import * 22 | # from toflib.coding import IdentityCoding, GrayCoding, TruncatedFourierCoding, WalshHadamardCoding, WalshHadamardBinaryCoding 23 | from research_utils.io_ops import load_json, write_json 24 | 25 | def update_scene_scan_params(scan_params, scene_id, n_rows_fullres, n_cols_fullres): 26 | if(scene_id in scan_params["scene_params"].keys()): 27 | # Swap rows and cols to transpose image 28 | scan_params["scene_params"][scene_id]["n_rows_fullres"] = n_cols_fullres 29 | scan_params["scene_params"][scene_id]["n_cols_fullres"] = n_rows_fullres 30 | else: 31 | # Swap rows and cols to transpose image 32 | scan_params["scene_params"][scene_id] = scan_params["scene_params"]["default"] 33 | scan_params["scene_params"][scene_id]["n_rows_fullres"] = n_cols_fullres 34 | scan_params["scene_params"][scene_id]["n_cols_fullres"] = n_rows_fullres 35 | write_json("scan_params.json", scan_params) 36 | 37 | 38 | if __name__=='__main__': 39 | 40 | ## Load parameters shared by all 41 | scan_data_params = load_json('scan_params.json') 42 | io_dirpaths = load_json('io_dirpaths.json') 43 | timestamp_data_base_dirpath = io_dirpaths['timestamp_data_base_dirpath'] 44 | hist_data_base_dirpath = io_dirpaths['hist_data_base_dirpath'] 45 | os.makedirs(hist_data_base_dirpath, exist_ok=True) 46 | 47 | lres_mode = False # Load a low-res version of the image 48 | lres_factor = 1 # Load a low-res version of the image 49 | overwrite_hist_img = False 50 | timestamp_data_base_dirpath = io_dirpaths['timestamp_data_base_dirpath'] 51 | 52 | ## Set scene that will be processed 53 | # scene_id = '20190209_deer_high_mu/free' 54 | # scene_id = '20190209_deer_high_mu/det' 55 | # scene_id = '20190209_deer_high_mu/ext' 56 | # scene_id = '20190209_deer_high_mu/ext_5%' 57 | scene_id = '20190207_face_scanning_low_mu/free' 58 | # scene_id = '20190207_face_scanning_low_mu/det' 59 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 60 | # scene_id = '20190207_face_scanning_low_mu/ext_opt_filtering' 61 | # scene_id = '20190207_face_scanning_low_mu/ext_5%' 62 | # scene_id = '20181112_blocks/extreme_flux' 
63 | # scene_id = '20181112_blocks/high_flux' 64 | # scene_id = '20181112_blocks/med_flux' 65 | # scene_id = '20181112_blocks/low_flux' 66 | # scene_id = '20181105_face/low_flux' 67 | # scene_id = '20181105_face/opt_flux' 68 | # scene_id = '20181105_tajmahal' 69 | assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id) 70 | dirpath = os.path.join(timestamp_data_base_dirpath, scene_id) 71 | hist_dirpath = os.path.join(hist_data_base_dirpath, scene_id) 72 | os.makedirs(hist_dirpath, exist_ok=True) 73 | 74 | ## Read positions data 75 | pos_data = read_positions_file(os.path.join(dirpath, POSITIONS_FNAME)) 76 | (x_coords, y_coords) = get_coords(pos_data) 77 | n_rows_fullres = y_coords.size 78 | n_cols_fullres = x_coords.size 79 | update_scene_scan_params(scan_data_params, scene_id, n_rows_fullres, n_cols_fullres) 80 | 81 | ## Get list of all files in the directory, and the scan parameters 82 | fpaths_list = glob.glob(os.path.join(dirpath, 't3mode_*_*_*.out')) 83 | fnames_list = [os.path.basename(fpath) for fpath in fpaths_list] 84 | n_params_in_fname = count_params_in_fname(fnames_list[0]) 85 | assert(n_params_in_fname == 3), 'Invalid fname {}. Expected fname with 3 params'.format(fnames_list[0]) 86 | scan_pos_indeces = np.array([parse_scan_pos_idx(fname) for fname in fnames_list]) 87 | is_long_cable_flags = np.array([parse_is_long_cable_flag(fname) for fname in fnames_list]) 88 | delay_param_list = np.array([parse_delay_param(fname) for fname in fnames_list]) 89 | 90 | ## sort the filenames to match the pos data 91 | sort_indeces = np.argsort(scan_pos_indeces) 92 | scan_pos_indeces = scan_pos_indeces[sort_indeces] 93 | is_long_cable_flags = is_long_cable_flags[sort_indeces] 94 | delay_param_list = delay_param_list[sort_indeces] 95 | fpaths_list = [fpaths_list[sort_idx] for sort_idx in sort_indeces] 96 | fnames_list = [fnames_list[sort_idx] for sort_idx in sort_indeces] 97 | 98 | ## Change all vectors into images to know exactly which pixel corresponds to each ID 99 | fpaths_img = vector2img(np.array(fpaths_list), n_rows_fullres, n_cols_fullres) 100 | fnames_img = vector2img(np.array(fnames_list), n_rows_fullres, n_cols_fullres) 101 | scan_pos_indeces_img = vector2img(scan_pos_indeces, n_rows_fullres, n_cols_fullres).astype(np.float32) 102 | if(lres_mode): 103 | fpaths_img = fpaths_img[0::lres_factor,0::lres_factor] 104 | fnames_img = fnames_img[0::lres_factor,0::lres_factor] 105 | scan_pos_indeces_img = scan_pos_indeces_img[0::lres_factor,0::lres_factor] 106 | (nr, nc) = fnames_img.shape 107 | 108 | ## Set histogram parameters 109 | laser_rep_freq = scan_data_params['laser_rep_freq'] # most data acquisitions were done with a 10MHz laser rep freq 110 | laser_rep_period = (1. 
/ laser_rep_freq)*1e12 # in picosecs 111 | dead_time = scan_data_params['dead_time'] # In picoseconds 112 | max_n_tstamps = int(1e8) # discard timestamps if needed 113 | max_tbin = laser_rep_period # Period in ps 114 | min_tbin_size = scan_data_params['min_tbin_size'] # Bin size in ps 115 | hist_tbin_factor = 1.0 # increase tbin size to make histogramming faster 116 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 117 | n_hist_bins = get_nt(max_tbin, hist_tbin_size) 118 | 119 | ## allocate histogram image 120 | n_data_files = len(fpaths_list) 121 | n_scan_points = np.min([pos_data.shape[0], n_data_files]) 122 | assert(n_data_files >= pos_data.shape[0]), "Number of data files needs to be >= number of positions" 123 | print("n data files = {}".format(n_data_files)) 124 | print("n positions = {}".format(pos_data.shape[0])) 125 | assert(n_data_files == pos_data.shape[0]), "Number of data files does not match number of points in pos data" 126 | n_scan_points = len(fpaths_list) 127 | histograms = np.zeros((n_scan_points, n_hist_bins)) 128 | raw_hist_img = np.zeros((nr, nc, n_hist_bins)) 129 | n_laser_cycles_img = np.zeros((nr, nc)) 130 | n_empty_laser_cycles_img = np.zeros((nr, nc)) 131 | 132 | ## Load Raw Hist Image if it exists, otherwise, create it 133 | raw_hist_img_fname = 'raw-' + get_hist_img_fname(nr, nc, hist_tbin_size, max_tbin) 134 | raw_hist_img_fpath = os.path.join(hist_dirpath, raw_hist_img_fname) 135 | raw_hist_img_params_str = raw_hist_img_fpath.split('raw-hist-img_')[-1].split('.npy')[0] 136 | raw_hist_img_dims = raw_hist_img_params_str.split('_tres-')[0] 137 | 138 | if(os.path.exists(raw_hist_img_fpath) and (not overwrite_hist_img)): 139 | raw_hist_img = np.load(raw_hist_img_fpath) 140 | else: 141 | # timestamps_arr = [] 142 | # sync_vec_arr = [] 143 | # For each file load tstamps, make histogram, and store in hist_img 144 | for i in range(nr): 145 | for j in range(nc): 146 | fpath = fpaths_img[i,j] 147 | fname = fnames_img[i,j] 148 | scan_pos_idx = scan_pos_indeces_img[i,j] 149 | print("{}, {}".format(scan_pos_idx, fname)) 150 | sync_vec, dtime_vec = read_hydraharp_outfile_t3(fpath) 151 | # timestamps_arr.append(dtime_vec) 152 | # sync_vec_arr.append(dtime_vec) 153 | (counts, bin_edges, bins) = timestamps2histogram(dtime_vec, max_tbin=max_tbin, min_tbin_size=min_tbin_size, hist_tbin_factor=hist_tbin_factor) 154 | n_laser_cycles_img[i,j] = sync_vec.max() 155 | n_empty_laser_cycles_img[i,j] = calc_n_empty_laser_cycles(sync_vec) 156 | # roll_amount = calc_hist_shift(fname, hist_tbin_size) 157 | roll_amount = 0 158 | counts = np.roll(counts, int(roll_amount)) 159 | raw_hist_img[i,j,:] = counts 160 | np.save(raw_hist_img_fpath, raw_hist_img) 161 | np.save(os.path.join(hist_dirpath, 'n-laser-cycles-img_{}.npy'.format(raw_hist_img_dims)), n_laser_cycles_img) 162 | np.save(os.path.join(hist_dirpath, 'n-empty-laser-cycles-img_{}.npy'.format(raw_hist_img_dims)), n_empty_laser_cycles_img) 163 | 164 | ## Save intensity image 165 | plt.clf() 166 | plt.imshow(raw_hist_img.sum(axis=-1)) 167 | nphotons_img_fname = raw_hist_img_fname.replace('raw-hist-img', 'raw-nphotons-img') 168 | plt.title(nphotons_img_fname) 169 | plt.pause(0.1) 170 | # save_currfig_png(hist_dirpath, nphotons_img_fname) 171 | plt.pause(0.1) 172 | # save_img(raw_hist_img.sum(axis=-1), hist_dirpath, nphotons_img_fname ) 173 | plt.clf() 174 | plt.imshow(raw_hist_img.max(axis=-1)) 175 | maxpeak_img_fname = raw_hist_img_fname.replace('raw-hist-img', 'raw-maxpeak-img') 
176 | plt.title(maxpeak_img_fname) 177 | plt.pause(0.1) 178 | # save_currfig_png(hist_dirpath, maxpeak_img_fname) 179 | plt.pause(0.1) 180 | # save_img(raw_hist_img.sum(axis=-1), hist_dirpath, maxpeak_img_fname ) 181 | 182 | 183 | plt.clf() 184 | plt.imshow(raw_hist_img.argmax(axis=-1)) 185 | argmax_img_fname = raw_hist_img_fname.replace('raw-hist-img', 'raw-argmax-img') 186 | plt.title(argmax_img_fname) 187 | plt.pause(0.1) 188 | # save_currfig_png(hist_dirpath, maxpeak_img_fname) 189 | plt.pause(0.1) 190 | # save_img(raw_hist_img.sum(axis=-1), hist_dirpath, maxpeak_img_fname ) 191 | -------------------------------------------------------------------------------- /read_hydraharp_outfile_t3.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Dec 21 10:26:46 2017 4 | 5 | @author: compoptics, modified by felipe 08-26-2021 6 | 7 | Reads a single photon timestamp data file (t3mode_*.out file), i.e., a single point of the 3D stances 8 | 9 | """ 10 | #### Standard Library Imports 11 | import os 12 | import struct 13 | import sys 14 | sys.path.append('./tof-lib') 15 | 16 | #### Library imports 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | from IPython.core import debugger 20 | breakpoint = debugger.set_trace 21 | 22 | #### Local imports 23 | from research_utils.plot_utils import * 24 | from research_utils.timer import Timer 25 | from research_utils.io_ops import load_json 26 | from scan_data_utils import * 27 | from pileup_correction import * 28 | 29 | DEBUG = False 30 | 31 | def got_photon(time_tag, channel, dtime): 32 | print('CHN',channel,'time_tag',time_tag,'dtime',dtime) 33 | 34 | def got_overflow(count): 35 | print('OFL *',count) 36 | 37 | def read_hydraharp_outfile_t3(outfilename): 38 | """Function to read .out files (headerless) saved using tttr-t3.exe binary executable. 39 | Input: 40 | .out filename with full path 41 | Outputs: 42 | sync_vec is an array of sync pulse number which just counts up sequentially. A 43 | missing number means no photons were received for that cycle. If you 44 | know the rep rate of your laser, you can compute the time difference between 45 | consecutive sync pulses by taking the difference between their indexes and multiplying 46 | by the rep interval. 47 | 48 | dtime_vec is an array of 15-bit integer values (between 0 and 32767) that 49 | gives the time of arrival of the first photon with respect to its corresponding 50 | sync pulse number in the sync_vec array. 51 | You can convert dtime_vec to picoseconds using the bin resolution that was set during 52 | data acquisition on the TCSPC. (Default 8ps). 53 | eg.: if sync_vec[10] = 42 and dtime_vec[10]=325 it means the photon was recorded 54 | after 325x8ps after the 42nd sync pulse. Also note that since the 10^th index of 55 | the vector corresponds to the 42nd sync pulse, it means many of the initial pulses 56 | did not record any returning photons, so their sync pulse numbers were missing. 
57 | 58 | To avoid a truncated time response, you need to ensure that 59 | laser rep time interval <= (TCSPC bin resolution) x 32767 60 | """ 61 | T3WRAPAROUND = 1024 62 | BYTES_PER_RECORD = 4 63 | f = open(outfilename,"rb") 64 | f.seek(0,2) 65 | ftell = f.tell() 66 | # cast to int, even though ftell and BYTES_PER_RECORD are int, the division casts them to float 67 | num_recs = int(ftell/BYTES_PER_RECORD) 68 | f.seek(0) 69 | 70 | dtime_vec = np.zeros(num_recs, dtype=np.int32) 71 | sync_vec = np.zeros(num_recs, dtype=np.int32) 72 | 73 | ndrecs = 0 74 | 75 | overflow_correction= 0 76 | num_photons = 0 77 | num_overflow = 0 78 | 79 | for _ in range(num_recs): 80 | bytstr = f.read(4) 81 | if bytstr=='': 82 | print('file corrupt. wrong num of recs') 83 | break 84 | 85 | buf0 = struct.unpack('I',bytstr)[0] 86 | nsync = buf0 & 0b00000000000000000000001111111111 87 | dtime = (buf0 & 0b00000001111111111111110000000000)>>10 88 | channel = (buf0 & 0b01111110000000000000000000000000)>>25 89 | special = buf0 >> 31 90 | 91 | if not special: 92 | true_nsync = overflow_correction + nsync; 93 | sync_vec[ndrecs] = true_nsync 94 | dtime_vec[ndrecs] = dtime 95 | ndrecs+=1 96 | if DEBUG: 97 | got_photon(true_nsync, channel, dtime) 98 | num_photons+=1 #got a photon 99 | else: 100 | # special record - could be overflow or marker 101 | if channel==63: 102 | if nsync==0: 103 | overflow_correction += T3WRAPAROUND 104 | num_overflow+=1 105 | if DEBUG: 106 | got_overflow(1) 107 | else: 108 | overflow_correction += T3WRAPAROUND*nsync 109 | num_overflow+=nsync 110 | if DEBUG: 111 | got_overflow(nsync) 112 | if channel>=1 and channel<=15: 113 | print('marker received. something wrong with cables?') 114 | 115 | f.close() 116 | return (sync_vec[0:ndrecs],dtime_vec[0:ndrecs]) 117 | 118 | 119 | def read_hydraharp_outfile_t3_with_gate(outfilename): 120 | """Function to read .out files (headerless) saved using tttr-t3.exe binary executable. 121 | Input: 122 | .out filename with full path 123 | Outputs: 124 | sync_vec is an array of sync pulse number which just counts up sequentially. A 125 | missing number means no photons were received for that cycle. If you 126 | know the rep rate of your laser, you can compute the time difference between 127 | consecutive sync pulses by taking the difference between their indexes and multiplying 128 | by the rep interval. 129 | 130 | dtime_vec is an array of 15-bit integer values (between 0 and 32767) that 131 | gives the time of arrival of the first photon with respect to its corresponding 132 | sync pulse number in the sync_vec array. 133 | You can convert dtime_vec to picoseconds using the bin resolution that was set during 134 | data acquisition on the TCSPC. (Default 8ps). 135 | eg.: if sync_vec[10] = 42 and dtime_vec[10]=325 it means the photon was recorded 136 | after 325x8ps after the 42nd sync pulse. Also note that since the 10^th index of 137 | the vector corresponds to the 42nd sync pulse, it means many of the initial pulses 138 | did not record any returning photons, so their sync pulse numbers were missing. 
139 | 140 | To avoid a truncated time response, you need to ensure that 141 | laser rep time interval <= (TCSPC bin resolution) x 32767 142 | """ 143 | T3WRAPAROUND = 1024 144 | BYTES_PER_RECORD = 4 145 | f = open(outfilename,"rb") 146 | f.seek(0,2) 147 | ftell = f.tell() 148 | # cast to int, even though ftell and BYTES_PER_RECORD are int, the division casts them to float 149 | num_recs = int(ftell/BYTES_PER_RECORD) 150 | f.seek(0) 151 | 152 | dtime_vec = np.zeros(num_recs, dtype=np.int32) 153 | sync_vec = np.zeros(num_recs, dtype=np.int32) 154 | 155 | dtime_vec_gate = np.zeros(num_recs, dtype=np.int32) 156 | sync_vec_gate = np.zeros(num_recs, dtype=np.int32) 157 | 158 | ndrecs = 0 159 | ndrecs_gate = 0 160 | 161 | overflow_correction= 0 162 | num_photons = 0 163 | num_overflow = 0 164 | 165 | for _ in range(num_recs): 166 | bytstr = f.read(4) 167 | if bytstr=='': 168 | print('file corrupt. wrong num of recs') 169 | break 170 | 171 | buf0 = struct.unpack('I',bytstr)[0] 172 | nsync = buf0 & 0b00000000000000000000001111111111 173 | dtime = (buf0 & 0b00000001111111111111110000000000)>>10 174 | channel = (buf0 & 0b01111110000000000000000000000000)>>25 175 | special = buf0 >> 31 176 | 177 | if not special: 178 | true_nsync = overflow_correction + nsync; 179 | if channel==0: 180 | sync_vec[ndrecs] = true_nsync 181 | dtime_vec[ndrecs] = dtime 182 | ndrecs+=1 183 | elif channel==1: 184 | sync_vec_gate[ndrecs_gate] = true_nsync 185 | dtime_vec_gate[ndrecs_gate] = dtime 186 | ndrecs_gate+=1 187 | if DEBUG: 188 | got_photon(true_nsync, channel, dtime) 189 | if channel==0: 190 | num_photons+=1 #got a photon 191 | else: 192 | # special record - could be overflow or marker 193 | if channel==63: 194 | if nsync==0: 195 | overflow_correction += T3WRAPAROUND 196 | num_overflow+=1 197 | if DEBUG: 198 | got_overflow(1) 199 | else: 200 | overflow_correction += T3WRAPAROUND*nsync 201 | num_overflow+=nsync 202 | if DEBUG: 203 | got_overflow(nsync) 204 | if channel>=1 and channel<=15: 205 | print('marker received. something wrong with cables?') 206 | 207 | f.close() 208 | return (sync_vec[0:ndrecs],dtime_vec[0:ndrecs],sync_vec_gate[0:ndrecs_gate],dtime_vec_gate[0:ndrecs_gate]) 209 | 210 | def parse_scan_pos_idx(fname): return int(fname.split('.')[-2].split('_')[-1]) 211 | 212 | def parse_is_long_cable_flag(fname): return int(fname.split('.')[-2].split('_')[1]) 213 | 214 | def parse_delay_param(fname): return int(fname.split('.')[-2].split('_')[2]) 215 | 216 | def count_params_in_fname(fname): return len(fname.split('.')[-2].split('_'))-1 217 | 218 | def calc_hist_shift(fname, hist_tbin_size): 219 | '''Function to calc shift histogram to account for time delays in system due to cables and scan time 220 | ''' 221 | start_bin = 32408+2224 222 | end_bin = start_bin + 84096 223 | # delay = int(fname.split('.')[0].split('_')[-1]) # delay due to scan time? 224 | delay = parse_delay_param(fname) # delay due to scan time? 
225 | is_long_cable = parse_is_long_cable_flag(fname) 226 | cable_delay = is_long_cable * 81624 + (1 - is_long_cable) * 36864 227 | delay += cable_delay 228 | roll_amount = -(-81624 - 48384 + delay + start_bin) 229 | # roll_amount /= 8 230 | roll_amount /= hist_tbin_size # calc number of elements to shift 231 | # print((cable_delay + 48384 - delay + start_bin)) 232 | return roll_amount 233 | 234 | if __name__=='__main__': 235 | ## Load parameters shared by all 236 | io_dirpaths = load_json('io_dirpaths.json') 237 | scan_data_params = load_json('scan_params.json') 238 | 239 | data_base_dirpath = io_dirpaths['timestamp_data_base_dirpath'] 240 | ## Scene IDs: 241 | scene_id = '20190207_face_scanning_low_mu/free' 242 | fname = 't3mode_0_000000_10295.out' # Nose 243 | # fname = 't3mode_0_000000_14467.out' # Background 244 | # fname = 't3mode_0_000000_19723.out' # Upper right cheek 245 | # fname = 't3mode_0_000000_21345.out' # Right ear 246 | # scene_id = '20190207_face_scanning_low_mu/ground_truth' 247 | # fname = 't3mode_0_000000_10295.out' # Nose 248 | # fname = 't3mode_0_000000_14467.out' # Background 249 | # fname = 't3mode_0_000000_19723.out' # Upper right cheek 250 | # fname = 't3mode_0_000000_21345.out' # Right ear 251 | # scene_id = '20190205_face_scanning/free' 252 | # fname = 't3mode_0_000000_5757.out' 253 | # fname = 't3mode_0_000000_1757.out' 254 | # scene_id = '20190205_face_scanning/ext_opt_filtering' 255 | # fname = 't3mode_0_000000_5757.out' 256 | 257 | ## Read file 258 | dirpath = os.path.join(data_base_dirpath, scene_id) 259 | fpath = os.path.join(dirpath, fname) 260 | 261 | ## Timestamps and the laser pulse cycle in which each was captured 262 | sync_vec, dtime_vec = read_hydraharp_outfile_t3(fpath) 263 | 264 | ## discard timestamps to make things faster 265 | max_n_tstamps = int(1e8) 266 | abs_max_n_tstamps = dtime_vec.size 267 | max_n_tstamps = np.min([max_n_tstamps, dtime_vec.size]) 268 | (dtime_vec, sync_vec) = (dtime_vec[0:max_n_tstamps], sync_vec[0:max_n_tstamps]) 269 | 270 | ## Calc Parameters for Coates Estimator 271 | n_laser_cycles = sync_vec.max() 272 | n_empty_laser_cycles = calc_n_empty_laser_cycles(sync_vec) 273 | laser_rep_freq = scan_data_params['laser_rep_freq'] # most data acquisitions were done with a 10MHz laser rep freq 274 | laser_rep_period = (1. / laser_rep_freq)*1e12 # In picosecs 275 | total_acquisition_time = n_laser_cycles*laser_rep_period # in picosecs 276 | 277 | ## Create histogram 278 | # max_tbin / min_tbin_size determine the length of the histogram 279 | # If there are timestamps larger than max_tbin they will be discarded when building the histogram 280 | max_tbin = laser_rep_period # Period in ps 281 | min_tbin_size = scan_data_params['min_tbin_size'] # Bin size in ps 282 | hist_tbin_factor = 1.0 # increase tbin size to make histogramming faster 283 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 284 | (counts, bin_edges, bins) = timestamps2histogram(dtime_vec, max_tbin=max_tbin, min_tbin_size=min_tbin_size, hist_tbin_factor=hist_tbin_factor) 285 | n_hist_bins = counts.size 286 | 287 | ## Apply global shift to histograms.
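# (this circular shift compensates for fixed system delays from cables and scan timing so the returned pulse is not split across the histogram boundary; see calc_hist_shift above)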
288 | # if('calib' in fname): roll_amount = 0 289 | # else: roll_amount = calc_hist_shift(fname, hist_tbin_size) 290 | roll_amount=0 291 | if(scene_id == '20190207_face_scanning_low_mu/ext_opt_filtering'): 292 | roll_amount=-21400 293 | counts = np.roll(counts, int(roll_amount)) 294 | 295 | ## Verify that the number of laser cycles still matches n_laser_cycles gotten from sync_vec 296 | n_laser_cycles_validate = np.round(total_acquisition_time / (n_hist_bins*hist_tbin_size)) 297 | if(n_laser_cycles != n_laser_cycles_validate): print("WARNING: n_laser_cycles do not match calculated..") 298 | ## correct histogram (Coates estimate) 299 | dead_time = scan_data_params['dead_time'] # dead time in picoseconds 300 | if('free' in scene_id): 301 | corrected_counts = coates_est_free_running(counts, laser_rep_period, hist_tbin_size, dead_time, n_laser_cycles, hist_tbin_factor=hist_tbin_factor) 302 | elif(('ground_truth' in scene_id) or ('ext' in scene_id)): 303 | corrected_counts = coates_correction_sync_mode(counts, n_laser_cycles) 304 | else: 305 | corrected_counts = counts 306 | 307 | print("Hist Params:") 308 | print(" - n_bins = {}".format(bins.shape)) 309 | print(" - n_counts = {}".format(counts.sum())) 310 | print(" - n_timestamps_used = {}".format(dtime_vec.shape)) 311 | print(" - n_timestamps_avail = {}".format(abs_max_n_tstamps)) 312 | print(" - MIN timestamp in file (ps) = {}".format(min_tbin_size*np.min(dtime_vec))) 313 | print(" - MAX timestamp in file (ps) = {}".format(min_tbin_size*np.max(dtime_vec))) 314 | 315 | # Check if all laser pulses counter are unique 316 | # If all the pulses are unique, it usually means that we were operated in synchronous mode (ext triggering with laser). 317 | u, c = np.unique(sync_vec, return_counts=True) 318 | dup = u[c>1] 319 | if(dup.size > 0): print(" - sync_vec HAS duplicate entries, so multiple photons were detected within one laser period") 320 | else: print(" - sync_vec DOES NOT HAVE duplicate entries") 321 | 322 | # Plot 323 | plt.clf() 324 | plt.plot(bins, counts / n_laser_cycles, alpha=0.75, label='Uncorrected: ' + fname) 325 | plt.plot(bins, corrected_counts, alpha=0.4, label='Corrected: ' + fname) 326 | plt.title("Scene: {}".format(scene_id)) 327 | plt.legend(fontsize=12) 328 | plt.xlim([0,max_tbin]) 329 | plt.pause(0.1) 330 | # out_fname = '{}_{}'.format(scene_id.replace('/','--'),fname.replace('.out','')) 331 | # save_currfig_png(results_dirpath, out_fname) 332 | plt.pause(0.1) 333 | 334 | 335 | 336 | -------------------------------------------------------------------------------- /read_positions_file.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | import os 3 | 4 | #### Library imports 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | 8 | #### Local imports 9 | 10 | 11 | POSITIONS_FNAME='positions_file.txt' 12 | 13 | def read_positions_file(fpath): 14 | pos_data = np.genfromtxt(fpath, delimiter=',') 15 | return pos_data 16 | 17 | def get_coords(pos_data): 18 | (y_pos, x_pos) = (pos_data[:,1], pos_data[:,2]) 19 | (x_coords, x_counts) = np.unique(x_pos, return_counts=True) 20 | # x_coords[x_counts > 5] = x_coords 21 | (y_coords, y_counts) = np.unique(y_pos, return_counts=True) 22 | # y_coords[y_counts > 5] = y_coords 23 | return (x_coords, y_coords) 24 | 25 | if __name__=='__main__': 26 | base_dirpath = '/home/felipe/datasets/splidar-data-iccv2019/scanDataT3/data' 27 | ## Scene IDs: 28 | # 20181108_darkvase, 29 | # 20180922_face_no_ambient_highmu, 30 | # 
20190116_face_scanning_high_mu_no_ambient, 31 | # 20190205_face_scanning 32 | # 20190207_face_scanning_low_mu 33 | # 20180923_face_mu-0.38_lambda-0.005 34 | scene_id = '20190207_face_scanning_low_mu/free' 35 | fname = POSITIONS_FNAME 36 | 37 | # Read file 38 | dirpath = os.path.join(base_dirpath, scene_id) 39 | fpath = os.path.join(dirpath, fname) 40 | pos_data = read_positions_file(fpath) 41 | 42 | (x_coords, y_coords) = get_coords(pos_data) 43 | n_rows = y_coords.size 44 | n_cols = x_coords.size -------------------------------------------------------------------------------- /research_utils/.gitignore: -------------------------------------------------------------------------------- 1 | # VS Code 2 | .vscode 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | -------------------------------------------------------------------------------- /research_utils/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Felipe Gutierrez-Barragan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /research_utils/README.md: -------------------------------------------------------------------------------- 1 | # research_utils 2 | 3 | Utility functions that I often use in research code (plotting, signal processing, io, etc) 4 | 5 | * `plot_utils`: Useful functions for plotting and saving. Also contains ways to calculate error bars. 6 | * `signal_processing_ops`: Usefule signal processing operations 7 | 8 | ## Adding as submodule 9 | 10 | To add this as a submodule go to the repository and type the following commeands: 11 | 12 | 1. Add submodule: `git submodule add git@github.com:felipegb94/fgb_research_utils.git` 13 | 2. Commit submodule: `git commit -am "add research_utils submodule"` 14 | 3. Push: `git push origin master` 15 | 16 | ## Running the tests 17 | 18 | The tests should either be run from the top-level folder (i.e., the folder where this file is), or `research_utils` should be a package that can be imported. 
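For example, assuming `pytest` is installed in your environment, running `python -m pytest tests/` from this top-level folder should discover and run them.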
19 | 20 | -------------------------------------------------------------------------------- /research_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/felipegb94/WISC-SinglePhoton3DData/e0102416ffca8bfa730179a6a1f83a6de9240fc6/research_utils/__init__.py -------------------------------------------------------------------------------- /research_utils/environment.yml: -------------------------------------------------------------------------------- 1 | name: dev36 2 | channels: 3 | - conda-forge 4 | - http://conda.anaconda.org/gurobi 5 | - defaults 6 | dependencies: 7 | - _libgcc_mutex=0.1=conda_forge 8 | - _openmp_mutex=4.5=1_gnu 9 | - anyio=2.2.0=py36h5fab9bb_0 10 | - argon2-cffi=20.1.0=py36h8f6f2f9_2 11 | - async_generator=1.10=py_0 12 | - attrs=20.3.0=pyhd3deb0d_0 13 | - babel=2.9.0=pyhd3deb0d_0 14 | - backports=1.0=py_2 15 | - backports.functools_lru_cache=1.6.3=pyhd8ed1ab_0 16 | - bleach=3.3.0=pyh44b312d_0 17 | - brotlipy=0.7.0=py36h8f6f2f9_1001 18 | - ca-certificates=2020.12.5=ha878542_0 19 | - certifi=2020.12.5=py36h5fab9bb_1 20 | - cffi=1.14.5=py36hc120d54_0 21 | - chardet=4.0.0=py36h5fab9bb_1 22 | - cloudpickle=1.6.0=py_0 23 | - contextvars=2.4=py_0 24 | - cryptography=3.4.7=py36hb60f036_0 25 | - cycler=0.10.0=py_2 26 | - cytoolz=0.11.0=py36h8f6f2f9_3 27 | - dask-core=0.15.2=py36_0 28 | - dataclasses=0.8=pyh787bdff_0 29 | - decorator=5.0.5=pyhd8ed1ab_0 30 | - defusedxml=0.7.1=pyhd8ed1ab_0 31 | - entrypoints=0.3=pyhd8ed1ab_1003 32 | - enum34=1.1.10=py36h9f0ad1d_2 33 | - freetype=2.10.4=h0708190_1 34 | - icu=67.1=he1b5a44_0 35 | - idna=2.10=pyh9f0ad1d_0 36 | - imagecodecs-lite=2019.12.3=py36h92226af_3 37 | - imageio=2.9.0=py_0 38 | - immutables=0.15=py36h8f6f2f9_0 39 | - importlib-metadata=3.10.0=py36h5fab9bb_0 40 | - ipykernel=5.5.3=py36hcb3619a_0 41 | - ipython=5.8.0=py36_1 42 | - ipython_genutils=0.2.0=py_1 43 | - jinja2=2.11.3=pyh44b312d_0 44 | - jpeg=9d=h36c2ea0_0 45 | - json5=0.9.5=pyh9f0ad1d_0 46 | - jsonschema=3.2.0=pyhd8ed1ab_3 47 | - jupyter-packaging=0.7.12=pyhd8ed1ab_0 48 | - jupyter_client=6.1.12=pyhd8ed1ab_0 49 | - jupyter_core=4.7.1=py36h5fab9bb_0 50 | - jupyter_server=1.5.1=py36h5fab9bb_0 51 | - jupyterlab=3.0.12=pyhd8ed1ab_0 52 | - jupyterlab_pygments=0.1.2=pyh9f0ad1d_0 53 | - jupyterlab_server=2.4.0=pyhd8ed1ab_0 54 | - kiwisolver=1.3.1=py36h605e78d_1 55 | - lcms2=2.12=hddcbb42_0 56 | - ld_impl_linux-64=2.35.1=hea4e1c9_2 57 | - libblas=3.9.0=8_openblas 58 | - libcblas=3.9.0=8_openblas 59 | - libffi=3.3=h58526e2_2 60 | - libgcc-ng=9.3.0=h2828fa1_18 61 | - libgfortran-ng=9.3.0=hff62375_18 62 | - libgfortran5=9.3.0=hff62375_18 63 | - libgomp=9.3.0=h2828fa1_18 64 | - liblapack=3.9.0=8_openblas 65 | - libopenblas=0.3.12=pthreads_h4812303_1 66 | - libpng=1.6.37=h21135ba_2 67 | - libsodium=1.0.18=h36c2ea0_1 68 | - libstdcxx-ng=9.3.0=h6de172a_18 69 | - libtiff=4.2.0=hdc55705_0 70 | - libwebp-base=1.2.0=h7f98852_2 71 | - lz4-c=1.9.3=h9c3ff4c_0 72 | - markupsafe=1.1.1=py36h8f6f2f9_3 73 | - matplotlib=3.2.2=1 74 | - matplotlib-base=3.2.2=py36h5fdd944_1 75 | - mistune=0.8.4=py36h8f6f2f9_1003 76 | - nbclassic=0.2.6=pyhd8ed1ab_0 77 | - nbclient=0.5.3=pyhd8ed1ab_0 78 | - nbconvert=6.0.7=py36h5fab9bb_3 79 | - nbformat=5.1.3=pyhd8ed1ab_0 80 | - ncurses=6.2=h58526e2_4 81 | - nest-asyncio=1.5.1=pyhd8ed1ab_0 82 | - networkx=2.3=py_0 83 | - notebook=6.3.0=py36h5fab9bb_0 84 | - numpy=1.19.5=py36h2aa4a07_1 85 | - olefile=0.46=pyh9f0ad1d_1 86 | - openssl=1.1.1k=h7f98852_0 87 | - packaging=20.9=pyh44b312d_0 88 
| - pandoc=2.12=h7f98852_0 89 | - pandocfilters=1.4.2=py_1 90 | - pathlib=1.0.1=py36h5fab9bb_4 91 | - pexpect=4.8.0=pyh9f0ad1d_2 92 | - pickleshare=0.7.5=py_1003 93 | - pillow=8.1.2=py36ha6010c0_0 94 | - pip=21.0.1=pyhd8ed1ab_0 95 | - prometheus_client=0.10.0=pyhd8ed1ab_0 96 | - prompt_toolkit=1.0.15=py_1 97 | - ptyprocess=0.7.0=pyhd3deb0d_0 98 | - pycparser=2.20=pyh9f0ad1d_2 99 | - pygments=2.8.1=pyhd8ed1ab_0 100 | - pyopenssl=20.0.1=pyhd8ed1ab_0 101 | - pyparsing=2.4.7=pyh9f0ad1d_0 102 | - pyrsistent=0.17.3=py36h8f6f2f9_2 103 | - pysocks=1.7.1=py36h5fab9bb_3 104 | - python=3.6.13=hffdb5ce_0_cpython 105 | - python-dateutil=2.8.1=py_0 106 | - python_abi=3.6=1_cp36m 107 | - pytz=2021.1=pyhd8ed1ab_0 108 | - pywavelets=1.1.1=py36h92226af_3 109 | - pyzmq=22.0.3=py36h7068817_1 110 | - readline=8.0=he28a2e2_2 111 | - requests=2.25.1=pyhd3deb0d_0 112 | - scikit-image=0.17.2=py36h284efc9_4 113 | - scipy=1.5.3=py36h9e8f40b_0 114 | - send2trash=1.5.0=py_0 115 | - setuptools=49.6.0=py36h5fab9bb_3 116 | - simplegeneric=0.8.1=py_1 117 | - six=1.15.0=pyh9f0ad1d_0 118 | - sniffio=1.2.0=py36h5fab9bb_1 119 | - sqlite=3.35.3=h74cdb3f_0 120 | - terminado=0.9.4=py36h5fab9bb_0 121 | - testpath=0.4.4=py_0 122 | - tifffile=2019.7.26.2=py36_0 123 | - tk=8.6.10=h21135ba_1 124 | - toolz=0.11.1=py_0 125 | - tornado=6.1=py36h8f6f2f9_1 126 | - traitlets=4.3.3=py36h9f0ad1d_1 127 | - typing_extensions=3.7.4.3=py_0 128 | - urllib3=1.26.4=pyhd8ed1ab_0 129 | - wcwidth=0.2.5=pyh9f0ad1d_2 130 | - webencodings=0.5.1=py_1 131 | - wheel=0.36.2=pyhd3deb0d_0 132 | - xz=5.2.5=h516909a_1 133 | - zeromq=4.3.4=h9c3ff4c_0 134 | - zipp=3.4.1=pyhd8ed1ab_0 135 | - zlib=1.2.11=h516909a_1010 136 | - zstd=1.4.9=ha95c52a_0 137 | prefix: /home/felipe/miniconda3/envs/dev36 138 | -------------------------------------------------------------------------------- /research_utils/improc_ops.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | 3 | ## Library Imports 4 | import numpy as np 5 | from IPython.core import debugger 6 | breakpoint = debugger.set_trace 7 | 8 | ## Local Imports 9 | from .shared_constants import * 10 | 11 | def gamma_tonemap(img, gamma = 1/2.2): 12 | assert(gamma <= 1.0), "Gamma should be < 1" 13 | assert(0.0 <= gamma), "Gamma should be non-neg" 14 | tmp_img = np.power(img, gamma) 15 | return tmp_img / tmp_img.max() 16 | 17 | def calc_fov(n_rows, n_cols, fov_major_axis): 18 | ''' 19 | Calculate fov for horizontal and vertical axis 20 | ''' 21 | if(n_rows > n_cols): 22 | fov_vert = fov_major_axis 23 | fov_horiz = fov_major_axis * (float(n_cols) / float(n_rows)) 24 | else: 25 | fov_horiz = fov_major_axis 26 | fov_vert = fov_major_axis * (float(n_rows) / float(n_cols)) 27 | return (float(fov_horiz), float(fov_vert)) 28 | 29 | def calc_spherical_coords(fov_horiz, fov_vert, n_rows, n_cols, is_deg=True): 30 | ''' 31 | Given the FoV along each axis, generate a view direction image where each element corresponds to the angle made between 32 | the normal along the camera center and that pixel. 33 | Inputs: 34 | * fov_horiz: field of view along horizonatal direction (horizontal direction, columns direction) 35 | * fov_vert: field of view along vertical direction (vertical direction, rows direction) 36 | * n_rows: Number of rows 37 | * n_cols: Numer of columns 38 | * is_deg: Are FoV in radians or degrees 39 | Outputs: Spherical coordinates for each pixel. 
40 | * theta_img = angle with vertical direction (positive vertical direction, UP direction) 41 | * phi_img = angle with horizontal direction (positive horizontal direction, RIGHT direction) 42 | ''' 43 | offset = 90 if is_deg else 0.5*np.pi 44 | phi_range = offset - np.linspace(-0.5*fov_horiz,0.5*fov_horiz, n_cols) 45 | theta_range = np.linspace(-0.5*fov_vert,0.5*fov_vert, n_rows) + offset 46 | (phi_img, theta_img) = np.meshgrid(phi_range, theta_range) 47 | return (phi_img, theta_img) 48 | 49 | def spherical2xyz(r, phi, theta, is_deg=True): 50 | ''' 51 | Compute cartesian coordinates given spherical 52 | Here we assume that X, Y are the horizontal and vertical directions of the camera, 53 | and that positive Z points outwards of the camera 54 | This convention might be a bit different from what is in the Spherical coords Wikipedia article 55 | ''' 56 | if(is_deg): 57 | x = r*np.cos(phi*np.pi/180.)*np.sin(theta*np.pi/180.) 58 | y = r*np.cos(theta*np.pi/180.) 59 | z = r*np.sin(phi*np.pi/180.)*np.sin(theta*np.pi/180.) 60 | else: 61 | x = r*np.cos(phi)*np.sin(theta) 62 | y = r*np.cos(theta) 63 | z = r*np.sin(phi)*np.sin(theta) 64 | return (x,y,z) 65 | 66 | 67 | -------------------------------------------------------------------------------- /research_utils/io_ops.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | import os 3 | import glob 4 | import json 5 | import re 6 | import pickle 7 | 8 | #### Library imports 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | #### Local imports 13 | 14 | 15 | def load_json( json_filepath ): 16 | assert( os.path.exists( json_filepath )), "{} does not exist".format( json_filepath ) 17 | with open( json_filepath, "r" ) as json_file: 18 | return json.load( json_file ) 19 | 20 | def write_json( json_filepath, input_dict ): 21 | assert(isinstance(input_dict, dict)), "write_json only works if the input_dict is of type dict" 22 | with open(json_filepath, 'w') as output_file: 23 | json.dump(input_dict, output_file, indent=4) 24 | 25 | def save_object(obj, filepath): 26 | with open(filepath, 'wb') as output: # Overwrites any existing file. 27 | pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL) 28 | 29 | def load_object(filepath): 30 | with open(filepath, 'rb') as input_pickle_file: 31 | return pickle.load(input_pickle_file) 32 | 33 | def simple_grep( filepath, str_to_search, n_lines=-1 ): 34 | ''' 35 | Search text file and return the first n_lines containing that string 36 | If the line contains the string multiple times, it is only counted as a single line 37 | ''' 38 | assert(os.path.exists(filepath)), "{} does not exist".format(filepath) 39 | assert(n_lines >= -1), "n_lines needs to be -1 OR a non-negative integer. If it is -1 then we return all lines".format(filepath) 40 | f = open(filepath, "r") 41 | lines_with_str = [] 42 | n_lines_found = 0 43 | for line in f: 44 | # Return if we found all lines asked to. If n_lines ==-1 then we just continue searching for all lines 45 | if((n_lines_found >= n_lines) and (n_lines >= 0)): return lines_with_str 46 | # search if line contains string, and save the line if it does 47 | if re.search(str_to_search, line): 48 | n_lines_found += 1 49 | lines_with_str.append(line.split('\n')[0]) # Remove the new line characted if there is any 50 | return lines_with_str 51 | 52 | def get_dirnames_in_dir(dirpath, str_in_dirname=None): 53 | ''' 54 | Output all the dirnames inside of dirpath. 
55 | If str_in_dirname is given, only return the dirnames containing that string 56 | ''' 57 | assert(os.path.exists(dirpath)), "Input dirpath does not exist" 58 | all_dirnames = next(os.walk(dirpath))[1] 59 | # If no string pattern is given return all dirnames 60 | if(str_in_dirname is None): return all_dirnames 61 | filtered_dirnames = [] 62 | for curr_dirname in all_dirnames: 63 | if(str_in_dirname in curr_dirname): 64 | filtered_dirnames.append(curr_dirname) 65 | return filtered_dirnames 66 | 67 | def get_filepaths_in_dir(dirpath, match_str_pattern=None, only_filenames=False, keep_ext=True): 68 | ''' 69 | Return a list of all filepaths inside a directory that contain the match_str_pattern. 70 | If we only want the filenames and not the filepath, set only_filenames=True 71 | ''' 72 | assert(os.path.exists(dirpath)), "Input dirpath does not exist" 73 | if(match_str_pattern is None): all_matching_filepaths = glob.glob(dirpath) 74 | else: all_matching_filepaths = glob.glob(os.path.join(dirpath, '*' + match_str_pattern + '*')) 75 | filepaths = [] 76 | for fpath in all_matching_filepaths: 77 | if(os.path.isfile(fpath)): 78 | # if not file ext, remove it 79 | if(not keep_ext): fpath = os.path.splitext(fpath)[0] 80 | # if only_filanemaes, remove the dirpath and only return the filename 81 | if(only_filenames): filepaths.append(os.path.basename(fpath)) 82 | else: filepaths.append(fpath) 83 | return filepaths 84 | 85 | def get_multi_folder_paired_fnames(dirpath_list, valid_file_ext_list): 86 | ''' 87 | Go through each folder in dirpath_list, get all filenames with the file extension in valid_file_ext_list. 88 | Then check that across all folders you can find paired filenames. 89 | If so, then return the filenames 90 | If not, generate error 91 | Example: 92 | dirpath_list = ['dir1', 'dir2', 'dir3'] 93 | valid_file_ext_list = ['npy', 'npz'] 94 | We have the following files: 95 | folder1/f1.npy, 96 | folder1/f2.npy, 97 | 98 | folder2/f1.npy, 99 | folder2/f2.npy, 100 | folder3/xkcd.txt 101 | 102 | folder3/f1.npz 103 | folder3/f2.npz 104 | folder3/random.png 105 | this function will return: 106 | paired_filenames = ['f1', 'f2'] 107 | ext_per_dirpath = ['npy', 'npy', 'npz'] 108 | Why does this function exist? Because it is useful to organize small datasets in this way. 
109 | ''' 110 | assert(len(dirpath_list)>0), "empty dirpath list" 111 | n_dirpaths = len(dirpath_list) 112 | filenames_per_dirpath = [] 113 | file_ext_per_dirpath = [] 114 | all_filenames = [] 115 | n_filenames_per_dirpath = [] 116 | for i in range(n_dirpaths): 117 | curr_dirpath = dirpath_list[i] 118 | filenames_in_dir = [] 119 | for file_ext in valid_file_ext_list: 120 | curr_filenames = get_filepaths_in_dir(curr_dirpath, match_str_pattern='*.'+file_ext, only_filenames=True, keep_ext=False) 121 | if(len(curr_filenames) != 0): 122 | # We should only enter this condition once per deirpath 123 | filenames_in_dir += curr_filenames 124 | file_ext_per_dirpath.append(file_ext) 125 | filenames_per_dirpath.append(filenames_in_dir) 126 | all_filenames += filenames_in_dir 127 | n_filenames_per_dirpath.append(len(filenames_in_dir)) 128 | # Check that all dirpath have the same number of filemaes 129 | assert(len(set(n_filenames_per_dirpath)) == 1), "Check that all dirpaths have the same number of files" 130 | n_samples = n_filenames_per_dirpath[0] 131 | # Check that the filenames within each folder are the same 132 | # i.e., folder1/f1.npy, folder2/f1.npy , folder3/f1.npz 133 | paired_filenames = list(set(all_filenames)) 134 | assert(len(paired_filenames) == n_samples), "Filenames within each folder should match (folder1/f1.npy, folder2/f1.npy , folder3/f1.npz)" 135 | # Check that we only have one file extension per directory 136 | assert(len(file_ext_per_dirpath) == n_dirpaths) 137 | return (paired_filenames, file_ext_per_dirpath) 138 | 139 | def get_string_from_file(filepath): 140 | f = open(filepath) 141 | path = f.read().replace('\n','') 142 | f.close() 143 | return path -------------------------------------------------------------------------------- /research_utils/np_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Useful function when operating on numpy arrays 3 | This module should not depend on anything else other than numpy. 4 | ''' 5 | #### Standard Library Imports 6 | 7 | #### Library imports 8 | import numpy as np 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | #### Local imports 13 | from .shared_constants import * 14 | 15 | 16 | def vectorize_tensor(tensor, axis=-1): 17 | ''' 18 | Take an N-Dim Tensor and make it a 2D matrix. Leave the first or last dimension untouched, and basically squeeze the 1st-N-1 19 | dimensions. 20 | This is useful when applying operations on only the first or last dimension of a tensor. Makes it easier to input to different 21 | number of pytorch functions. 22 | ''' 23 | assert((axis==0) or (axis==-1)), 'Error: Input axis needs to be the first or last axis of tensor' 24 | tensor_shape = tensor.shape 25 | n_untouched_dim = tensor.shape[axis] 26 | n_elems = int(round(tensor.size / n_untouched_dim)) 27 | if(axis == -1): 28 | return (tensor.reshape((n_elems, n_untouched_dim)), tensor_shape) 29 | else: 30 | return (tensor.reshape((n_untouched_dim, n_elems)), tensor_shape) 31 | 32 | def unvectorize_tensor(tensor, tensor_shape): 33 | ''' 34 | Undo vectorize_tensor operation 35 | ''' 36 | return tensor.reshape(tensor_shape) 37 | 38 | def to_nparray( a ): 39 | ''' 40 | cast to np array. 
If a is a scalar, make it a 1D 1-element vector 41 | ''' 42 | # Don't do anything if a is already a numpy array 43 | if(isinstance(a, np.ndarray)): return a 44 | # Turn it into a numpy array 45 | a_arr = np.array(a) 46 | # if it was a scalar add dimension 47 | if(a_arr.ndim == 0): return a_arr[np.newaxis] 48 | # otherwise simply return the new nparray 49 | return a_arr 50 | 51 | def extend_tensor_circularly(tensor, axis=-1): 52 | ''' 53 | Take a tensor of any dimension and create a new tensor that is 3x longer along the specified axis 54 | We concatenate 3 copies of the tensor along the specified axis 55 | ''' 56 | return np.concatenate((tensor, tensor, tensor), axis=axis) 57 | 58 | def get_extended_domain(domain, axis=-1): 59 | ''' 60 | Take a domain defined between [min_val, max_val] with n elements. Extend it along both directions. 61 | So if we have the domain = [0, 1, 2, 3], then we output: [-4,-3,-2,-1, 0,1,2,3, 4,5,6,7] 62 | ''' 63 | n = domain.shape[axis] 64 | min_val = domain.min(axis=axis) 65 | assert(min_val >= 0), "get_extended_domain currently only works for non-negative domains" 66 | max_val = domain.max(axis=axis) 67 | delta = domain[1] - domain[0] 68 | domain_left = domain-(max_val + delta) 69 | domain_right = domain+(max_val + delta) 70 | return np.concatenate((domain_left, domain, domain_right), axis=axis) 71 | 72 | def calc_mean_percentile_errors(errors, percentiles=[0.5, 0.75, 0.95, 0.99]): 73 | ''' 74 | Sort the errors from lowest to highest. 75 | Given a list of percentiles calculate the mean of the sorted errors within each percentile. 76 | For instance, if percentiles=[0.5,0.75,1.0], then 77 | we calculate the mean of the lowest 50% errors, then the mean of the errors in the 50-75% percentile, 78 | and finally the errors in the 75-100% percentile. 79 | ''' 80 | errors_shape = errors.shape 81 | errors = errors.flatten() 82 | n_elems = errors.size 83 | # Sort errors 84 | sorted_errors = np.sort(errors) 85 | # Verify the input percentiles and find the indeces where we split the errors 86 | percentiles = to_nparray(percentiles) 87 | assert(not (np.any(percentiles > 1) or np.any(percentiles < 0))), "Percentiles need to be between 0 and 1" 88 | percentile_indeces = np.round(n_elems*percentiles).astype(np.int) 89 | # Calculate mean for each percentile 90 | percentile_mean_errors = np.zeros_like(percentiles) 91 | percentile_mask = np.zeros_like(errors)-1.
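# for each percentile bucket: average the sorted errors that fall within it, and mark which entries of the original (unsorted) error array contributed to that bucket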
92 | for i in range(percentiles.size): 93 | start_idx = 0 94 | if(i > 0): start_idx = percentile_indeces[i-1] 95 | end_idx = percentile_indeces[i] 96 | percentile_mean_errors[i] = np.mean(sorted_errors[start_idx:end_idx]) 97 | # Find which pixels were used to calculate this percentile mae 98 | low_error_threshold = sorted_errors[start_idx] 99 | high_error_threshold = sorted_errors[end_idx-1] 100 | percentile_mask[np.logical_and(errors >= low_error_threshold, errors <= high_error_threshold)] = i 101 | errors = errors.reshape(errors_shape) 102 | percentile_mask = percentile_mask.reshape(errors_shape) 103 | return (percentile_mean_errors, percentile_mask) 104 | 105 | def calc_eps_tolerance_error(errors, eps = 0.): 106 | assert(eps >= 0.), "eps should be non-negative" 107 | n_eps_tol_errors = np.sum(errors <= (eps + EPSILON)).astype(errors.dtype) 108 | return n_eps_tol_errors / errors.size 109 | 110 | def calc_error_metrics(errors, percentiles=[0.5, 0.75, 0.95, 0.99], eps_list=[1.], delta_eps = 1.): 111 | ''' 112 | delta_eps is the delta for the X_epsilon errors 113 | ''' 114 | metrics = {} 115 | metrics['mae'] = np.mean(errors) 116 | metrics['rmse'] = np.sqrt(np.mean(np.square(errors))) 117 | metrics['medae'] = np.median(errors) 118 | (percentile_mean_errors, percentile_mask) = calc_mean_percentile_errors(errors, percentiles=percentiles) 119 | metrics['percentile_mae'] = percentile_mean_errors 120 | metrics['percentiles'] = percentiles 121 | assert(delta_eps > 0.), "delta_eps should be nonnegative" 122 | scaled_errors = errors / delta_eps 123 | metrics['0_tol_errs'] = calc_eps_tolerance_error(scaled_errors, eps = 0.) 124 | for i in range(len(eps_list)): 125 | metrics['{}_tol_errs'.format(int(eps_list[i]))] = calc_eps_tolerance_error(scaled_errors, eps = eps_list[i]) 126 | return metrics 127 | 128 | def print_error_metrics(metrics, prefix=''): 129 | print("{} mae = {:.2f}".format(prefix, metrics['mae'])) 130 | # print("{} rmse = {:.2f}".format(prefix, metrics['rmse'])) 131 | print("{} medae = {:.2f}".format(prefix, metrics['medae'])) 132 | np.set_printoptions(suppress=True) 133 | print("{} percentile_mae = {}".format(prefix, metrics['percentile_mae'].round(decimals=2))) 134 | np.set_printoptions(suppress=False) 135 | print("{} 1_tol_errs = {:.2f}".format(prefix, metrics['1_tol_errs'])) 136 | # print("{} 0_tol_errs = {}".format(prefix, metrics['0_tol_errs'])) 137 | 138 | def domain2index(val, max_domain_val, n, is_circular=True): 139 | ''' 140 | Assumes domain is between 0 and max_domain_val 141 | ''' 142 | delta = max_domain_val / n 143 | indeces = np.array(np.round(val / delta)).astype(np.int32) 144 | if(is_circular): indeces[indeces == n] = 0 # Wrap around the indeces that were closer to the top boundary 145 | else: indeces[indeces == n] = n-1 # do not wrap around if domain is not circular 146 | return indeces 147 | 148 | def are_orthogonal(v, u): 149 | ''' 150 | Check if v is orthogonal to u 151 | ''' 152 | assert(v.ndim == 1), "v should be a vector" 153 | assert(u.ndim == 1), "u should be a vector" 154 | assert(u.shape == v.shape), "u and v should match dims" 155 | return np.abs(np.dot(v, u) / v.size) <= EPSILON 156 | 157 | def is_mutually_orthogonal(X): 158 | ''' 159 | Check if all cols are mutually orthogonal 160 | ''' 161 | assert(X.ndim == 2), "X should be a matrix" 162 | (n_rows, n_cols) = X.shape 163 | for i in range(n_cols): 164 | v_i = X[:, i] 165 | for j in range(n_cols): 166 | v_j = X[:, j] 167 | # If i equals j skip, If vectors are not orthogonal return false 168 | if((i != j) 
and (not are_orthogonal(v_i, v_j))): 169 | return False 170 | return True 171 | 172 | def circular_signal_fit(signal): 173 | ''' 174 | Fit a periodic signal whose domain is assumed to be between 0-1 175 | ''' 176 | ## Fit a cubic spline function to be able to generate any 177 | from scipy.interpolate import interp1d 178 | nt = signal.size 179 | # Extend x and y and interpolate 180 | ext_x_fullres = np.arange(-nt, 2*nt) * (1. / nt) 181 | ext_signal = np.concatenate((signal, signal, signal), axis=-1) 182 | f = interp1d(ext_x_fullres, ext_signal, axis=-1, kind='cubic') 183 | return f -------------------------------------------------------------------------------- /research_utils/plot_utils.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | import os 3 | 4 | #### Library imports 5 | import numpy as np 6 | import matplotlib as mpl 7 | import matplotlib.pyplot as plt 8 | from mpl_toolkits.axes_grid1 import make_axes_locatable 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | #### Local imports 13 | 14 | def get_ax_if_none(ax): 15 | if(ax is None): return plt.gca() 16 | else: return ax 17 | 18 | def save_currfig( dirpath = '.', filename = 'curr_fig', file_ext = 'png', use_imsave=False ): 19 | # Create directory to store figure if it does not exist 20 | os.makedirs(dirpath, exist_ok=True) 21 | # Pause to make sure plot is fully rendered and not warnings or errors are thown 22 | plt.pause(0.02) 23 | # If filename contains file extension then ignore the input file ext 24 | # Else add the input file etension 25 | if('.{}'.format(file_ext) in filename): filepath = os.path.join(dirpath, filename) 26 | else: filepath = os.path.join(dirpath, filename) + '.{}'.format(file_ext) 27 | plt.savefig(filepath, 28 | dpi=None, 29 | # facecolor='w', 30 | # edgecolor='w', 31 | # orientation='portrait', 32 | # papertype=None, 33 | transparent=True, 34 | bbox_inches='tight', 35 | # pad_inches=0.1, 36 | # metadata=None 37 | format=file_ext 38 | ) 39 | 40 | def save_img(data, out_dirpath, out_filename, file_ext='png'): 41 | # if(max_val is None): max_val = data.mean() + 3*data.std() 42 | out_filepath = os.path.join(out_dirpath, 'image_'+out_filename+'.'+file_ext) 43 | plt.imsave(out_filepath, data) 44 | 45 | def save_currfig_png( dirpath = '.', filename = 'curr_fig' ): 46 | save_currfig( dirpath = dirpath, filename = filename, file_ext = 'png' ) 47 | 48 | def save_ax(ax = None, dirpath = '.', filename = 'curr_fig', file_ext = 'png'): 49 | ax = get_ax_if_none(ax) 50 | plt.sca(ax) 51 | save_currfig(dirpath=dirpath, filename=filename, file_ext=file_ext) 52 | 53 | def save_ax_png(ax=None, dirpath = '.', filename = 'curr_fig' ): 54 | save_ax(ax=ax, dirpath = dirpath, filename = filename, file_ext = 'png' ) 55 | 56 | def save_rgb( dirpath = '.', filename = 'curr_rgb', file_ext='svg', rm_ticks=True): 57 | if(rm_ticks): remove_ticks() 58 | save_currfig(dirpath = dirpath, filename = filename, file_ext=file_ext) 59 | 60 | def plot_and_save_rgb(data, out_dirpath='./', out_filename='out_img', min_val=None, max_val=None, add_colorbar=False, rm_ticks=True, cbar_orientation='vertical', file_ext='png', save_fig=False, add_title=False, use_imsave=False): 61 | assert(data.ndim == 2 or data.ndim == 3), "Input data should have 2 dimensions" 62 | if(data.ndim == 3): assert(data.shape[-1] == 3 or data.shape[-1] == 1), "last image dimension needs to be 3 for RGB and 1 for mono image" 63 | if(min_val is None): min_val = np.min(data) 64 | 
if(max_val is None): max_val = np.max(data) 65 | (fig, ax) = plt.subplots() 66 | img = ax.imshow(data, vmin=min_val, vmax=max_val) 67 | if(rm_ticks): remove_ticks() 68 | if(add_colorbar): set_cbar(cbar_orientation) 69 | if(add_title): plt.title(out_filename) 70 | if(save_fig): 71 | save_rgb(out_dirpath, out_filename, file_ext=file_ext, rm_ticks=False) 72 | if(use_imsave): save_img(data, out_dirpath, out_filename, min_val=min_val, max_val=max_val, file_ext=file_ext) 73 | 74 | def set_cbar(img, cbar_orientation='vertical', fontsize=14): 75 | fig = plt.gcf() 76 | ax = plt.gca() 77 | divider = make_axes_locatable(ax) 78 | if(cbar_orientation == 'vertical'): 79 | # cax = divider.append_axes('right', size='4%', pad=0.05) 80 | cax = divider.append_axes('right', size='10%', pad=0.05) 81 | else: cax = divider.append_axes('bottom', size='7%', pad=0.05) 82 | cb = fig.colorbar(img, cax=cax, orientation=cbar_orientation) 83 | cb.ax.tick_params(labelsize=fontsize) 84 | plt.sca(ax) # Set axis back to what it was 85 | # fig.colorbar(img, orientation=cbar_orientation, ax=ax) 86 | 87 | def draw_histogram(x, height, draw_line=True): 88 | curr_ax = plt.gca() 89 | delta_x = x[1] - x[0] 90 | curr_ax.bar(x, height, align='center', alpha=0.5, width=delta_x) 91 | if(draw_line): 92 | curr_ax.plot(x, height, linewidth=2) 93 | 94 | def remove_ticks(ax = None): 95 | ax = get_ax_if_none(ax) 96 | ax.tick_params( 97 | axis='both', # changes apply to the x-axis and y-axis 98 | which='both', # both major and minor ticks are affected 99 | bottom=False, top=False, left=False, right=False, # ticks along the bottom edge are off 100 | labelbottom=False, labeltop=False, labelleft=False, labelright=False 101 | ) # labels along the bottom edge are off 102 | 103 | def remove_xticks(ax = None): 104 | ax = get_ax_if_none(ax) 105 | ax.tick_params( 106 | axis='x', # changes apply to the x-axis and y-axis 107 | which='both', # both major and minor ticks are affected 108 | bottom=False, top=False, # ticks along the bottom edge are off 109 | labelbottom=False, labeltop=False 110 | ) # labels along the bottom edge are off 111 | 112 | def remove_yticks(ax = None): 113 | ax = get_ax_if_none(ax) 114 | ax.tick_params( 115 | axis='y', # changes apply to the x-axis and y-axis 116 | which='both', # both major and minor ticks are affected 117 | left=False, right=False, # ticks along the bottom edge are off 118 | labelleft=False, labelright=False 119 | ) # labels along the bottom edge are off 120 | 121 | def set_ticks(ax = None, fontsize=12): 122 | ax = get_ax_if_none(ax) 123 | ax.tick_params( 124 | axis='both', # changes apply to the x-axis and y-axis 125 | which='both', # both major and minor ticks are affected 126 | labelsize=fontsize 127 | ) # labels along the bottom edge are off 128 | 129 | def set_xtick_labels(x_max_val, x_labels, ax=None ): 130 | ax = get_ax_if_none(ax) 131 | ax.set_xticks(np.linspace(0, x_max_val, len(x_labels))) 132 | ax.set_xticklabels(x_labels) 133 | 134 | def set_ytick_labels(y_max_val, y_labels, ax=None ): 135 | ax = get_ax_if_none(ax) 136 | ax.set_yticks(np.linspace(0, y_max_val, len(y_labels))) 137 | ax.set_yticklabels(y_labels) 138 | 139 | def set_plot_border_visibility(top_visibility=True, bottom_visibility=True, right_visibility=True, left_visibility=True): 140 | ax = plt.gca() 141 | ax.spines['top'].set_visible(top_visibility) 142 | ax.spines['bottom'].set_visible(bottom_visibility) 143 | ax.spines['right'].set_visible(right_visibility) 144 | ax.spines['left'].set_visible(left_visibility) 145 | 146 | def 
remove_box(): 147 | plt.box(False) 148 | 149 | def set_axis_linewidth(ax=None, width=1): 150 | ax = get_ax_if_none(ax) 151 | # Set with of axis lines 152 | for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(width) 153 | # Set with of ticks 154 | ax.xaxis.set_tick_params(width=0.75*width) 155 | ax.yaxis.set_tick_params(width=0.75*width) 156 | 157 | def set_xy_box(linewidth=None): 158 | gca = plt.gca() 159 | gca.spines["right"].set_visible(False) 160 | gca.spines["top"].set_visible(False) 161 | 162 | def set_x_box(): 163 | gca = plt.gca() 164 | gca.spines["right"].set_visible(False) 165 | gca.spines["top"].set_visible(False) 166 | gca.spines["left"].set_visible(False) 167 | remove_yticks() 168 | 169 | def set_x_arrow(ax=None): 170 | ax = get_ax_if_none(ax) 171 | ax.plot(1, 0, ">k", transform=ax.get_yaxis_transform(), clip_on=False) 172 | 173 | def set_y_arrow(ax=None): 174 | ax = get_ax_if_none(ax) 175 | ax.plot(0, 1, "^k", transform=ax.get_xaxis_transform(), clip_on=False) 176 | 177 | def set_xy_arrow(ax=None): 178 | set_x_arrow(ax) 179 | set_y_arrow(ax) 180 | 181 | def set_legend(legend_strings=None, ax=None, fontsize=12, loc='best', n_cols=1): 182 | ax = get_ax_if_none(ax) 183 | if(legend_strings is None): 184 | ax.legend(ncol=n_cols, loc=loc, fontsize=fontsize) 185 | else: 186 | ax.legend(legend_strings, ncol=n_cols, loc=loc, fontsize=fontsize) 187 | 188 | def update_fig_size(fig=None, height=4, width=6): 189 | if(fig is None): 190 | fig = plt.gcf() 191 | fig.set_size_inches(width, height, forward=True) 192 | else: 193 | fig.set_size_inches(width, height, forward=True) 194 | return fig 195 | 196 | def get_color_cycle(): 197 | return plt.rcParams['axes.prop_cycle'].by_key()['color'] 198 | 199 | def reset_color_cycle(): 200 | curr_version = mpl.__version__.split('.') 201 | if(int(curr_version[0]) <= 1): 202 | if((int(curr_version[0]) == 1) and (int(curr_version[1]) > 5)): plt.gca().set_prop_cycle(None) 203 | else: plt.gca().set_color_cycle(None) 204 | else: plt.gca().set_prop_cycle(None) 205 | 206 | def calc_errbars(true_vals, meas_vals, axis=0): 207 | ''' 208 | Useful function to calculate errors bars for the matplotlib plt.errbars function 209 | neg_mae corresponds to the mae of all values that were LOWER than the true_val 210 | pos_mae corresponds to the mae of all values that were HIGHER than the true_val 211 | ''' 212 | true_vals = true_vals.squeeze() 213 | meas_vals = meas_vals.squeeze() 214 | assert((axis==0) or (axis==-1)), 'Error: Input axis needs to be the first or last axis of tensor' 215 | assert((meas_vals.ndim==1) or (meas_vals.ndim==2)), 'Error: meas_vals needs to be a 1D or 2D tensor' 216 | assert((meas_vals.ndim-1) == true_vals.ndim), 'Error: true_vals needs to have 1 less dim than meas_vals, i.e., if meas_vals is 2D true_vals is 1D' 217 | # place measurements in the last dimension 218 | if(axis == 0): meas_vals = meas_vals.transpose() 219 | # Reshape meas_vals and true_vals into 2D and 1D tensors respectively. 220 | if(meas_vals.ndim==1): 221 | meas_vals = np.expand_dims(meas_vals, axis=0) 222 | true_vals = np.expand_dims(true_vals, axis=0) 223 | # Calculate errors 224 | errors = meas_vals - np.expand_dims(true_vals, axis=-1) 225 | # Figure out how many elements there are and pre-allocate arrays for the positive and negative errors 226 | # Note that this steps cannot be easily vectorized, because for each element there may be a different number of positive/negative errors. 
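# The (2, n_elems) array stacked at the end (neg_mae on top of pos_mae) matches the (2, N) yerr format accepted by matplotlib's plt.errorbar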
227 | n_elems = meas_vals.shape[0] # we know that the elems will be in the first dimension, because the measurements are always in the last dimension 228 | # Calculate positive errors mean absolute error 229 | pos_mae = np.zeros((n_elems,)) 230 | neg_mae = np.zeros((n_elems,)) 231 | for i in range(n_elems): 232 | curr_errors = errors[i] 233 | pos_mae[i] = np.mean(np.abs(curr_errors[curr_errors>=0])) 234 | neg_mae[i] = np.mean(np.abs(curr_errors[curr_errors<=0])) 235 | return np.stack((neg_mae, pos_mae), axis=0) 236 | 237 | def calc_mean_errbars(y, axis=0): 238 | y_mean = np.mean(y, axis=axis) 239 | y_negpos_mae = calc_errbars(y_mean, y, axis=axis) 240 | return y_negpos_mae 241 | 242 | def get_good_min_max_range(img): 243 | mean_val = np.mean(img) 244 | stddev_val = np.std(img) 245 | vmin = mean_val - 2*stddev_val 246 | vmax = mean_val + 2.5*stddev_val 247 | return (vmin, vmax) 248 | 249 | def enable_latex_fonts(): 250 | plt.rcParams.update({ 251 | "text.usetex": True, 252 | }) 253 | 254 | def disable_latex_fonts(): 255 | plt.rcParams.update({ 256 | "text.usetex": False, 257 | }) 258 | 259 | -------------------------------------------------------------------------------- /research_utils/py_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Useful functions in python 3 | ''' 4 | #### Standard Library Imports 5 | 6 | #### Library imports 7 | 8 | #### Local imports 9 | from research_utils.shared_constants import EPSILON 10 | 11 | 12 | def get_obj_functions(obj, filter_str: str = ''): 13 | ''' 14 | Get all callable functions of the object as a list of strings. 15 | filter_str only appends the functions that contain the filter_str 16 | ''' 17 | obj_funcs = [] 18 | for func_name in dir(obj): 19 | if((callable(getattr(obj, func_name))) and (filter_str in func_name)): 20 | obj_funcs.append(func_name) 21 | return obj_funcs 22 | 23 | def tuple2str(tup, separator: str = '') -> str: 24 | return separator.join(map(str,tup)) 25 | 26 | def is_float_equal(f1: float, f2: float) -> bool: 27 | return abs(f1-f2) <= EPSILON -------------------------------------------------------------------------------- /research_utils/scipy_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Useful function based on scipy 3 | ''' 4 | #### Standard Library Imports 5 | 6 | #### Library imports 7 | import numpy as np 8 | import scipy 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | #### Local imports 13 | from .shared_constants import * 14 | 15 | 16 | def npz2mat(npz_fpath): 17 | ''' 18 | Load an uncompressed .npz file and save it as a .mat MATLAB file 19 | ''' 20 | from scipy import io 21 | data_dict = np.load(npz_fpath) 22 | mat_fpath = npz_fpath.replace('.npz', '.mat') 23 | io.savemat(mat_fpath, data_dict) -------------------------------------------------------------------------------- /research_utils/shared_constants.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | 3 | ## Library Imports 4 | import numpy as np 5 | 6 | ## Local Imports 7 | 8 | HALFPI = 0.5*np.pi 9 | PI = np.pi 10 | TWOPI = 2*np.pi 11 | SPEED_OF_LIGHT = 3e8 # Speed of light in meters per second 12 | EPSILON = 1e-6 # Typically used to avoid division by 0 13 | 14 | -------------------------------------------------------------------------------- /research_utils/signalproc_ops.py: 
-------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | 3 | ## Library Imports 4 | import numpy as np 5 | import scipy 6 | from scipy import signal 7 | from IPython.core import debugger 8 | breakpoint = debugger.set_trace 9 | 10 | ## Local Imports 11 | from .np_utils import vectorize_tensor, unvectorize_tensor, to_nparray, get_extended_domain, extend_tensor_circularly 12 | from .shared_constants import * 13 | 14 | # Smoothing windows that are available to band-limit a signal 15 | SMOOTHING_WINDOWS = ['flat', 'impulse', 'hanning', 'hamming', 'bartlett', 'blackman'] 16 | 17 | def circular_conv( v1, v2, axis=-1 ): 18 | """Circular convolution: Calculate the circular convolution for vectors v1 and v2. v1 and v2 are the same size 19 | 20 | Args: 21 | v1 (numpy.ndarray): ...xN vector 22 | v2 (numpy.ndarray): ...xN vector 23 | Returns: 24 | v1convv2 (numpy.ndarray): convolution result. N x 1 vector. 25 | """ 26 | v1convv2 = np.fft.irfft( np.fft.rfft( v1, axis=axis ) * np.fft.rfft( v2, axis=axis ), axis=axis, n=v1.shape[axis] ) 27 | return v1convv2 28 | 29 | def circular_corr( v1, v2, axis=-1 ): 30 | """Circular correlation: Calculate the circular correlation for vectors v1 and v2. v1 and v2 are the same size 31 | 32 | Args: 33 | v1 (numpy.ndarray): Nx1 vector 34 | v2 (numpy.ndarray): Nx1 vector 35 | Returns: 36 | v1corrv2 (numpy.ndarray): correlation result. N x 1 vector. 37 | """ 38 | v1corrv2 = np.fft.ifft( np.fft.fft( v1, axis=axis ).conj() * np.fft.fft( v2, axis=axis ), axis=axis ).real 39 | return v1corrv2 40 | 41 | def circular_matched_filter(s, template, axis=-1): 42 | assert(s.shape[axis] == template.shape[axis]), "input signal and template dims need to match at axis" 43 | corrf = circular_corr(template, s, axis=axis) 44 | return np.argmax(corrf, axis=axis) 45 | 46 | def get_smoothing_window(N=100,window_len=11,window='flat'): 47 | """ 48 | smooth the data using a window with requested size. 49 | """ 50 | ## Validate Inputs 51 | if(N < window_len): 52 | raise ValueError("Input vector needs to be bigger than window size.") 53 | if(not window in SMOOTHING_WINDOWS): 54 | raise ValueError( "Chosen smoothing window needs to be one of: {}".format( SMOOTHING_WINDOWS ) ) 55 | ## Generate smoothing window 56 | w = np.zeros((N,)) 57 | if window == 'flat': #moving average 58 | w[0:int(window_len)]=np.ones(int(window_len),'d') 59 | elif window == 'impulse': 60 | w[0] = 1 61 | else: 62 | w[0:int(window_len)]=eval('np.'+window+'(int(window_len))') 63 | shift = np.argmax(w) 64 | w = np.roll(w, shift=-1*shift ) 65 | # Return normalized smoothhing window 66 | return (w / (w.sum())) 67 | 68 | def smooth(x, window_len=11, window='flat'): 69 | """smooth the data using a window with requested size. 70 | 71 | This method is based on the convolution of a scaled window with the signal. 72 | The signal is prepared by introducing reflected copies of the signal 73 | (with the window size) in both ends so that transient parts are minimized 74 | in the begining and end part of the output signal. 75 | 76 | input: 77 | x: the input signal 78 | window_len: the dimension of the smoothing window; should be an odd integer 79 | window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' 80 | flat window will produce a moving average smoothing. 
81 | 82 | output: 83 | the smoothed signal 84 | 85 | example: 86 | 87 | t=linspace(-2,2,0.1) 88 | x=sin(t)+randn(len(t))*0.1 89 | y=smooth(x) 90 | 91 | see also: 92 | 93 | numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve 94 | scipy.signal.lfilter 95 | 96 | TODO: the window parameter could be the window itself if an array instead of a string 97 | NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. 98 | """ 99 | #### Validate Inputs 100 | if( x.ndim != 1 ): 101 | raise ValueError("smooth only accepts 1 dimension arrays.") 102 | if( window_len < 3 ): 103 | return x 104 | if( window_len > len(x)): 105 | print("Not smoothing. signal is smaller than window lengths") 106 | return x 107 | # Get smoothing window 108 | w = get_smoothing_window( N = len( x ), window = window, window_len = window_len ) 109 | y = np.real( circular_conv( x, w ) ) / ( w.sum() ) 110 | # y = np.real(np.fft.ifft(np.fft.fft(x)*np.fft.fft(w)))/(w.sum()) 111 | #### The line below performs the same operation as the line above but slower 112 | # np.convolve(w/(w.sum()),s,mode='valid') 113 | return y 114 | 115 | def smooth_tensor(X, window_duty=0.1, window='hanning'): 116 | assert(window_duty < 1.0), "window_duty needs to be less than one" 117 | assert(window_duty > 0.0), "window_duty needs to be greater than 0" 118 | X_shape = X.shape 119 | n = X.shape[-1] 120 | n_arrays = int(X.size / n) 121 | X = X.reshape((n_arrays,n)) 122 | 123 | window = get_smoothing_window(N=n, window_len=window_duty*n, window=window) 124 | window = window.reshape((1,n)) 125 | 126 | Y = np.real( circular_conv(X, window) ) / (window.sum()) 127 | 128 | return Y.reshape(X_shape) 129 | 130 | def smooth_codes( modfs, demodfs, window_duty=0.15 ): 131 | (N,K) = modfs.shape 132 | smoothed_modfs = np.zeros( (N,K) ) 133 | smoothed_demodfs = np.zeros( (N,K) ) 134 | #### Smooth functions. No smoothing is applied by default 135 | for i in range(0,K): 136 | smoothed_modfs[:,i] = Smooth( modfs[:,i], window_len = N*window_duty, window='hanning' ) 137 | smoothed_demodfs[:,i] = Smooth( demodfs[:,i], window_len = N*window_duty, window='hanning' ) 138 | return (smoothed_modfs, smoothed_demodfs) 139 | 140 | def circulant(f, direction = 1): 141 | """Circulant 142 | 143 | Args: 144 | f (numpy.ndarray): Vector to generate circulant matrix from 145 | direction (int, optional): Direction used to shift the vector when generating the matrix. 146 | 147 | Returns: 148 | np.ndarray: Circulant matrix. 149 | """ 150 | #### Verify input 151 | # assert(UtilsTesting.IsVector(f)),'Input Error - Circulant: f should be a vector.' 152 | # assert((direction == 1) or (direction == -1)), 'Input Error - Circulant: The direction needs \ 153 | # to be either forward (dir=1) or backward (dir=-1).' 154 | #### Get parameters 155 | N = f.size # We know f is a vector so just use its size. 
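# Pre-allocate the NxN output; the loops below fill each row (or column) with f circularly shifted by i*direction positions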
156 | C = np.zeros((N,N)) 157 | isRow = (f.shape[0] == 1) # Doesn't matter for ndarrays 158 | #### Generate circulant matrix 159 | if(isRow): 160 | for i in range(0,N): 161 | C[[i],:] = np.roll(f,i*direction) 162 | else: 163 | for i in range(0,N): 164 | C[:,[i]] = np.roll(f,i*direction).reshape((N,1)) 165 | 166 | return C 167 | 168 | def sinc_interp(lres_signal, hres_n, axis=-1): 169 | ''' 170 | I found out the scipy's resample does sinc interpolation so I have replaced this code with that 171 | ''' 172 | hres_signal = signal.resample(lres_signal, hres_n, axis=axis) 173 | return hres_signal 174 | 175 | def sinc_interp_old(lres_signal, hres_n): 176 | ''' 177 | I tested the output of this code with the sinc interp function from scipy (scipy.signal.resample) 178 | and the outputs matched. So this works find. 179 | But, it is 3-5x slower than scipy so I replaced it with the scipy implementation 180 | But I am leaving this here for future reference 181 | ''' 182 | # Reshape transient to simplify vectorized operations 183 | (lres_signal, lres_signal_original_shape) = vectorize_tensor(lres_signal) 184 | n_elems = lres_signal.shape[0] 185 | lres_n = lres_signal.shape[-1] 186 | assert((hres_n % lres_n) == 0), "Current sinc_interp is only implemented for integer multiples of lres_n" 187 | upscaling_factor = hres_n / lres_n 188 | f_lres_signal = np.fft.rfft(lres_signal, axis=-1) 189 | lres_nf = f_lres_signal.shape[-1] 190 | hres_nf = (hres_n // 2) + 1 191 | f_hres_signal = np.zeros((n_elems, hres_nf), dtype=f_lres_signal.dtype) 192 | f_hres_signal[..., 0:lres_nf] = f_lres_signal 193 | # NOTE: For some reason we have to multiply by the upscaling factor if we want the output signal to have the same amplitude 194 | hres_signal = np.fft.irfft(f_hres_signal)*upscaling_factor 195 | # Reshape final vectors 196 | hres_signal_original_shape = np.array(lres_signal_original_shape) 197 | hres_signal_original_shape[-1] = hres_n 198 | hres_signal = hres_signal.reshape(hres_signal_original_shape) 199 | lres_signal = lres_signal.reshape(lres_signal_original_shape) 200 | return hres_signal 201 | 202 | def normalize_signal(v, axis=-1): return v / (v.sum(axis=axis, keepdims=True) + EPSILON) 203 | def standardize_signal(v, axis=-1): return (v - v.min(axis=axis, keepdims=True)) / (v.max(axis=axis, keepdims=True) - v.min(axis=axis, keepdims=True) + EPSILON) 204 | 205 | def gaussian_pulse(time_domain, mu, width, circ_shifted=True): 206 | ''' 207 | Generate K gaussian pulses with mean=mu and sigma=width. 208 | If circ_shifted is set to true we create a gaussian that wraps around at the boundaries. 
209 | ''' 210 | mu_arr = to_nparray(mu) 211 | width_arr = to_nparray(width) 212 | assert((width_arr.size==1) or (width_arr.size==mu_arr.size)), "Input mu and width should have the same dimensions OR width should only be 1 element" 213 | if(circ_shifted): 214 | ext_time_domain = get_extended_domain(time_domain) 215 | ext_pulse = np.exp(-1*np.square((ext_time_domain[np.newaxis,:] - mu_arr[:, np.newaxis]) / width_arr[:, np.newaxis])) 216 | n_bins = time_domain.shape[-1] 217 | pulse = ext_pulse[...,0:n_bins] + ext_pulse[...,n_bins:2*n_bins] + ext_pulse[...,2*n_bins:3*n_bins] 218 | else: 219 | pulse = np.exp(-1*np.square((time_domain[np.newaxis,:] - mu_arr[:, np.newaxis]) / width_arr[:, np.newaxis])) 220 | return normalize_signal(pulse.squeeze(), axis=-1) 221 | 222 | def expgaussian_pulse_erfc(time_domain, mu, sigma, exp_lambda): 223 | if(exp_lambda is None): return gaussian_pulse(time_domain, mu, sigma) 224 | mu_arr = to_nparray(mu) 225 | sigma_sq = np.square(sigma) 226 | mu_minus_t = mu_arr[:, np.newaxis] - time_domain[np.newaxis,:] 227 | lambda_sigma_sq = exp_lambda*sigma_sq 228 | erfc_input = (mu_minus_t + lambda_sigma_sq) / sigma 229 | pulse = exp_lambda*np.exp(0.5*exp_lambda*(lambda_sigma_sq + 2*mu_minus_t))*scipy.special.erfc(erfc_input) 230 | return normalize_signal(pulse.squeeze(), axis=-1) 231 | 232 | def expgaussian_pulse_conv(time_domain, mu, sigma, exp_lambda, circ_shifted=True): 233 | gauss_pulse = gaussian_pulse(time_domain, mu, sigma, circ_shifted=circ_shifted) 234 | if(exp_lambda is None): return gauss_pulse 235 | exp_lambda = to_nparray(exp_lambda) 236 | exp_decay = np.exp(-1*exp_lambda[:, np.newaxis]*time_domain[np.newaxis,:]) 237 | expgauss_pulse = circular_conv(exp_decay, gauss_pulse, axis=-1) 238 | return normalize_signal(expgauss_pulse.squeeze(), axis=-1) 239 | 240 | def verify_time_domain(time_domain=None, n=1000): 241 | if(not (time_domain is None)): 242 | time_domain = to_nparray(time_domain) 243 | n = time_domain.shape[-1] 244 | else: 245 | time_domain = np.arange(0, n) 246 | assert(n > 1), "Number of time bins in time domain needs to be larger than 1 (n = {})".format(n) 247 | dt = time_domain[1] - time_domain[0] 248 | tau = time_domain[-1] + dt 249 | return (time_domain, n, tau, dt) 250 | 251 | def get_random_gaussian_pulse_params(time_domain=None, n=1000, min_max_sigma=None, n_samples=1): 252 | (time_domain, n, tau, dt) = verify_time_domain(time_domain, n) 253 | mu = tau*np.random.rand(n_samples) 254 | if(min_max_sigma is None): min_max_sigma = (1, 10) 255 | if(min_max_sigma[1] == min_max_sigma[0]): sigma = np.ones_like(mu)*min_max_sigma[0] 256 | else: sigma = dt*np.random.randint(low=min_max_sigma[0], high=min_max_sigma[1], size=(n_samples,)) 257 | return (mu, sigma) 258 | 259 | def get_random_expgaussian_pulse_params(time_domain=None, n=1000, min_max_sigma=None, min_max_lambda=None, n_samples=1): 260 | (time_domain, n, tau, dt) = verify_time_domain(time_domain, n) 261 | (mu, sigma) = get_random_gaussian_pulse_params(time_domain=time_domain, n=n, min_max_sigma=min_max_sigma, n_samples=n_samples) 262 | if(min_max_lambda is None): min_max_lambda = (1, 50) 263 | if(min_max_lambda[1] == min_max_lambda[0]): exp_lambda = np.ones_like(mu)*min_max_lambda[0] 264 | else: exp_lambda = dt*np.random.randint(low=min_max_lambda[0], high=min_max_lambda[1], size=(n_samples,)) 265 | exp_lambda = 1. 
/ (dt*np.random.randint(low=min_max_lambda[0], high=min_max_lambda[1], size=(n_samples,))) 266 | return (mu, sigma, exp_lambda) 267 | 268 | def get_fourier_mat(n, freq_idx=None): 269 | ''' 270 | n is the number of samples in the primary domain 271 | freq_idx are the frequencies you want to get 272 | 273 | Return an nxk matrix where each column is a cmpx sinusoid with 274 | ''' 275 | # If no frequency indeces are given simply return the full dft matrix 276 | if(freq_idx is None): 277 | return scipy.linalg.dft(n) 278 | # For each frequency idx add them to their corresponding cmpx sinusoid to the matrix 279 | n_freqs = len(freq_idx) 280 | domain = np.arange(0, n)*(TWOPI / n) 281 | fourier_mat = np.zeros((n, n_freqs), dtype=np.complex64) 282 | for i in range(n_freqs): 283 | fourier_mat[:, i] = np.cos(freq_idx[i]*domain) - 1j*np.sin(freq_idx[i]*domain) 284 | return fourier_mat 285 | 286 | def broadcast_toeplitz( C_tensor, R_tensor=None): 287 | ''' 288 | Create a toeplitz matrix using the last dimension of the input tensor 289 | ''' 290 | if R_tensor is None: 291 | R_tensor = C_tensor.conjugate() 292 | else: 293 | R_tensor = np.asarray(R_tensor) 294 | # Form a 1D array of values to be used in the matrix, containing a reversed 295 | # copy of r[1:], followed by c. 296 | vals_tensor = np.concatenate((R_tensor[...,-1:0:-1], C_tensor), axis=-1) 297 | a, b = np.ogrid[0:C_tensor.shape[-1], R_tensor.shape[-1] - 1:-1:-1] 298 | indx = a + b 299 | # `indx` is a 2D array of indices into the 1D array `vals`, arranged so 300 | # that `vals[indx]` is the Toeplitz matrix. 301 | return vals_tensor[..., indx] 302 | 303 | def max_gaussian_center_of_mass_mle(transient, tbins=None, sigma_tbins = 1): 304 | ''' 305 | In this function we find the maximum of the transient and then calculate the center of mass in the neighborhood of the maximum. 306 | NOTE: At low SNR, low depths will have lower depth error on average than far away depths. 307 | This is because, at low SNR (low SBR/low photon counts), it becomes very likely that there are multiple maximums, some maximums are 308 | due to the signal and others due to ambient photons. And since numpy's argmax function always takes the 1st maximum it finds, then at low depths 309 | the maximum due to the signal are preferred, but at large depths the maximums due to ambient (that come before) are chosen. 
310 | ''' 311 | # Reshape transient to simplify vectorized operations 312 | (transient, transient_original_shape) = vectorize_tensor(transient) 313 | n_elems = transient.shape[0] 314 | n_tbins = transient.shape[-1] 315 | # Remove ambient (assume that median is a good estimate of ambient component) 316 | ambient_estimate = np.median(transient, axis=-1, keepdims=True) 317 | transient_noamb = transient - ambient_estimate 318 | # Make sure there are not negative values 319 | transient_noamb[transient_noamb < 0] = 0 320 | # Find start and end tbin of gaussian pulse 321 | argmax_tbin = np.argmax(transient, axis=-1) 322 | # start_tbin = np.clip(argmax_tbin - int(np.ceil(2*sigma_tbins)), 0, n_tbins) 323 | # end_tbin = np.clip(argmax_tbin + int(np.ceil(2*sigma_tbins)) + 1, 0, n_tbins) 324 | start_tbin = argmax_tbin - int(np.ceil(2*sigma_tbins)) 325 | end_tbin = argmax_tbin + int(np.ceil(2*sigma_tbins)) + 1 326 | # Create a dummy tbin array if tbins are not given 327 | if(tbins is None): tbins = np.arange(0, n_tbins) 328 | assert(transient.shape[-1] == len(tbins)), 'transient and tbins should have the same number of elements' 329 | # For each 1D transient calculate the center of mass max likelihood estimate 330 | center_of_mass_mle = np.zeros((n_elems,)) 331 | extended_tbins = get_extended_domain(tbins, axis=-1) 332 | extended_transient_noamb = extend_tensor_circularly(transient_noamb, axis=-1) 333 | # for i in range(n_elems): 334 | # tbin_vec = tbins[start_tbin[i]:end_tbin[i]] 335 | # transient_vec = transient_noamb[i, start_tbin[i]:end_tbin[i]] 336 | # center_of_mass_mle[i] = np.dot(transient_vec, tbin_vec) / (np.sum(transient_vec) + EPSILON) 337 | for i in range(n_elems): 338 | start_idx = start_tbin[i]+n_tbins 339 | end_idx = end_tbin[i]+n_tbins 340 | tbin_vec = extended_tbins[start_idx:end_idx] 341 | transient_vec = extended_transient_noamb[i, start_idx:end_idx] 342 | center_of_mass_mle[i] = np.dot(transient_vec, tbin_vec) / (np.sum(transient_vec) + EPSILON) 343 | # Reshape to original shapes, useful when dealing with images 344 | transient = unvectorize_tensor(transient, transient_original_shape) 345 | center_of_mass_mle = center_of_mass_mle.reshape(transient_original_shape[0:-1]) 346 | return center_of_mass_mle 347 | 348 | def haar_matrix(n, n_levels): 349 | assert(n_levels >= 0), 'n_levels should be larger than ' 350 | n_codes = np.power(2, n_levels) 351 | assert((n % n_codes) == 0), "only implemented multiples of 2^n_levels" 352 | H = np.zeros((n, n_codes)) 353 | for i in range(n_levels+1): 354 | curr_total_codes = np.power(2, i) 355 | n_codes_at_curr_lvl = int(np.ceil(curr_total_codes / 2)) 356 | half_duty_len = int(n / curr_total_codes) 357 | curr_start_code_idx = int(curr_total_codes - n_codes_at_curr_lvl) 358 | for j in range(n_codes_at_curr_lvl): 359 | start_idx = (j*half_duty_len*2) 360 | mid_point_idx = start_idx + half_duty_len 361 | end_idx = start_idx + 2*half_duty_len 362 | H[start_idx:mid_point_idx, curr_start_code_idx+j] = 1.0 363 | H[mid_point_idx:end_idx, curr_start_code_idx+j] = -1.0 364 | return H 365 | 366 | def generate_gray_code(n_bits): 367 | assert(n_bits >= 1), "invalid n_bits" 368 | n_binary_codes = np.power(2, n_bits) 369 | n_binary_codes_over2 = int(n_binary_codes / 2) 370 | codes = np.zeros((n_binary_codes, n_bits)) 371 | codes[1, -1] = 1 372 | for i in range(n_bits-1): 373 | curr_n_bits = i + 2 374 | start_code_idx = 0 375 | end_code_idx1 = np.power(2, i+1) 376 | end_code_idx2 = 2*end_code_idx1 377 | # Reflect 378 | reflected_codes = 
np.flipud(codes[start_code_idx:end_code_idx1, :]) 379 | codes[end_code_idx1:end_code_idx2, :] = reflected_codes 380 | # Prefix with ones 381 | codes[end_code_idx1:end_code_idx2, -curr_n_bits] = 1 382 | return codes 383 | 384 | def get_orthogonal_binary_code(c): 385 | ''' 386 | only works for square codes with 50% duty cycle 387 | ''' 388 | assert(c.ndim == 1), "input c should be 1D vector" 389 | n = c.shape[0] 390 | shift_f0_90deg = n // 4 391 | # Find the repetition frequency of the code 392 | f_c = np.fft.rfft(c, axis=0) 393 | fk = np.abs(f_c).argmax() 394 | # Shift the code 395 | shift = int(np.round(shift_f0_90deg / fk)) 396 | c_orth = np.roll(c, shift=shift, axis=0) 397 | # print("Orthogonal Measure: {}".format(np.dot(c_orth,c))) 398 | return c_orth 399 | 400 | def get_dominant_freqs(Cmat, axis=0): 401 | f_Cmat = np.fft.rfft(Cmat, axis=axis) 402 | return np.argmax(np.abs(f_Cmat), axis=axis) 403 | 404 | def get_low_confidence_freqs(h_irf, valid_freq_thresh=0.2): 405 | ''' 406 | Look at frequency response of h_irf vector, and find frequencies with low amplitude 407 | ''' 408 | nt = h_irf.shape[-1] 409 | abs_max_freq_idx = nt // 2 410 | all_freq_idx = np.arange(0, abs_max_freq_idx+1) 411 | # Calculate FFT of IRF and get frequencies with magnitude above threshold 412 | f_h_irf = np.fft.rfft(h_irf) 413 | amp_f_h_irf = np.abs(f_h_irf) 414 | # Frequencies should have a magnitude higher than the following computed w.r.t the 1st harmonic 415 | threshold = f_h_irf[1]*valid_freq_thresh 416 | low_confidence_freqs = amp_f_h_irf < threshold 417 | low_confidence_freq_idx = all_freq_idx[low_confidence_freqs] 418 | return (low_confidence_freq_idx, low_confidence_freqs) 419 | -------------------------------------------------------------------------------- /research_utils/signalproc_ops_2D.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | 3 | ## Library Imports 4 | import numpy as np 5 | from scipy import fft 6 | from IPython.core import debugger 7 | breakpoint = debugger.set_trace 8 | 9 | ## Local Imports 10 | from .shared_constants import * 11 | 12 | def dct2(x): 13 | return fft.dct(fft.dct(x, norm='ortho', axis=-2), norm='ortho', axis=-1) 14 | 15 | def idct2(x): 16 | return fft.idct(fft.idct(x, norm='ortho', axis=-2), norm='ortho', axis=-1) 17 | 18 | def generate_dct2d_mat(nr, nc): 19 | A = np.kron( 20 | fft.idct(np.identity(nr), norm='ortho', axis=1), 21 | fft.idct(np.identity(nc), norm='ortho', axis=1) 22 | ) 23 | return A.reshape((nr*nc, nr, nc)) 24 | 25 | 26 | -------------------------------------------------------------------------------- /research_utils/tests/test_signalproc_ops_2D.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | import sys 3 | sys.path.append('../') 4 | 5 | ## Library Imports 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | from scipy import fft 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | ## Local Imports 13 | from research_utils.signalproc_ops_2D import * 14 | from research_utils.shared_constants import * 15 | 16 | 17 | def test_dct2_idct2_invertibility(img): 18 | ## Test dct2 and idct2 19 | dct2_img = dct2(img) 20 | rec_img = idct2(dct2_img) 21 | assert(np.allclose(img, rec_img, atol=EPSILON)), "dct2 and idct2 do not reverse the operation" 22 | print("PASSED test_dct2_idct2_invertibility") 23 | 24 | def test_dct2mat(img): 25 | dct2_img1 = dct2(img) 26 | (nr,nc) = img.shape 27 | 
dct2mat = generate_dct2d_mat(nr, nc) 28 | dct2_img2 = np.zeros((nr*nc,)) 29 | for i in range(nr*nc): 30 | dct2_img2[i] = np.sum(np.multiply(dct2mat[i,:], img)) 31 | dct2_img2 = dct2_img2.reshape((nr,nc)) 32 | assert(np.allclose(dct2_img1, dct2_img2, atol=EPSILON)), "dct2 and idct2 do not reverse the operation" 33 | print("PASSED test_dct2mat") 34 | 35 | if __name__=='__main__': 36 | from skimage import data 37 | from skimage.transform import resize 38 | 39 | ## Load sample image data and resize 40 | img = data.coins() 41 | img = resize(img, (img.shape[0]//4,img.shape[1]//4)) 42 | 43 | ## Test 44 | test_dct2_idct2_invertibility(img) 45 | test_dct2_idct2_invertibility(np.random.rand(20,20)) 46 | 47 | ## Test dct2 matrix 48 | test_dct2mat(img) 49 | test_dct2mat(np.random.rand(20,20)) 50 | test_dct2mat(np.random.rand(23,18)) 51 | test_dct2mat(np.random.rand(13,40)) 52 | -------------------------------------------------------------------------------- /research_utils/timer.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | import time 3 | 4 | ## Library Imports 5 | 6 | ## Local Imports 7 | 8 | class Timer(object): 9 | def __init__(self, name=None): 10 | self.name = name 11 | def __enter__(self): 12 | self.tstart = time.time() 13 | def __exit__(self, type, value, traceback): 14 | if self.name: print('[{}] - Elapsed: {} seconds.'.format(self.name, time.time() - self.tstart)) 15 | else: print('Elapsed: {}'.format(time.time() - self.tstart)) 16 | 17 | -------------------------------------------------------------------------------- /research_utils/torch_datasets.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Useful custom pytorch dataloaders 3 | ''' 4 | ## Standard Library Imports 5 | import os 6 | 7 | ## Library Imports 8 | import numpy as np 9 | import torch 10 | from torch.utils.data import DataLoader 11 | from IPython.core import debugger 12 | breakpoint = debugger.set_trace 13 | 14 | ## Local Imports 15 | from research_utils.io_ops import get_multi_folder_paired_fnames 16 | 17 | class MultiFolderPairedNumpyData(torch.utils.data.Dataset): 18 | ''' 19 | Dataset with pairs of numpy data files stored in different folders in the following way: 20 | dataset/ 21 | folder1/f1.npy, 22 | f2.npy, 23 | 24 | folder2/f1.npy, 25 | f2.npy, 26 | 27 | folder3/f1.npz 28 | f2.npz 29 | ''' 30 | valid_file_ext = ['npy', 'npz'] 31 | def __init__(self, dirpath_list): 32 | assert(len(dirpath_list)>0), "empty dirpath list" 33 | self.dirpath_list = dirpath_list 34 | self.n_dirpaths = len(dirpath_list) 35 | (self.base_filenames, self.file_ext_per_dirpath) = get_multi_folder_paired_fnames(dirpath_list, self.valid_file_ext) 36 | self.n_samples = len(self.base_filenames) 37 | 38 | def __len__(self): 39 | return self.n_samples 40 | 41 | def __getitem__(self, idx): 42 | np_data_sample = [] 43 | curr_base_fname = self.base_filenames[idx] 44 | for i in range(self.n_dirpaths): 45 | curr_fpath = os.path.join(self.dirpath_list[i], curr_base_fname + '.' 
+ self.file_ext_per_dirpath[i]) 46 | np_data_sample.append(np.load(curr_fpath)) 47 | return (np_data_sample, curr_base_fname) 48 | 49 | def get_sample(self, sample_filename, **kwargs): 50 | ''' 51 | kwargs are any extra key-word args that __getitem__ may take as input 52 | ''' 53 | try: 54 | idx = self.base_filenames.index(sample_filename) 55 | return self.__getitem__(idx, **kwargs) 56 | except ValueError: 57 | print("{} not in datasets".format(sample_filename)) 58 | return None 59 | 60 | if __name__=='__main__': 61 | dirpath1 = '/home/felipe/Dropbox/research_projects/data/synthetic_data_min/data_no-conductors_no-dielectric_automatic/transient_images_120x160_nt-2000' 62 | dirpath2 = '/home/felipe/Dropbox/research_projects/data/synthetic_data_min/data_no-conductors_no-dielectric_automatic/rgb_images_120x160_nt-2000' 63 | dirpath3 = '/home/felipe/Dropbox/research_projects/data/synthetic_data_min/data_no-conductors_no-dielectric_automatic/depth_images_120x160_nt-2000' 64 | dirpaths = [dirpath1,dirpath2,dirpath3] 65 | 66 | dataset = MultiFolderPairedNumpyData(dirpaths) 67 | loader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=1) 68 | 69 | # if we use get_sample we can get the numpy object directyl 70 | (np_data, fname) = dataset.get_sample(dataset.base_filenames[0]) 71 | 72 | for step, data in enumerate(loader): 73 | # the loader automatically casts everyting as tensor 74 | (data_sample, fname) = data 75 | breakpoint() 76 | print("Loaded: {}".format(fname)) 77 | -------------------------------------------------------------------------------- /research_utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | ## Standard Library Imports 2 | 3 | ## Library Imports 4 | import numpy as np 5 | import torch 6 | import torch.nn 7 | import torchvision.transforms as T 8 | import torchvision.transforms.functional as TF 9 | from IPython.core import debugger 10 | breakpoint = debugger.set_trace 11 | 12 | ## Local Imports 13 | 14 | 15 | def normalize_known_range(x, min_val=0., max_val=1.): 16 | # First normalize signal between 0 and 1, and multiply 2 and subtract 1 to make it -1 to 1 17 | return (((x - min_val) / (max_val - min_val))*2) - 1 18 | 19 | def softmax_scoring(scores, gt_indeces, beta=300., eps=1, axis=-1): 20 | ''' 21 | apply softmax to scores to make into probability distribution 22 | then use the gt_indeces to take a look at the softmax scores of each sample in the +/- eps neightborhood 23 | return the sum of these scores 24 | ''' 25 | assert(eps >= 0),'eps should be non-negative' 26 | softmax_scores = torch.nn.functional.softmax(scores*beta, dim=axis) 27 | n_scores = (2*eps)+1 28 | (min_idx, max_idx) = (0, scores.shape[axis]) 29 | for i in range(n_scores): 30 | offset = -1*eps + i 31 | indeces = torch.clamp(gt_indeces + offset, min=min_idx, max=max_idx-1) 32 | selected_scores = softmax_scores.gather(axis, indeces.long().unsqueeze(axis)) 33 | return selected_scores.sum() 34 | 35 | def multi_img_random_hflip(img_list): 36 | ''' 37 | Apply same random hflip to a list of images 38 | ''' 39 | if np.random.rand() > 0.5: 40 | for i in range(len(img_list)): 41 | img_list[i] = TF.hflip(img_list[i]) 42 | return img_list 43 | 44 | def multi_img_random_vflip(img_list): 45 | ''' 46 | Apply same random vflip to a list of images 47 | ''' 48 | if np.random.rand() > 0.5: 49 | for i in range(len(img_list)): 50 | img_list[i] = TF.vflip(img_list[i]) 51 | return img_list 52 | 53 | def multi_img_crop(img_list, top, left, height, width): 54 | ''' 55 
| Apply same crop to all images 56 | ''' 57 | for i in range(len(img_list)): 58 | img_list[i] = TF.crop(img_list[i], top, left, height, width) 59 | return img_list 60 | 61 | def multi_img_random_crop(img_list, crop_size=(32,32)): 62 | ''' 63 | Apply same random crop to all images 64 | ''' 65 | i, j, h, w = T.RandomCrop.get_params(img_list[0], crop_size) 66 | img_list = multi_img_crop(img_list, i,j,h,w) 67 | return img_list -------------------------------------------------------------------------------- /scan_data_utils.py: -------------------------------------------------------------------------------- 1 | #### Standard Library Imports 2 | import os 3 | 4 | #### Library imports 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | 8 | #### Local imports 9 | 10 | irf_dirpath = './system_irf' 11 | 12 | def verify_hist_tau(hist_img_tau, hist_tbin_size): 13 | if((hist_img_tau % hist_tbin_size) != 0): 14 | print("Invalid hist tau. Try adding {} to end time".format(hist_img_tau % hist_tbin_size)) 15 | assert((hist_img_tau % hist_tbin_size) == 0), "hist tau needs to be a multiple of the bin size" 16 | 17 | def time2bin(t, bin_size): 18 | ''' 19 | time2bin index. Bin at index 0 will have times from 0-1 (not including 1) 20 | ''' 21 | return int(np.floor(t / bin_size)) 22 | 23 | def bin2time(bin, bin_size): 24 | ''' 25 | bin2time. given bin index return the mid point in time of that bin 26 | ''' 27 | return bin*bin_size + (0.5*bin_size) 28 | 29 | def get_unimodal_nt(nt, pulse_len, tres): 30 | return nt - time2bin(pulse_len, tres) 31 | 32 | def get_nt(hist_img_tau, hist_tbin_size): 33 | verify_hist_tau(hist_img_tau, hist_tbin_size) 34 | return time2bin(hist_img_tau, hist_tbin_size) 35 | 36 | def get_hist_bins(max_tbin, tbin_size): 37 | verify_hist_tau(max_tbin, tbin_size) 38 | bin_edges = np.arange(0, max_tbin + tbin_size, tbin_size) 39 | bins = bin_edges[0:-1] + 0.5*tbin_size 40 | return (bins, bin_edges) 41 | 42 | def timestamps2histogram(tstamps_vec, max_tbin, min_tbin_size, hist_tbin_factor=1): 43 | ''' Build histogram from timestamps loaded by the above functions 44 | Outputs: 45 | * tstamps_vec: unitless timestamps 46 | * max_tbin: maximum tbin value 47 | * min_tbin_size: time resolution. tstamps_vec*min_tbin_size == tstamps in time units 48 | * hist_tbin_factor: If we want to make histogram smaller. If set to 2 the histogram will be 2x smaller, 3 --> 3x smaller, etc. 49 | ''' 50 | hist_tbin_size = min_tbin_size*hist_tbin_factor # increase size of time bin to make histogramming faster 51 | tstamps_vec = min_tbin_size*tstamps_vec # time counter to timestamps 52 | (bins, bin_edges) = get_hist_bins(max_tbin, hist_tbin_size) 53 | # Use Numpy histogram function (much faster) 54 | counts, _ = np.histogram(tstamps_vec, bins=bin_edges) 55 | # counts, _, _ = plt.hist(tstamps_vec,bins=bins) 56 | # plt.clf() 57 | return (counts, bin_edges, bins) 58 | 59 | def vector2img(v, nr, nc): 60 | ''' 61 | Transform vectorized pixels to img. 
This function is specifically tailored to the way that scan data was acquired 62 | ''' 63 | assert((nr*nc) == v.shape[0]), "first dim length should equal num pixels" 64 | assert((v.ndim == 1) or (v.ndim == 2)), "should be a vector or an array of vectors" 65 | if(v.ndim == 1): 66 | img = v.reshape((nr, nc)) 67 | else: 68 | img = v.reshape((nr, nc, v.shape[-1])) 69 | return np.flipud(np.swapaxes(img, 0, 1)) 70 | 71 | def get_hist_img_fname(nr, nc, tres, tlen, is_unimodal=False): 72 | if(is_unimodal): 73 | return 'unimodal-hist-img_r-{}-c-{}_tres-{}ps_tlen-{}ps.npy'.format(nr, nc, int(tres), int(tlen)) 74 | else: 75 | return 'hist-img_r-{}-c-{}_tres-{}ps_tlen-{}ps.npy'.format(nr, nc, int(tres), int(tlen)) 76 | 77 | def get_irf_fname(tres, tlen, is_unimodal=False): 78 | if(is_unimodal): 79 | return 'unimodal-irf_tres-{}ps_tlen-{}ps.npy'.format(int(tres), int(tlen)) 80 | else: 81 | return 'irf_tres-{}ps_tlen-{}ps.npy'.format(int(tres), int(tlen)) 82 | 83 | def get_nosignal_mask_fname(nr, nc): 84 | return 'nosignal-mask_r-{}-c-{}.png'.format(nr, nc) 85 | 86 | def fit_irf(irf): 87 | ## Fit a cubic spline function to be able to generate any 88 | from scipy.interpolate import interp1d 89 | nt = irf.size 90 | # Extend x and y and interpolate 91 | ext_x_fullres = np.arange(-nt, 2*nt) * (1. / nt) 92 | ext_irf = np.concatenate((irf, irf, irf), axis=-1) 93 | f = interp1d(ext_x_fullres, ext_irf, axis=-1, kind='cubic') 94 | return f 95 | 96 | # def get_irf(n, tlen, tres=8, is_unimodal=False): 97 | # ''' 98 | # Load IRF data stored for a particular histogram length (tlen) 99 | # Fit a curve to the data, and then re-sample it at the desired resolution (n) 100 | # PARAMETERS: 101 | # * n = desired resolution of irf 102 | # * tlen = length of irf in picoseconds 103 | # * tres = time resolution of irf data 104 | # NOTE: The IRF data is usually saved at the lowest tres possible (8ps) 105 | # ''' 106 | # irf_data_fname = get_irf_fname(tres, tlen, is_unimodal) 107 | # irf_data_fpath = os.path.join(irf_dirpath, irf_data_fname) 108 | # assert(os.path.exists(irf_data_fpath)), "irf does not exist. make sure to run preprocess_irf.py for this hist len first" 109 | # irf_data = np.load(irf_data_fpath) 110 | # irf_f = fit_irf(irf_data) 111 | # x_fullres = np.arange(0, n)*(1./n) 112 | # irf = irf_f(x_fullres) 113 | # irf[irf < 1e-8] = 0 114 | # return irf 115 | 116 | def get_scene_irf(scene_id, n, tlen, tres=8, is_unimodal=False): 117 | ''' 118 | Load IRF data stored for a particular histogram length (tlen) 119 | Fit a curve to the data, and then re-sample it at the desired resolution (n) 120 | PARAMETERS: 121 | * n = desired resolution of irf 122 | * tlen = length of irf in picoseconds 123 | * tres = time resolution of irf data 124 | NOTE: The IRF data is usually saved at the lowest tres possible (8ps) 125 | ''' 126 | irf_data_fname = get_irf_fname(tres, tlen, is_unimodal) 127 | irf_data_fpath = os.path.join(os.path.join(irf_dirpath, scene_id), irf_data_fname) 128 | assert(os.path.exists(irf_data_fpath)), "irf does not exist. 
make sure to run preprocess_irf.py for this hist len first" 129 | irf_data = np.load(irf_data_fpath) 130 | irf_f = fit_irf(irf_data) 131 | x_fullres = np.arange(0, n)*(1./n) 132 | irf = irf_f(x_fullres) 133 | irf[irf < 1e-8] = 0 134 | return irf 135 | 136 | 137 | def get_depth_lims(scene_id): 138 | # if(scene_id == '20190207_face_scanning_low_mu/ground_truth'): (min_d, max_d) = (2300, 3600) 139 | # elif(scene_id == '20190207_face_scanning_low_mu/free'): (min_d, max_d) = (3200, 4500) 140 | # else: (min_d, max_d) = (1000, 4250) 141 | if(scene_id == '20190207_face_scanning_low_mu/ground_truth'): (min_d, max_d) = (1100, 2400) 142 | elif(scene_id == '20190207_face_scanning_low_mu/free'): (min_d, max_d) = (2000, 3300) 143 | else: (min_d, max_d) = (300, 2600) 144 | return (min_d, max_d) 145 | 146 | def calc_n_empty_laser_cycles(sync_vec): 147 | max_laser_cycles = sync_vec.max() 148 | u, c = np.unique(sync_vec, return_counts=True) 149 | n_nonempty_laser_cycles = u.size 150 | assert(max_laser_cycles >= n_nonempty_laser_cycles), "something is wrong with sync_vec" 151 | return max_laser_cycles - n_nonempty_laser_cycles 152 | 153 | def linearize_phase(phase): 154 | # If phase < 0 then we need to add 2pi. 155 | corrected_phase = phase + (2*np.pi*(phase < 0)) 156 | return corrected_phase 157 | 158 | def phase2depth(phase, repetition_tau): 159 | return time2depth(phase2time(phase, repetition_tau)) 160 | 161 | def phase2time(phase, repetition_tau): 162 | ''' 163 | Assume phase is computed with np.arctan2; maps a phase in [0, 2*pi) to a time in [0, repetition_tau) 164 | ''' 165 | # If phase < 0 then we need to add 2pi. 166 | corrected_phase = linearize_phase(phase) 167 | return (corrected_phase*repetition_tau / (2*np.pi)) 168 | 169 | SPEED_OF_LIGHT = 3e8 170 | 171 | def time2depth(time): 172 | return (SPEED_OF_LIGHT * time) / 2. 173 | 174 | def freq2depth(freq): 175 | return (SPEED_OF_LIGHT * (1./freq)) / 2.
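# Illustrative example (not part of the original scripts; the 8 ps bin size matches min_tbin_size in scan_params.json):
# converting a histogram bin index into a depth estimate with the helpers above.
#   tbin_size = 8e-12                   # seconds (8 ps)
#   t = bin2time(625, tbin_size)        # 5.004e-9 s, mid-point of bin 625
#   d = time2depth(t)                   # (3e8 * 5.004e-9) / 2 ~= 0.75 m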
176 | 177 | def depth2time(depth): 178 | return (2*depth / SPEED_OF_LIGHT) 179 | -------------------------------------------------------------------------------- /scan_params.json: -------------------------------------------------------------------------------- 1 | { 2 | "laser_rep_freq": 10000000.0, 3 | "dead_time": 50000.0, 4 | "min_tbin_size": 8, 5 | "scene_ids": [ 6 | "20190209_deer_high_mu/free", 7 | "20190209_deer_high_mu/det", 8 | "20190209_deer_high_mu/ext", 9 | "20190209_deer_high_mu/ext_5%", 10 | "20190207_face_scanning_low_mu/free", 11 | "20190207_face_scanning_low_mu/det", 12 | "20190207_face_scanning_low_mu/ground_truth", 13 | "20190207_face_scanning_low_mu/ext_opt_filtering", 14 | "20190207_face_scanning_low_mu/ext_5%", 15 | "20181112_blocks/extreme_flux", 16 | "20181112_blocks/high_flux", 17 | "20181112_blocks/med_flux", 18 | "20181112_blocks/low_flux", 19 | "20181105_face/low_flux", 20 | "20181105_face/opt_flux", 21 | "20181105_tajmahal" 22 | ], 23 | "hist_preprocessing_params": { 24 | "hist_start_time": 37000, 25 | "hist_end_time": 54504, 26 | "hist_shift_time": 800 27 | }, 28 | "irf_params": { 29 | "pulse_len": 850, 30 | "second_pulse_offset": 1100 31 | }, 32 | "scene_params": { 33 | "default": { 34 | "n_rows_fullres": 204, 35 | "n_cols_fullres": 116, 36 | "pulse_len": 480, 37 | "second_pulse_offset": 1100, 38 | "global_temporal_shift": 0 39 | }, 40 | "20190207_face_scanning_low_mu/free": { 41 | "n_rows_fullres": 204, 42 | "n_cols_fullres": 116, 43 | "pulse_len": 480, 44 | "second_pulse_offset": 1100, 45 | "global_temporal_shift": 0 46 | }, 47 | "20190207_face_scanning_low_mu/ground_truth": { 48 | "n_rows_fullres": 204, 49 | "n_cols_fullres": 116, 50 | "pulse_len": 850, 51 | "second_pulse_offset": 1100, 52 | "global_temporal_shift": 0 53 | }, 54 | "20190207_face_scanning_low_mu/ext_opt_filtering": { 55 | "n_rows_fullres": 204, 56 | "n_cols_fullres": 116, 57 | "pulse_len": 480, 58 | "second_pulse_offset": 1100, 59 | "global_temporal_shift": -21400 60 | }, 61 | "20190207_face_scanning_low_mu/ext_5%": { 62 | "n_rows_fullres": 204, 63 | "n_cols_fullres": 116, 64 | "pulse_len": 480, 65 | "second_pulse_offset": 1100, 66 | "global_temporal_shift": -21400 67 | }, 68 | "20190209_deer_high_mu/free": { 69 | "n_rows_fullres": 174, 70 | "n_cols_fullres": 154, 71 | "pulse_len": 480, 72 | "second_pulse_offset": 1100 73 | }, 74 | "20181105_tajmahal": { 75 | "n_rows_fullres": 110, 76 | "n_cols_fullres": 152, 77 | "pulse_len": 480, 78 | "second_pulse_offset": 1100, 79 | "global_temporal_shift": 0 80 | }, 81 | "20190209_deer_high_mu/ext": { 82 | "n_rows_fullres": 174, 83 | "n_cols_fullres": 154, 84 | "pulse_len": 480, 85 | "second_pulse_offset": 1100, 86 | "global_temporal_shift": 0 87 | }, 88 | "20190209_deer_high_mu/ext_5%": { 89 | "n_rows_fullres": 174, 90 | "n_cols_fullres": 154, 91 | "pulse_len": 480, 92 | "second_pulse_offset": 1100, 93 | "global_temporal_shift": 0 94 | }, 95 | "20190209_deer_high_mu/det": { 96 | "n_rows_fullres": 174, 97 | "n_cols_fullres": 154, 98 | "pulse_len": 480, 99 | "second_pulse_offset": 1100, 100 | "global_temporal_shift": 0 101 | }, 102 | "20181112_blocks/low_flux": { 103 | "n_rows_fullres": 123, 104 | "n_cols_fullres": 183, 105 | "pulse_len": 480, 106 | "second_pulse_offset": 1100, 107 | "global_temporal_shift": 0 108 | }, 109 | "20181112_blocks/med_flux": { 110 | "n_rows_fullres": 123, 111 | "n_cols_fullres": 183, 112 | "pulse_len": 480, 113 | "second_pulse_offset": 1100, 114 | "global_temporal_shift": 0 115 | }, 116 | "20181112_blocks/high_flux": { 117 | 
"n_rows_fullres": 123, 118 | "n_cols_fullres": 183, 119 | "pulse_len": 480, 120 | "second_pulse_offset": 1100, 121 | "global_temporal_shift": 0 122 | }, 123 | "20181112_blocks/extreme_flux": { 124 | "n_rows_fullres": 123, 125 | "n_cols_fullres": 183, 126 | "pulse_len": 480, 127 | "second_pulse_offset": 1100, 128 | "global_temporal_shift": 0 129 | }, 130 | "20181105_face/low_flux": { 131 | "n_rows_fullres": 240, 132 | "n_cols_fullres": 150, 133 | "pulse_len": 480, 134 | "second_pulse_offset": 1100, 135 | "global_temporal_shift": 0 136 | }, 137 | "20181105_face/opt_flux": { 138 | "n_rows_fullres": 240, 139 | "n_cols_fullres": 150, 140 | "pulse_len": 480, 141 | "second_pulse_offset": 1100, 142 | "global_temporal_shift": 0 143 | }, 144 | "20190207_face_scanning_low_mu/det": { 145 | "n_rows_fullres": 204, 146 | "n_cols_fullres": 116, 147 | "pulse_len": 480, 148 | "second_pulse_offset": 1100, 149 | "global_temporal_shift": 0 150 | } 151 | }, 152 | "fov_major_axis": 40 153 | } --------------------------------------------------------------------------------