├── .gitignore ├── README.md ├── camera.py ├── camera_baumer.py ├── camera_simulated.py ├── camera_web.py ├── cameras_helper.py ├── config.json ├── config.py ├── create_patterns.py ├── examples └── test_plate_phasogrammetry.py ├── fpp_structures.py ├── hand_set_up_camera.py ├── main.py ├── min_max_projector_calibration.py ├── processing.py ├── projector.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | 4 | # VS Code specific 5 | .vscode/ 6 | 7 | # Project specific 8 | data/ 9 | temp/ 10 | config.json -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **Structured light project** 2 | 3 | This project is an attempt to create an easy-to-understand and flexible-to-use framework for implementing the Fringe Projection Profilometry (FPP) method in Python. 4 | 5 | ## **Implementation** 6 | 7 | * Ability to use any camera (modules for webcams through OpenCV and for Baumer cameras through NeoAPI are implemented in the project) 8 | * The Phase Shifting Profilometry (PSP) method with sinusoidal fringes is implemented to obtain phase fields 9 | * Projection pattern generation supports an arbitrary number of phase shifts and an arbitrary number of periods 10 | * A hierarchical approach is used to unwrap the phase fields 11 | * Automatic detection of the fringe projection area (ROI) in the images is implemented 12 | * A simple gamma correction method for projected images is implemented 13 | * Flexible adjustment of the experiment and hardware parameters with the help of config files 14 | 15 | ## **How to use** 16 | 17 | 1. Install dependencies 18 | ``` 19 | pip install opencv-contrib-python numpy scipy matplotlib 20 | ``` 21 | 2. Set the parameters of the experiment and the hardware in the file `config.py` 22 | 23 | 3. Launch the main module 24 | ``` 25 | python main.py 26 | ``` 27 | 28 | The script `examples/test_plate_phasogrammetry.py` contains an example of processing the results of an experiment to determine the surface shape of a granite slab using the phasogrammetric approach. To date, a measurement accuracy of about **60 µm** has been achieved. 29 | 30 | ## **References** 31 | The following sources were used to implement the algorithms: 32 | 33 | [Zuo C. et al. Phase shifting algorithms for fringe projection profilometry: A review // Optics and Lasers in Engineering. 2018. Vol. 109. P. 23-59.](https://doi.org/10.1016/j.optlaseng.2018.04.019) 34 | 35 | [Zuo C. et al. Temporal phase unwrapping algorithms for fringe projection profilometry: A comparative review // Optics and Lasers in Engineering. 2016. Vol. 85. P. 84-103.](https://doi.org/10.1016/j.optlaseng.2016.04.022) 36 | 37 | [Feng S. et al. Calibration of fringe projection profilometry: A comparative review // Optics and Lasers in Engineering. 2021. Vol. 143. P. 106622.](https://doi.org/10.1016/j.optlaseng.2021.106622) 38 | 39 | [Zhong K. et al. Pre-calibration-free 3D shape measurement method based on fringe projection // Optics Express. 2016. Vol. 24. №. 13. P. 14196-14207.](https://doi.org/10.1364/OE.24.014196) 40 | 41 | ## **Authors** 42 | Anton Poroykov, Ph.D., associate professor 43 | 44 | Nikita Sivov, graduate student 45 | 46 | ## **Acknowledgements** 47 | The research was supported by the Russian Science Foundation grant No. 22-21-00550 (https://rscf.ru/project/22-21-00550/).
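
## **Usage sketch**

A minimal sketch of how the pattern-generation pieces fit together (the N-step phase-retrieval formula below is the textbook one; `processing.py` is the project's actual implementation):

```python
import numpy as np
from create_patterns import create_psp_templates
from fpp_structures import PhaseShiftingAlgorithm

# Generate 4-step sinusoidal patterns for three fringe frequencies
patterns, shifts = create_psp_templates(1280, 720, [1, 4, 16], PhaseShiftingAlgorithm.n_step, shifts_number=4)

# Wrapped phase for the highest frequency: phi = atan2(sum I_n*sin(d_n), sum I_n*cos(d_n))
imgs = np.stack(patterns[-1])
phi = np.arctan2(np.tensordot(np.sin(shifts), imgs, axes=1), np.tensordot(np.cos(shifts), imgs, axes=1))
```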
-------------------------------------------------------------------------------- /camera.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from abc import ABC, abstractmethod 4 | 5 | import numpy as np 6 | 7 | 8 | class Camera(ABC): 9 | 10 | @staticmethod 11 | @abstractmethod 12 | def get_available_cameras(cameras_num_to_find:int=2) -> list[Camera]: 13 | ''' 14 | Get list of available cameras 15 | 16 | Args: 17 | cameras_num_to_find (int) = 2 : The number of cameras to try to find 18 | 19 | Returns: 20 | cameras (list of Camera): list of found cameras 21 | ''' 22 | 23 | @abstractmethod 24 | def get_image(self) -> np.ndarray: 25 | ''' 26 | Get image from camera 27 | 28 | Returns: 29 | image (numpy array): image as numpy array (2D or 3D depending on color mode) 30 | ''' 31 | 32 | @property 33 | @abstractmethod 34 | def exposure(self): 35 | '''Exposure''' 36 | 37 | @exposure.setter 38 | @abstractmethod 39 | def exposure(self, value): 40 | '''Set exposure''' 41 | 42 | @property 43 | @abstractmethod 44 | def gain(self): 45 | '''Gain''' 46 | 47 | @gain.setter 48 | @abstractmethod 49 | def gain(self, value): 50 | '''Set gain''' 51 | 52 | @property 53 | @abstractmethod 54 | def gamma(self): 55 | '''Gamma''' 56 | 57 | @gamma.setter 58 | @abstractmethod 59 | def gamma(self, value): 60 | '''Set gamma''' 61 | -------------------------------------------------------------------------------- /camera_baumer.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import List 4 | 5 | import numpy as np 6 | 7 | try: 8 | import neoapi 9 | except ImportError: 10 | neoapi_found = False 11 | else: 12 | neoapi_found = True 13 | 14 | from camera import Camera 15 | 16 | 17 | class CameraBaumer(Camera): 18 | def __init__(self, camera: neoapi.Cam, serial_number: str = None): 19 | self.camera = camera 20 | if serial_number is not None: 21 | self.camera.Connect(serial_number) 22 | else: 23 | self.camera.Connect() 24 | self.type = 'baumer' 25 | 26 | @staticmethod 27 | def get_available_cameras(cameras_num_to_find: int = 2, cameras_serial_numbers: List[str] = []) -> list[Camera]: 28 | cameras = [] 29 | 30 | if neoapi_found: 31 | i = 0 32 | 33 | while i < cameras_num_to_find: 34 | # Get next camera from neoapi 35 | if len(cameras_serial_numbers) == 0: 36 | camera = CameraBaumer(neoapi.Cam()) 37 | else: 38 | camera = CameraBaumer(neoapi.Cam(), cameras_serial_numbers[i]) 39 | if camera.camera.f.DeviceSerialNumber.value not in cameras_serial_numbers: 40 | raise Exception(f'Error: camera with serial number {cameras_serial_numbers[i]} was not found') 41 | 42 | # Set default camera parameters 43 | camera.exposure = 20_000 44 | camera.gain = 2.0 45 | camera.frame_rate_enable = True 46 | camera.frame_rate = 10.0 47 | camera.camera.f.PixelFormat = neoapi.PixelFormat_Mono8 48 | 49 | # Set first camera as master for triggering 50 | if i == 0: 51 | camera.trigger_mode = neoapi.TriggerMode_Off 52 | camera.line_selector = neoapi.LineSelector_Line1 53 | camera.line_mode = neoapi.LineMode_Output 54 | camera.line_source = neoapi.LineSource_ExposureActive 55 | else: 56 | # Set next camera as slave waiting for trigger 57 | camera.trigger_mode = neoapi.TriggerMode_On 58 | camera.line_selector = neoapi.LineSelector_Line1 59 | camera.line_mode = neoapi.LineMode_Input 60 | 61 | cameras.append(camera) 62 | i = i + 1 63 | 64 | return cameras 65 | 66 | def get_image(self) -> np.ndarray: 67 | img = self.camera.GetImage().GetNPArray() # Mono8 frames arrive as (H, W, 1); the reshape below drops the channel axis 68 |
return img.reshape(img.shape[0], img.shape[1]) 69 | 70 | @property 71 | def exposure(self): 72 | return self.camera.f.ExposureTime.value 73 | 74 | @exposure.setter 75 | def exposure(self, x): 76 | self.camera.f.ExposureTime.value = x 77 | 78 | @property 79 | def gain(self): 80 | return self.camera.f.Gain.value 81 | 82 | @gain.setter 83 | def gain(self, x): 84 | self.camera.f.Gain.value = x 85 | 86 | @property 87 | def gamma(self): 88 | return self.camera.f.Gamma.value 89 | 90 | @gamma.setter 91 | def gamma(self, x): 92 | self.camera.f.Gamma.value = x 93 | 94 | @property 95 | def exposure_auto(self): 96 | return self.camera.f.ExposureAuto.value 97 | 98 | @exposure_auto.setter 99 | def exposure_auto(self, x): 100 | self.camera.f.ExposureAuto.value = x 101 | 102 | @property 103 | def trigger_mode(self): 104 | return self.camera.f.TriggerMode.value 105 | 106 | @trigger_mode.setter 107 | def trigger_mode(self, x): 108 | self.camera.f.TriggerMode.value = x 109 | 110 | @property 111 | def line_selector(self): 112 | return self.camera.f.LineSelector.value 113 | 114 | @line_selector.setter 115 | def line_selector(self, x): 116 | self.camera.f.LineSelector.value = x 117 | 118 | @property 119 | def line_mode(self): 120 | return self.camera.f.LineMode.value 121 | 122 | @line_mode.setter 123 | def line_mode(self, x): 124 | self.camera.f.LineMode.value = x 125 | 126 | @property 127 | def line_source(self): 128 | return self.camera.f.LineSource.value 129 | 130 | @line_source.setter 131 | def line_source(self, x): 132 | self.camera.f.LineSource.value = x 133 | 134 | @property 135 | def frame_rate_enable(self): 136 | return self.camera.f.AcquisitionFrameRateEnable.value 137 | 138 | @frame_rate_enable.setter 139 | def frame_rate_enable(self, x): 140 | self.camera.f.AcquisitionFrameRateEnable.value = x 141 | 142 | @property 143 | def frame_rate(self): 144 | return self.camera.f.AcquisitionFrameRate.value 145 | 146 | @frame_rate.setter 147 | def frame_rate(self, x): 148 | self.camera.f.AcquisitionFrameRate.value = x 149 | -------------------------------------------------------------------------------- /camera_simulated.py: -------------------------------------------------------------------------------- 1 | '''Module for simulated camera class for test purpose''' 2 | 3 | from __future__ import annotations 4 | 5 | import numpy as np 6 | 7 | from camera import Camera 8 | from projector import Projector 9 | 10 | 11 | class CameraSimulated(Camera): 12 | def __init__(self): 13 | self.type = 'simulated' 14 | self._projector = None 15 | print(f'Simulated camera created') 16 | 17 | @staticmethod 18 | def get_available_cameras(cameras_num_to_find:int=2) -> list[Camera]: 19 | cameras = [] 20 | 21 | for _ in range(cameras_num_to_find): 22 | cameras.append(CameraSimulated()) 23 | 24 | return cameras 25 | 26 | def get_image(self) -> np.array: 27 | if self.projector is not None: 28 | img = self._projector.corrected_pattern 29 | return img 30 | else: 31 | raise ValueError() 32 | 33 | @property 34 | def projector(self) -> Projector: 35 | return self._projector 36 | 37 | @projector.setter 38 | def projector(self, projector: Projector): 39 | self._projector = projector 40 | 41 | @property 42 | def exposure(self): 43 | return self._exposure 44 | 45 | @exposure.setter 46 | def exposure(self, x): 47 | self._exposure = x 48 | 49 | @property 50 | def gain(self): 51 | return self._gain 52 | 53 | @gain.setter 54 | def gain(self, x): 55 | self._gain = x 56 | 57 | @property 58 | def gamma(self): 59 | return self._gamma 60 | 61 | 
@gamma.setter 62 | def gamma(self, x): 63 | self._gamma = x 64 | -------------------------------------------------------------------------------- /camera_web.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | from camera import Camera 7 | 8 | 9 | class CameraWeb(Camera): 10 | 11 | def __init__(self, width=1920, height=1080, number = 0): 12 | self.camera = cv2.VideoCapture(number, cv2.CAP_DSHOW) 13 | if self.camera.isOpened(): 14 | self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, width) 15 | self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height) 16 | self.camera.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) 17 | self.type = 'web' 18 | else: 19 | # If the camera cannot be opened, it is probably in use elsewhere 20 | raise ValueError() 21 | 22 | @staticmethod 23 | def get_available_cameras(cameras_num_to_find:int=2) -> list[Camera]: 24 | cameras = [] 25 | # Try to find defined number of cameras 26 | for i in range(cameras_num_to_find): 27 | try: 28 | cameras.append(CameraWeb(number=i)) 29 | except ValueError: 30 | # Skip camera with i id 31 | pass 32 | return cameras 33 | 34 | def get_image(self) -> np.ndarray: 35 | if self.camera.isOpened(): 36 | return self.camera.read()[1] 37 | 38 | @property 39 | def focus(self): 40 | return self.camera.get(cv2.CAP_PROP_FOCUS) 41 | 42 | @focus.setter 43 | def focus(self, x): 44 | self.camera.set(cv2.CAP_PROP_FOCUS, x) 45 | 46 | @property 47 | def exposure(self): 48 | return self.camera.get(cv2.CAP_PROP_EXPOSURE) 49 | 50 | @exposure.setter 51 | def exposure(self, x): 52 | self.camera.set(cv2.CAP_PROP_EXPOSURE, x) 53 | 54 | @property 55 | def brightness(self): 56 | return self.camera.get(cv2.CAP_PROP_BRIGHTNESS) 57 | 58 | @brightness.setter 59 | def brightness(self, x): 60 | self.camera.set(cv2.CAP_PROP_BRIGHTNESS, x) 61 | 62 | @property 63 | def gamma(self): 64 | return self.camera.get(cv2.CAP_PROP_GAMMA) 65 | 66 | @gamma.setter 67 | def gamma(self, x): 68 | self.camera.set(cv2.CAP_PROP_GAMMA, x) 69 | 70 | @property 71 | def gain(self): 72 | raise NotImplementedError() 73 | 74 | @gain.setter 75 | def gain(self, x): 76 | raise NotImplementedError() 77 | 78 | #cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1) 79 | #cap.set(cv2.CAP_PROP_AUTO_WB, 1)
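
A short, hypothetical usage sketch for `CameraWeb` (assumes a DirectShow-compatible webcam at index 0, i.e. Windows):

```python
import cv2
from camera_web import CameraWeb

cameras = CameraWeb.get_available_cameras(cameras_num_to_find=1)
if cameras:
    frame = cameras[0].get_image()  # BGR frame as a numpy array
    cv2.imshow('preview', frame)
    cv2.waitKey(0)
```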
-------------------------------------------------------------------------------- /cameras_helper.py: -------------------------------------------------------------------------------- 1 | '''Module with helper functions to work with Baumer cameras''' 2 | 3 | import os 4 | import sys 5 | import json 6 | import datetime 7 | import time 8 | import glob 9 | from pathlib import Path 10 | import multiprocessing as mp 11 | 12 | from functools import reduce 13 | 14 | import PIL 15 | from PIL import Image 16 | from PIL import ImageDraw 17 | from PIL import ImageFont 18 | 19 | import numpy as np 20 | 21 | import cv2 22 | 23 | import neoapi 24 | 25 | from camera_web import CameraWeb 26 | from camera_baumer import CameraBaumer 27 | 28 | """ 29 | def _get_image(camera_id): 30 | '''Get image from PYbgapi2 camera. 31 | 32 | Arguments: 33 | camera_id -- id of camera to capture image from 34 | ''' 35 | im = PYbgapi2.get_image_from_stream(camera_id) 36 | width = im['width'] 37 | height = im['height'] 38 | buffer = im['image_array'] 39 | frameID = im['frameID'] 40 | timestamp = im['timestamp'] 41 | pixel_format = im['pixel_format'] 42 | 43 | if (width > 0 and height > 0): 44 | print(f'Grab image {width}x{height} successfully from camera {camera_id} -- frameID={frameID}, timestamp={timestamp}') 45 | #buf = np.frombuffer(buffer, dtype=np.uint8, count=height*width).reshape((height, width)) 46 | #return buf 47 | if pixel_format == 'Mono8': 48 | format = 'L' 49 | else: 50 | format = 'I;16' 51 | 52 | return Image.frombytes(format, (width, height), buffer, 'raw') 53 | else: 54 | print(f'Error while grabbing image from camera {camera_id}') 55 | print(PYbgapi2.get_log()) 56 | return None 57 | """ 58 | 59 | def calibrate_cameras(markers_x, markers_y, images_count=15, use_stream=True, 60 | wait_period=3000, save_calibrating_images=True, 61 | save_path='', blur_threshold=100): 62 | '''Capture images for calibrating a stereo system with a chessboard pattern. The function captures images 63 | from two Baumer cameras and waits for a chessboard to appear on both images. If all points of the pattern 64 | are detected by cv2.findChessboardCorners on both images, these images are saved as files. 65 | 66 | Keyword arguments: 67 | images_count -- count of images to use for calibration (default 15) 68 | use_stream -- use stream option of PYbgapi2 (default True) 69 | wait_period -- period of time between two images captured for calibration (default 3000 ms) 70 | save_calibrating_images -- save images which are used for calibration (default True) 71 | save_path -- path to save images 72 | ''' 73 | 74 | ''' 75 | print(f'Init PYbgapi2 system -- {PYbgapi2.init_system()}') 76 | cameras = Camera.get_camera_list(PYbgapi2.get_camera_names()) 77 | 78 | if len(cameras) == 0: 79 | print(f'Cameras in PYbgapi2 system not found') 80 | return 81 | 82 | print(f'Cameras in PYbgapi2 system -- {cameras}') 83 | 84 | while not reduce(lambda x,y: x.started and y.started, cameras): 85 | print(f'Cameras not started, try to reinit system...') 86 | PYbgapi2.deinit_system() 87 | PYbgapi2.init_system() 88 | cameras = Camera.get_camera_list(PYbgapi2.get_camera_names()) 89 | ''' 90 | 91 | print('Init Baumer NeoAPI system ...') 92 | 93 | cameras = CameraBaumer.get_available_cameras(cameras_num_to_find=2) 94 | 95 | for cam_num, camera in enumerate(cameras): 96 | cv2.namedWindow(f'camera_{cam_num}', cv2.WINDOW_NORMAL) 97 | cv2.resizeWindow(f'camera_{cam_num}', 800, 600) 98 | 99 | images = [[] for _ in cameras] 100 | 101 | left_upper_corners = [[], []] 102 | right_bottom_corners = [[], []] 103 | 104 | calibrate_time = time.time() 105 | 106 | i = 0 107 | 108 | main_corners_found = False 109 | 110 | while True: 111 | # Get images and measure capturing time 112 | start_time = time.time() 113 | 114 | for cam_num, camera in enumerate(cameras): 115 | images[cam_num] = camera.get_image() 116 | 117 | end_time = time.time() - start_time 118 | 119 | if images[0] is not None and images[1] is not None: 120 | # Flags to store whether corners are found on each image 121 | # corners_found = True 122 | corners_found = [False, False] 123 | # Variables to store corners for area 124 | lu_corner = [None, None] 125 | rb_corner = [None, None] 126 | 127 | # Wait for wait_period to move chessboard on images 128 | do_calibrate = (time.time() - calibrate_time) > wait_period / 1000 129 |
130 | for k, img in enumerate(images): 131 | gray = img.copy() 132 | if cameras[k].type == 'web': 133 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 134 | 135 | img_to_draw = gray.copy() 136 | img_to_draw = cv2.cvtColor(img_to_draw, cv2.COLOR_GRAY2RGB) 137 | 138 | blur_index = cv2.Laplacian(gray, cv2.CV_64F).var() # variance of the Laplacian as a sharpness metric 139 | 140 | # Try to find chessboard on images 141 | if do_calibrate and blur_index > blur_threshold: 142 | # termination criteria 143 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) 144 | 145 | # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) 146 | objp = np.zeros((markers_y * markers_x, 3), np.float32) 147 | objp[:, :2] = np.mgrid[0:markers_x, 0:markers_y].T.reshape(-1, 2) 148 | 149 | # Find the chess board corners 150 | ret, corners = cv2.findChessboardCorners(gray, (markers_x, markers_y), flags=cv2.CALIB_CB_FAST_CHECK) 151 | 152 | # If found, add object points, image points (after refining them) 153 | if ret: 154 | corners_found[k] = True 155 | # Store left upper and right bottom corners 156 | lu_corner[k] = corners[0] 157 | rb_corner[k] = corners[-1] 158 | 159 | corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria) 160 | 161 | calibrate_time = time.time() 162 | 163 | # Draw and display the corners 164 | img_to_draw = cv2.drawChessboardCorners(img_to_draw, (markers_x, markers_y), corners2, ret) 165 | 166 | # Draw calibrated area on image 167 | fill_area = np.array(img_to_draw) 168 | if left_upper_corners[0] and left_upper_corners[1]: 169 | for j, _ in enumerate(left_upper_corners[k]): 170 | fill_area = cv2.rectangle(fill_area, 171 | (int(left_upper_corners[k][j][0][0]), int(left_upper_corners[k][j][0][1])), 172 | (int(right_bottom_corners[k][j][0][0]), int(right_bottom_corners[k][j][0][1])), 173 | (255, 0, 0), -1) 174 | img_to_draw = cv2.addWeighted(img_to_draw, 0.7, fill_area, 0.3, 0) 175 | 176 | 177 | cv2.putText(img_to_draw, f'Images captured {i} from {images_count}', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (255, 0, 0), 2) 178 | cv2.putText(img_to_draw, f'Blur index {blur_index:.2f}', (50, 100), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (255, 0, 0), 2) 179 | cv2.imshow(f"camera_{k}", img_to_draw) 180 | key = cv2.waitKey(10) 181 | if key == 27: # Escape 182 | return 183 | elif key != -1: 184 | print(key) 185 | 186 | if all(corners_found) and main_corners_found: 187 | for k, _ in enumerate(images): 188 | left_upper_corners[k].append(lu_corner[k]) 189 | right_bottom_corners[k].append(rb_corner[k]) 190 | 191 | if save_calibrating_images: 192 | for cam_num, image in enumerate(images): 193 | cv2.imwrite(f'{save_path}camera_{cam_num}_image{i}.tif', image) 194 | 195 | if i == images_count: 196 | break 197 | 198 | i = i + 1 199 | 200 | corners_found = [False, False] 201 | main_corners_found = False 202 | elif all(corners_found) and not main_corners_found: 203 | main_corners_found = True 204 | else: 205 | print('Failed to grab images from camera 0 and 1') 206 | 207 | 208 | def calculate_calibration(force_recalculate=False, file_mask1='camera_2_image*.png', file_mask2='camera_1_image*.png', camera_type = "web"): 209 | markers_x = 25 #37 210 | markers_y = 17 #23 211 | 212 | square_size_x = 5 # mm 213 | square_size_y = 5 # mm 214 | 215 | # VCXG-32M 216 | sensor_x_size = 6.9632 # mm 217 | sensor_y_size = 5.2224 # mm 218 | 219 | # VLG-24M 220 | # sensor_x_size = 7.06 # mm 221 | # sensor_y_size = 5.29 # mm 222 | 223 | data_loaded = False 224 | 225 | if not force_recalculate: 226 | try: 227 | with open('calibrated_data.json', 'r') as fp: 228 |
calibration_data = json.load(fp) 229 | data_loaded = True 230 | print('Calibration data is loaded from calibration_data.json') 231 | except Exception: 232 | print('Calibration data is not found in calibration_data.json') 233 | 234 | if not data_loaded or force_recalculate: 235 | # termination criteria 236 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6) 237 | 238 | # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) 239 | objp = np.zeros((markers_x * markers_y, 3), np.float32) 240 | objp[:, :2] = np.mgrid[0:markers_y, 0:markers_x].T.reshape(-1, 2) 241 | 242 | objp[:, 1] *= square_size_x 243 | objp[:, 0] *= square_size_y 244 | 245 | # Arrays to store object points and image points from all the images. 246 | objpoints = [] # 3d point in real world space 247 | 248 | imgpoints1 = [] # 2d points in image plane. 249 | imgpoints2 = [] 250 | 251 | images_for_camera1 = glob.glob(file_mask1) 252 | images_for_camera2 = glob.glob(file_mask2) 253 | 254 | cv2.namedWindow('img1', cv2.WINDOW_NORMAL) 255 | cv2.resizeWindow('img1', 800, 600) 256 | cv2.namedWindow('img2', cv2.WINDOW_NORMAL) 257 | cv2.resizeWindow('img2', 800, 600) 258 | 259 | files_to_delete = [] 260 | 261 | for fname1, fname2 in zip(images_for_camera1, images_for_camera2): 262 | img1 = cv2.imread(fname1, cv2.IMREAD_GRAYSCALE) 263 | img2 = cv2.imread(fname2, cv2.IMREAD_GRAYSCALE) 264 | gray1 = img1.copy() 265 | gray2 = img2.copy() 266 | 267 | img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR) 268 | img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR) 269 | 270 | # if camera_type == "web": 271 | # gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) 272 | # gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) 273 | 274 | # cv2.imshow('img1', img1) 275 | # cv2.waitKey() 276 | 277 | # Find the chess board corners 278 | ret1, corners1 = cv2.findChessboardCorners(gray1, (markers_y, markers_x), cv2.CALIB_CB_ADAPTIVE_THRESH) 279 | ret2, corners2 = cv2.findChessboardCorners(gray2, (markers_y, markers_x), cv2.CALIB_CB_ADAPTIVE_THRESH) 280 | # Make the corner ordering consistent between the two views (guarded by ret to avoid indexing empty results) 281 | if ret1 and corners1[0,0,0] + corners1[0,0,1] > corners1[-1,0,0] + corners1[-1,0,1]: 282 | corners1 = corners1[::-1,:,:].copy() 283 | if ret2 and corners2[0,0,0] + corners2[0,0,1] > corners2[-1,0,0] + corners2[-1,0,1]: 284 | corners2 = corners2[::-1,:,:].copy() 285 | 286 | # If found, add object points, image points (after refining them) 287 | if ret1 and ret2: 288 | objpoints.append(objp) 289 | corners_subpix1 = cv2.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria) 290 | corners_subpix2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria) 291 | imgpoints1.append(corners_subpix1) 292 | imgpoints2.append(corners_subpix2) 293 | 294 | # Draw and display the corners 295 | cv2.drawChessboardCorners(img1, (markers_y, markers_x), corners_subpix1, ret1) 296 | cv2.drawChessboardCorners(img2, (markers_y, markers_x), corners_subpix2, ret2) 297 | cv2.waitKey(100) 298 | else: 299 | files_to_delete.append((fname1, fname2)) 300 | 301 | cv2.imshow('img1', img1) 302 | cv2.imshow('img2', img2) 303 | cv2.waitKey(100) 304 | 305 | for file in files_to_delete: 306 | images_for_camera1.remove(file[0]) 307 | images_for_camera2.remove(file[1]) 308 | 309 | cv2.destroyAllWindows() 310 | 311 | camera_matrix = np.array([[50, 0, gray1.shape[1] / 2], [0, 50, gray1.shape[0] / 2], [0, 0, 1]]) # rough initial intrinsics guess; principal point fixed at the image centre 312 | dist_coef = np.zeros(12) 313 |
314 | ret1, mtx1, dist1, rvecs1, tvecs1, stdDeviationsIntrinsics1, stdDeviationsExtrinsics1, perViewErrors1 = cv2.calibrateCameraExtended(objpoints, imgpoints1, gray1.shape[::-1], camera_matrix, dist_coef, flags=cv2.CALIB_FIX_PRINCIPAL_POINT, criteria=criteria) 315 | fovx1, fovy1, focalLength1, principalPoint1, aspectRatio1 = cv2.calibrationMatrixValues(mtx1, gray1.shape[::-1], sensor_x_size, sensor_y_size) 316 | 317 | print('Camera 1 calibration results:') 318 | print(f'RMS error {ret1:<15.4f}') 319 | print('Camera matrix:') 320 | print(f'{mtx1}') 321 | print(f'Focal length {focalLength1:<15.2f}') 322 | 323 | ret2, mtx2, dist2, rvecs2, tvecs2, stdDeviationsIntrinsics2, stdDeviationsExtrinsics2, perViewErrors2 = cv2.calibrateCameraExtended(objpoints, imgpoints2, gray1.shape[::-1], camera_matrix, dist_coef, flags=cv2.CALIB_FIX_PRINCIPAL_POINT, criteria=criteria) 324 | fovx2, fovy2, focalLength2, principalPoint2, aspectRatio2 = cv2.calibrationMatrixValues(mtx2, gray1.shape[::-1], sensor_x_size, sensor_y_size) 325 | 326 | print('Camera 2 calibration results:') 327 | print(f'RMS error {ret2:<15.4f}') 328 | print('Camera matrix:') 329 | print(f'{mtx2}') 330 | print(f'Focal length {focalLength2:<15.2f}') 331 | 332 | retval = 10 333 | perViewErrors = None 334 | 335 | while retval > 1 or np.max(perViewErrors) > 1: # short-circuits on the first pass while perViewErrors is None 336 | if perViewErrors is not None: 337 | std = np.std(perViewErrors) 338 | avg = np.average(perViewErrors) 339 | 340 | outliers = [] 341 | for i in range(len(perViewErrors)): 342 | if np.average(perViewErrors[i]) > 3*std and np.average(perViewErrors[i]) > 1.2*avg: 343 | outliers.append(i) 344 | if len(outliers) == 0: 345 | print(f'Stereo calibration stopped at retval = {retval:.3f} with {len(perViewErrors)} images in the calibration set, as no outliers were found...') 346 | break 347 | for i in sorted(outliers, reverse=True): 348 | objpoints.pop(i) 349 | imgpoints1.pop(i) 350 | imgpoints2.pop(i) 351 | print(f'Stereo calibration iteration, {len(outliers)} outliers found... retval = {retval:.3f}') 352 |
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F, perViewErrors = cv2.stereoCalibrateExtended(objpoints, imgpoints1, imgpoints2, 353 | mtx1, dist1, mtx2, dist2, 354 | gray1.shape[::-1], None, None, flags=cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_K1 + cv2.CALIB_FIX_K2 + cv2.CALIB_FIX_K3 + 355 | cv2.CALIB_FIX_PRINCIPAL_POINT + cv2.CALIB_SAME_FOCAL_LENGTH) 356 | else: 357 | print(f'Stereo calibration stopped at retval = {retval:.3f} with {len(perViewErrors)} images in the calibration set') 358 | 359 | _, _, focalLength1, _, _ = cv2.calibrationMatrixValues(cameraMatrix1, gray1.shape[::-1], sensor_x_size, sensor_y_size) 360 | 361 | print('Camera 1 after stereo calibration results:') 362 | print(f'RMS error {retval:<15.4f}') 363 | print('Camera matrix:') 364 | print(f'{cameraMatrix1}') 365 | print(f'Focal length {focalLength1:<15.2f}') 366 | 367 | _, _, focalLength2, _, _ = cv2.calibrationMatrixValues(cameraMatrix2, gray2.shape[::-1], sensor_x_size, sensor_y_size) 368 | 369 | print('Camera 2 after stereo calibration results:') 370 | print(f'RMS error {retval:<15.4f}') 371 | print('Camera matrix:') 372 | print(f'{cameraMatrix2}') 373 | print(f'Focal length {focalLength2:<15.2f}') 374 | 375 | print(f'Distance between cameras {np.sum(T**2)**0.5:<15.2f}') 376 | 377 | 378 | R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, gray1.shape[::-1], R, T, alpha=-1, flags=0) 379 | 380 | mapx1, mapy1 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, gray1.shape[::-1], cv2.CV_32F) 381 | mapx2, mapy2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, gray1.shape[::-1], cv2.CV_32F) 382 | 383 | width = max(roi1[2], roi2[2]) 384 | height = max(roi1[3], roi2[3]) 385 | 386 | for fname1, fname2 in zip(images_for_camera1, images_for_camera2): 387 | img1 = cv2.imread(fname1) 388 | img2 = cv2.imread(fname2) 389 | gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) 390 | gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) 391 | 392 | img_rect1 = cv2.remap(gray1, mapx1, mapy1, cv2.INTER_LINEAR)[roi1[1]:roi1[1]+height, roi1[0]:roi1[0]+width] 393 | img_rect2 = cv2.remap(gray2, mapx2, mapy2, cv2.INTER_LINEAR)[roi2[1]:roi2[1]+height, roi2[0]:roi2[0]+width] 394 | 395 | # draw the images side by side 396 | total_size = (max(img_rect1.shape[0], img_rect2.shape[0]), img_rect1.shape[1] + img_rect2.shape[1]) 397 | img = np.zeros(total_size, dtype=np.uint8) 398 | img[: img_rect1.shape[0], : img_rect1.shape[1]] = img_rect1 399 | img[: img_rect2.shape[0], img_rect1.shape[1] :] = img_rect2 400 | 401 | # draw horizontal lines every 25 px across the side by side image 402 | for i in range(20, img.shape[0], 25): 403 | cv2.line(img, (0, i), (img.shape[1], i), (255, 0, 0)) 404 | 405 | cv2.namedWindow('imgRectified', cv2.WINDOW_NORMAL) 406 | cv2.resizeWindow('imgRectified', 550, 450) 407 | cv2.imshow('imgRectified', img) 408 | cv2.namedWindow('img1', cv2.WINDOW_NORMAL) 409 | cv2.resizeWindow('img1', 550, 450) 410 | cv2.imshow('img1', cv2.remap(gray1, mapx1, mapy1, cv2.INTER_LINEAR)) 411 | cv2.namedWindow('img2', cv2.WINDOW_NORMAL) 412 | cv2.resizeWindow('img2', 550, 450) 413 | cv2.imshow('img2', cv2.remap(gray2, mapx2, mapy2, cv2.INTER_LINEAR)) 414 | cv2.waitKey(100) 415 | 416 | 417 | calibration_data = { 418 | 'camera_0': 419 | {'camera_id': 0, 420 | 'ret': ret1, 421 | 'mtx': mtx1.tolist(), 422 | 'dist': dist1.tolist(), 423 | 'rvecs': [el.tolist() for el in rvecs1], 424 | 'tvecs': [el.tolist() for el in
tvecs1], 425 | 'perViewErrors': perViewErrors1.tolist() 426 | }, 427 | 'camera_1': 428 | {'camera_id': 1, 429 | 'ret': ret2, 430 | 'mtx': mtx2.tolist(), 431 | 'dist': dist2.tolist(), 432 | 'rvecs': [el.tolist() for el in rvecs2], 433 | 'tvecs': [el.tolist() for el in tvecs2], 434 | 'perViewErrors': perViewErrors2.tolist() 435 | }, 436 | 'R': R.tolist(), 437 | 'T': T.tolist(), 438 | 'ret': retval, 439 | 'perViewErrors': perViewErrors.tolist() 440 | } 441 | 442 | with open('calibrated_data.json', 'w') as fp: 443 | json.dump(calibration_data, fp, indent=4) 444 | print('Calibration data is saved to calibration_data.json') 445 | 446 | return cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T 447 | 448 | else: 449 | return np.array(calibration_data['camera_0']['mtx']), np.array(calibration_data['camera_0']['dist']), \ 450 | np.array(calibration_data['camera_1']['mtx']), np.array(calibration_data['camera_1']['dist']), \ 451 | np.array(calibration_data['R']), np.array(calibration_data['T']) 452 | 453 | 454 | def experiment_registration(): 455 | 456 | path_to_save = os.path.join('C:/exp_img/', datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) 457 | 458 | background_substruct = False 459 | preprocessing = False 460 | threshold = 70 461 | registration = False 462 | reg_image_num = 0 463 | capturing = True 464 | 465 | ''' 466 | print(f'Init PYbgapi2 system -- {PYbgapi2.init_system()}') 467 | cameras = Camera.get_camera_list(PYbgapi2.get_camera_names()) 468 | 469 | if len(cameras) == 0: 470 | print(f'Cameras in PYbgapi2 system not found') 471 | return 472 | 473 | print(f'Cameras in PYbgapi2 system -- {cameras}') 474 | 475 | while not reduce(lambda x,y: x.started and y.started, cameras): 476 | print(f'Cameras not started, try to reinit system...') 477 | PYbgapi2.deinit_system() 478 | PYbgapi2.init_system() 479 | cameras = Camera.get_camera_list(PYbgapi2.get_camera_names()) 480 | ''' 481 | 482 | print(f'Init Baumer NeoAPI system ...') 483 | cameras = [Camera(), Camera()] 484 | 485 | for camera in cameras: 486 | camera.gain = 5 487 | camera.triger_mode = neoapi.TriggerMode_On 488 | camera.pixel_format = neoapi.PixelFormat_Mono8 489 | 490 | for camera in cameras: 491 | cv2.namedWindow(camera.name, cv2.WINDOW_NORMAL) 492 | cv2.resizeWindow(camera.name, 550, 450) 493 | 494 | images = [[] for camera in cameras] 495 | 496 | while capturing: 497 | # Get images and measure capturing time 498 | 499 | start_time = time.time() 500 | 501 | for i, camera in enumerate(cameras): 502 | images[i] = camera.get_image() 503 | 504 | end_time = time.time() - start_time 505 | 506 | print(f'Images from cameras grabbed in {end_time} sec') 507 | 508 | for i, im in enumerate(images): 509 | if im is not None: 510 | 511 | im = np.array(im) 512 | 513 | if background_substruct: 514 | im = cv2.subtract(im, background[i]) 515 | 516 | if preprocessing: 517 | if im.dtype == np.uint16: 518 | max_value = 4095 519 | else: 520 | max_value = 255 521 | _, im = cv2.threshold(im, threshold, max_value, cv2.THRESH_BINARY) 522 | kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)) 523 | im = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel) 524 | im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel) 525 | 526 | if im.dtype == np.uint16: 527 | im = cv2.normalize(im, dst=None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX) 528 | cv2.imshow(cameras[i].name, im) 529 | 530 | if registration: 531 | if os.path.exists(path_to_save): 532 | cv2.imwrite(os.path.join(path_to_save, f'camera{i}/IMG{reg_image_num:>04}.tif'), im) 533 | else: 534 | 
os.makedirs(path_to_save) 535 | for cam_num, _ in enumerate(cameras): 536 | os.makedirs(os.path.join(path_to_save, f"camera{cam_num}")) 537 | 538 | if registration: 539 | reg_image_num += 1 540 | 541 | key = cv2.waitKey(1) 542 | if key == 27: # Esc key to stop 543 | capturing = False 544 | elif key == -1: # normally -1 returned,so don't print it 545 | continue 546 | elif key == 32: # Space 547 | images[0].save('test0.tiff') 548 | images[1].save('test1.tiff') 549 | elif key == 104: # h 550 | if registration: 551 | registration = False 552 | print(f'Image registration is OFF') 553 | else: 554 | registration = True 555 | reg_image_num = 0 556 | print(f'Image registration is ON') 557 | elif key == 119: # w 558 | for camera in cameras: 559 | camera.gain += 1 560 | print(f'Camera {camera.name} gain set to {camera.gain}') 561 | elif key == 115: # s 562 | for camera in cameras: 563 | camera.gain -= 1 564 | print(f'Camera {camera.name} gain set to {camera.gain}') 565 | elif key == 100: # d 566 | for camera in cameras: 567 | camera.exposure_time += 1000 568 | print(f'Camera {camera.name} exposure time set to {camera.exposure_time} us') 569 | elif key == 97: # a 570 | for camera in cameras: 571 | camera.exposure_time -= 1000 572 | print(f'Camera {camera.name} exposure time set to {camera.exposure_time} us') 573 | elif key == 113: # q 574 | if cameras[0].triger_mode.value == neoapi.TriggerMode_On: 575 | value = neoapi.TriggerMode_Off 576 | else: 577 | value = neoapi.TriggerMode_On 578 | for camera in cameras: 579 | camera.triger_mode = value 580 | print(f'Camera {camera.name} trigger mode set to {camera.triger_mode.GetString()}') 581 | elif key == 101: # e 582 | if cameras[0].pixel_format.value == neoapi.PixelFormat_Mono8: 583 | value = neoapi.PixelFormat_Mono12 584 | else: 585 | value = neoapi.PixelFormat_Mono8 586 | for camera in cameras: 587 | camera.pixel_format = value 588 | print(f'Camera {camera.name} pixel format set to {camera.pixel_format.GetString()}') 589 | elif key == 98: # b 590 | if not background_substruct: 591 | background = [[] for camera in cameras] 592 | for i in range(len(cameras)): 593 | background[i] = np.array(images[i]) 594 | print(f'Backgrounds saved. 
Background subtraction is on') 595 | background_substruct = True 596 | else: 597 | background_substruct = False 598 | print('Background subtraction is off') 599 | elif key == 112: # p 600 | preprocessing = not preprocessing 601 | print(f'Preprocessing is -- {preprocessing}') 602 | elif key == 49: # 1 603 | if threshold > 0: 604 | threshold -= 1 605 | print(f'Threshold = {threshold}') 606 | elif key == 50: # 2 607 | if threshold < 255: 608 | threshold += 1 609 | print(f'Threshold = {threshold}') 610 | else: 611 | print(key) # else print its value 612 | 613 | # for i in range(len(cameras)): 614 | # print(f'Stop camera {i} in PYbgapi2 system -- {PYbgapi2.stop_camera(i)}') 615 | 616 | # print(f'DeInit PYbgapi2 system -- {PYbgapi2.deinit_system()}') 617 | 618 | 619 | if __name__ == '__main__': 620 | 621 | MARKERS_X = 37 # 25 #17 622 | MARKERS_Y = 23 # 17 #13 623 | 624 | CALIBRATE_IMAGES_COUNT = 50 625 | CALIBRATE_IMAGES_PATH = r'.\calibrate_images\\' 626 | CALIBRATE_FILE_MASK_1 = r'.\calibrate_images\camera_0_image*.tif' 627 | CALIBRATE_FILE_MASK_2 = r'.\calibrate_images\camera_1_image*.tif' 628 | RECALCULATE_CALIBRATION = True 629 | 630 | calibrate_cameras( 631 | MARKERS_X, 632 | MARKERS_Y, 633 | images_count=CALIBRATE_IMAGES_COUNT, 634 | save_calibrating_images=True, 635 | save_path=CALIBRATE_IMAGES_PATH, 636 | blur_threshold=50, 637 | ) 638 | 639 | cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T = \ 640 | calculate_calibration( 641 | RECALCULATE_CALIBRATION, 642 | CALIBRATE_FILE_MASK_1, 643 | CALIBRATE_FILE_MASK_2, 644 | camera_type="baumer", 645 | ) 646 | 647 | # experiment_registration()
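
For orientation, a hedged sketch of how the values returned by calculate_calibration could feed standard OpenCV triangulation (the matched point pair below is hypothetical; real pixel coordinates should be undistorted with cv2.undistortPoints first):

```python
import cv2
import numpy as np
from cameras_helper import calculate_calibration

K1, dist1, K2, dist2, R, T = calculate_calibration()

# Projection matrices of the stereo pair: P1 = K1 [I|0], P2 = K2 [R|T]
P1 = K1 @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K2 @ np.hstack([R, np.asarray(T).reshape(3, 1)])

pts1 = np.array([[640.0], [512.0]])  # hypothetical pixel in camera 1, shape (2, N)
pts2 = np.array([[655.0], [510.0]])  # hypothetical match in camera 2
X = cv2.triangulatePoints(P1, P2, pts1, pts2)
points_3d = (X[:3] / X[3]).T  # homogeneous -> Euclidean, shape (N, 3)
```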
-------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "projector": { 3 | "width": 1280, 4 | "height": 720, 5 | "min_brightness": 11, 6 | "max_brightness": 177 7 | }, 8 | "cameras": { 9 | "baumer": [ 10 | { 11 | "exposure": 4850, 12 | "gamma": 0.49, 13 | "gain": 0.98 14 | }, 15 | { 16 | "exposure": 5000, 17 | "gamma": 0.5, 18 | "gain": 1.0 19 | } 20 | ] 21 | }, 22 | "capture_parameters": { 23 | "delay": 300 24 | } 25 | } -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module to store program configuration 3 | 4 | Calibration data is stored in a separate config.json file and loaded when the module is imported 5 | ''' 6 | 7 | import json 8 | 9 | # Projector configuration 10 | # Projector resolution width and height in pixels 11 | PROJECTOR_WIDTH = 1280 12 | PROJECTOR_HEIGHT = 720 13 | # OpenCV GUI windows shift relative to first screen of the system 14 | PROJECTOR_WINDOW_SHIFT = 1920 15 | 16 | # Maximum and minimum brightness for projected image correction 17 | # Given values are defaults, the current values are loaded from the calibration file 18 | PROJECTOR_MIN_BRIGHTNESS = 0.0 19 | PROJECTOR_MAX_BRIGHTNESS = 1.0 20 | 21 | # Gamma correction coefficients for formula Iout = a * (Iin + c) ^ b 22 | # Given values are defaults, the current values are loaded from the calibration file 23 | PROJECTOR_GAMMA_A = 1.0 24 | PROJECTOR_GAMMA_B = 2.2 25 | PROJECTOR_GAMMA_C = 0 26 | 27 | # Cameras configuration 28 | # Number of cameras used in measurement 29 | CAMERAS_COUNT = 2 30 | 31 | # Type of cameras used in measurement 32 | CAMERA_TYPE = 'baumer' 33 | 34 | # Cameras parameters default values, the current values are loaded from the calibration file 35 | CAMERA_EXPOSURE = [20000, 20000] 36 | CAMERA_GAIN = [1, 1] 37 | CAMERA_GAMMA = [1, 1] 38 | 39 | # Measurement configuration 40 | # Path to save measurement data 41 | DATA_PATH = './data' 42 | 43 | # Images filenames mask 44 | IMAGES_FILENAME_MASK = 'frame_{2}_{0}_{1}.png' 45 | 46 | # Measurement filenames mask 47 | MEASUREMENT_FILENAME_MASK = 'fpp_measurement.json' 48 | 49 | # Cameras folders in measurement folder 50 | CAMERAS_FOLDER_NAMES = ['cam1', 'cam2'] 51 | 52 | # Save measurement image files 53 | SAVE_MEASUREMENT_IMAGE_FILES = False 54 | 55 | # Delay between pattern projection and camera image capture in milliseconds 56 | MEASUREMENT_CAPTURE_DELAY = 300 # ms 57 | 58 | # File name for calibration data 59 | CONFIG_FILENAME = r"./config.json" 60 | 61 | # Use multiprocessing to increase speed of processing 62 | USE_MULTIPROCESSING = False 63 | 64 | # Number of Pools to use in parallel processing 65 | POOLS_NUMBER = 5 66 | 67 | # Path to last measurement results 68 | LAST_MEASUREMENT_PATH = None 69 | 70 | 71 | # Load calibration data from json file 72 | try: 73 | with open('config.json') as f: 74 | calibration_data = json.load(f) 75 | 76 | try: 77 | PROJECTOR_MIN_BRIGHTNESS = float(calibration_data['projector']['min_brightness']) 78 | PROJECTOR_MAX_BRIGHTNESS = float(calibration_data['projector']['max_brightness']) 79 | 80 | PROJECTOR_GAMMA_A = float(calibration_data['projector']['gamma_a']) 81 | PROJECTOR_GAMMA_B = float(calibration_data['projector']['gamma_b']) 82 | PROJECTOR_GAMMA_C = float(calibration_data['projector']['gamma_c']) 83 | except Exception: 84 | pass 85 | 86 | try: 87 | CAMERA_EXPOSURE = [int(calibration_data['cameras']['baumer'][0]['exposure']), 88 | int(calibration_data['cameras']['baumer'][1]['exposure'])] 89 | CAMERA_GAIN = [float(calibration_data['cameras']['baumer'][0]['gain']), 90 | float(calibration_data['cameras']['baumer'][1]['gain'])] 91 | CAMERA_GAMMA = [float(calibration_data['cameras']['baumer'][0]['gamma']), 92 | float(calibration_data['cameras']['baumer'][1]['gamma'])] 93 | except Exception: 94 | pass 95 | 96 | try: 97 | LAST_MEASUREMENT_PATH = calibration_data['measurements']['last_measurement_path'] 98 | except Exception: 99 | pass 100 | except Exception: 101 | pass 102 | 103 | 104 | def save_calibration_data() -> None: 105 | ''' 106 | Save calibration data to config.json file 107 | ''' 108 | try: 109 | with open("config.json") as f: 110 | calibration_data = json.load(f) 111 | 112 | calibration_data['projector']['gamma_a'] = PROJECTOR_GAMMA_A 113 | calibration_data['projector']['gamma_b'] = PROJECTOR_GAMMA_B 114 | calibration_data['projector']['gamma_c'] = PROJECTOR_GAMMA_C 115 | 116 | for i in range(CAMERAS_COUNT): 117 | calibration_data["cameras"]["baumer"][i]["exposure"] = CAMERA_EXPOSURE[i] 118 | calibration_data["cameras"]["baumer"][i]["gain"] = CAMERA_GAIN[i] 119 | calibration_data["cameras"]["baumer"][i]["gamma"] = CAMERA_GAMMA[i] 120 | 121 | calibration_data['measurements']['last_measurement_path'] = LAST_MEASUREMENT_PATH 122 | except Exception: 123 | pass 124 | else: 125 | with open("config.json", "w") as f: 126 | json.dump(calibration_data, f, ensure_ascii=False, indent=4) 127 | -------------------------------------------------------------------------------- /create_patterns.py: -------------------------------------------------------------------------------- 1 | '''Module for creating FPP patterns''' 2 | 3 | from __future__ import annotations 4 | 5 | import numpy as np 6 | 7 | from fpp_structures import PhaseShiftingAlgorithm 8 | 9 | 10 | def create_psp_template(width: int,
height: int, frequency: float, shifts_number: int = 4, vertical: bool = True, delta_fi: float = 0) -> tuple[list[np.ndarray], list[float]]: 11 | ''' 12 | Create set of patterns for phase shift profilometry for one frequency. 13 | Patterns returned as list of numpy arrays. 14 | 15 | Args: 16 | width (int): width of patterns to generate 17 | height (int): height of patterns to generate 18 | frequency (float): frequency of patterns to generate 19 | shifts_number (int): number of phase shifts for generated patterns 20 | vertical (bool): create vertical fringes, if False create horizontal 21 | delta_fi (float): additional phase shift added to each point of pattern 22 | Returns: 23 | patterns (list[np.ndarray]): list of generated patterns 24 | phase_shifts (list[float]): list of phase shifts for generated patterns 25 | ''' 26 | patterns = [] 27 | 28 | # Determine length of cos sequence 29 | if vertical: 30 | length = width 31 | else: 32 | length = height 33 | 34 | # Create x sequence 35 | x = np.linspace(0, length, length) 36 | 37 | # Calculate phase shifts 38 | phase_shifts = [2 * np.pi / shifts_number * i + delta_fi for i in range(shifts_number)] 39 | 40 | for phase_shift in phase_shifts: 41 | # Calculate cos sequence with defined parameters 42 | cos = 0.5 + 0.5 * np.cos(2 * np.pi * frequency * (x / length) - phase_shift) 43 | 44 | # Tile cos sequence for vertical or horizontal fringe orientation 45 | if vertical: 46 | pattern = np.tile(cos, (height, 1)) 47 | else: 48 | pattern = np.tile(cos.reshape((-1, 1)), (1, width)) 49 | 50 | patterns.append(pattern) 51 | 52 | return patterns, phase_shifts 53 | 54 | 55 | def create_psp_templates(width: int, height: int, frequencies: list[float], phase_shift_type: PhaseShiftingAlgorithm, shifts_number: int = 4, vertical: bool = True) -> tuple[list[list[np.ndarray]], list[float]]: 56 | ''' 57 | Create set of patterns for phase shift profilometry for defined frequencies and 58 | defined phase shift algorithm. Patterns returned as list of list of numpy arrays. 59 | Outer list contains list with shifts_numbers patterns for each frequency. 60 | Patterns for one frequency generated via create_psp_template(). 
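 For the n-step algorithm each pattern follows I_n(x) = 0.5 + 0.5*cos(2*pi*frequency*x/length - delta_n) with phase shifts delta_n = 2*pi*n/shifts_number; the double three-step algorithm adds a second three-step set offset by pi/3.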
61 | 62 | Args: 63 | width (int): width of patterns to generate 64 | height (int): height of patterns to generate 65 | frequencies (list[float]): list of frequencies of patterns to generate 66 | phase_shift_type (PhaseShiftingAlgorithm): type of phase shift algorithm 67 | shifts_number (int): number of phase shifts for one frequency 68 | vertical (bool): create vertical patterns, if False create horizontal 69 | 70 | Returns: 71 | patterns (list[list[np.ndarray]]): set of generated patterns 72 | phase_shifts (list[float]): list of phase shifts for generated patterns 73 | ''' 74 | patterns = [] 75 | 76 | # Generate patterns depending on phase_shift_type 77 | if phase_shift_type == PhaseShiftingAlgorithm.n_step: 78 | for frequency in frequencies: 79 | template, phase_shifts = create_psp_template(width, height, frequency, shifts_number, vertical) 80 | patterns.append(template) 81 | elif phase_shift_type == PhaseShiftingAlgorithm.double_three_step: 82 | for frequency in frequencies: 83 | template, phase_shifts = create_psp_template(width, height, frequency, 3, vertical) 84 | patterns.append(template) 85 | # Add a set of 3-step shifted patterns with PI/3 offset 86 | template, phase_shifts2 = create_psp_template(width, height, frequency, 3, vertical, np.pi / 3) 87 | # Overall 6 patterns with 6 phase shifts generated 88 | patterns[-1].extend(template) 89 | phase_shifts.extend(phase_shifts2) 90 | 91 | return patterns, phase_shifts 92 | 93 | 94 | def linear_gradient(width: int, height: int, vertical: bool = True) -> np.ndarray: 95 | ''' 96 | Create linear gradient pattern. It can be used for calibration purposes. 97 | 98 | Args: 99 | width (int): width of patterns to generate 100 | height (int): height of patterns to generate 101 | vertical (bool): create vertical gradient, if False create horizontal 102 | 103 | Returns: 104 | gradient (np.ndarray): generated linear gradient pattern 105 | ''' 106 | # Determine length of gradient sequence 107 | if vertical: 108 | length = width 109 | else: 110 | length = height 111 | 112 | # Create x sequence 113 | x = np.linspace(0, length, length) 114 | 115 | # Calculate gradient sequence 116 | gradient = x / length 117 | 118 | # Tile gradient sequence for vertical or horizontal orientation 119 | if vertical: 120 | gradient = np.tile(gradient, (height, 1)) 121 | else: 122 | gradient = np.tile(gradient.reshape((-1, 1)), (1, width)) 123 | 124 | return gradient
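
The README mentions a hierarchical (temporal) unwrapping approach, implemented in processing.py (not shown in this section). As a rough standalone sketch of the standard two-frequency step it relies on, assuming phi_lo is already absolute and f_lo < f_hi:

```python
import numpy as np

def unwrap_two_frequency(phi_lo: np.ndarray, phi_hi: np.ndarray, f_lo: float, f_hi: float) -> np.ndarray:
    '''Scale the low-frequency phase and pick the fringe order k that explains phi_hi.'''
    k = np.round((phi_lo * (f_hi / f_lo) - phi_hi) / (2 * np.pi))
    return phi_hi + 2 * np.pi * k
```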
-------------------------------------------------------------------------------- /examples/test_plate_phasogrammetry.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | 5 | import numpy as np 6 | from scipy import linalg 7 | from matplotlib import pyplot as plt 8 | 9 | # Import modules from parent directory 10 | sys.path.insert(1, os.path.join(sys.path[0], '..')) 11 | 12 | import config 13 | from fpp_structures import FPPMeasurement, PhaseShiftingAlgorithm 14 | from processing import calculate_phase_for_fppmeasurement, create_polygon, process_fppmeasurement_with_phasogrammetry, get_phase_field_ROI, get_phase_field_LUT, triangulate_points 15 | from utils import get_images_from_config, load_fpp_measurements 16 | 17 | 18 | def fit_to_plane(x, y, z): 19 | # From https://math.stackexchange.com/questions/99299/best-fitting-plane-given-a-set-of-points 20 | tmp_A = [] 21 | tmp_b = [] 22 | 23 | for i in range(z.shape[0]): 24 | tmp_A.append([x[i], y[i], 1]) 25 | tmp_b.append(z[i]) 26 | b = np.matrix(tmp_b).T 27 | A = np.matrix(tmp_A) 28 | 29 | fit, residual, rnk, s = linalg.lstsq(A, b) 30 | return fit 31 | 32 | 33 | def process_with_phasogrammetry(measurement: FPPMeasurement): 34 | # If there are no images in measurement - load them 35 | if len(measurement.camera_results[0].imgs_list) == 0: 36 | print('Load images from files...', end='', flush=True) 37 | for cam_result in measurement.camera_results: 38 | cam_result.imgs_list = get_images_from_config(cam_result.imgs_file_names) 39 | print('Done') 40 | 41 | # Display FPPMeasurement parameters 42 | if measurement.phase_shifting_type == PhaseShiftingAlgorithm.n_step: 43 | algorithm_type = f'{len(measurement.shifts)}-step' 44 | elif measurement.phase_shifting_type == PhaseShiftingAlgorithm.double_three_step: 45 | algorithm_type = 'double 3-step' 46 | print(f'\nPhase shift algorithm: {algorithm_type}') 47 | print(f'Phase shifts: {measurement.shifts}') 48 | print(f'Frequencies: {measurement.frequencies}\n') 49 | 50 | # Load calibration data for the cameras' stereo system 51 | print('Load calibration data for cameras stereo system...', end='', flush=True) 52 | with open(config.DATA_PATH + r'/calibrated_data.json', 'r') as fp: 53 | calibration_data = json.load(fp) 54 | print('Done') 55 | 56 | # Calculate phase fields 57 | print('Calculate phase fields for first and second cameras...', end='', flush=True) 58 | calculate_phase_for_fppmeasurement(measurement) 59 | print('Done') 60 | 61 | # Plot unwrapped phases 62 | plt.subplot(221) 63 | plt.imshow(measurement.camera_results[0].unwrapped_phases[-1], cmap='gray') 64 | plt.subplot(222) 65 | plt.imshow(measurement.camera_results[1].unwrapped_phases[-1], cmap='gray') 66 | plt.subplot(223) 67 | plt.imshow(measurement.camera_results[2].unwrapped_phases[-1], cmap='gray') 68 | plt.subplot(224) 69 | plt.imshow(measurement.camera_results[3].unwrapped_phases[-1], cmap='gray') 70 | plt.show() 71 | 72 | print('Determine phase fields ROI...', end='', flush=True) 73 | get_phase_field_ROI(measurement) 74 | print('Done') 75 | 76 | # Set ROI manually for test plate 77 | # measurement.camera_results[0].ROI = np.array([[285, 103], [1513, 90], [1441, 1210], [411, 1471]]) 78 | measurement.camera_results[0].ROI = np.array([[470, 230], [1420, 170], [1350, 1150], [520, 1350]]) 79 | # measurement.camera_results[0].ROI = np.array([[810, 580], [1085, 530], [1075, 780], [830, 840]]) 80 | ROI_mask = create_polygon(measurement.camera_results[0].imgs_list[0][0].shape, measurement.camera_results[0].ROI) 81 | measurement.camera_results[0].ROI_mask = ROI_mask 82 | measurement.camera_results[2].ROI_mask = ROI_mask 83 | 84 | # Plot signal to noise ratio 85 | plt.subplot(221) 86 | plt.imshow(measurement.camera_results[0].modulated_intensities[-1]/measurement.camera_results[0].average_intensities[-1], cmap='gray') 87 | # Draw ROI 88 | plt.plot(measurement.camera_results[0].ROI[:, 0], measurement.camera_results[0].ROI[:, 1], 'r-') 89 | plt.plot([measurement.camera_results[0].ROI[-1, 0], measurement.camera_results[0].ROI[0, 0]], 90 | [measurement.camera_results[0].ROI[-1, 1], measurement.camera_results[0].ROI[0, 1]], 'r-') 91 | plt.subplot(222) 92 | plt.imshow(measurement.camera_results[1].modulated_intensities[-1]/measurement.camera_results[1].average_intensities[-1], cmap='gray') 93 | plt.subplot(223) 94 | plt.imshow(measurement.camera_results[2].modulated_intensities[-1]/measurement.camera_results[2].average_intensities[-1], cmap='gray') 95 | plt.subplot(224) 96 |
plt.imshow(measurement.camera_results[3].modulated_intensities[-1]/measurement.camera_results[3].average_intensities[-1], cmap='gray') 97 | plt.show() 98 | 99 | print('Calculate phase fields LUT...', end='', flush=True) 100 | LUT = get_phase_field_LUT(measurement) 101 | print('Done') 102 | 103 | # Process FPPMeasurements with phasogrammetry approach 104 | print('Calculate 2D corresponding points with phasogrammetry approach...') 105 | points_2d_1, points_2d_2 = process_fppmeasurement_with_phasogrammetry(measurement, 5, 5, LUT) 106 | print(f'Found {points_2d_1.shape[0]} corresponding points') 107 | print('Done') 108 | 109 | print('\nCalculate 3D points with triangulation...') 110 | points_3d, rms1, rms2, reproj_err1, reproj_err2 = triangulate_points(calibration_data, points_2d_1, points_2d_2) 111 | print(f'Reprojected RMS for camera 1 = {rms1:.3f}') 112 | print(f'Reprojected RMS for camera 2 = {rms2:.3f}') 113 | print('Done') 114 | 115 | print('\nFit points to plane') 116 | fit = fit_to_plane(points_3d[:,0], points_3d[:,1], points_3d[:,2]) 117 | distance_to_plane = np.abs(points_3d[:, 2] - (fit[0] * points_3d[:, 0] + fit[1] * points_3d[:, 1] + fit[2])) 118 | 119 | print(f'Fitting deviation mean = {np.mean(distance_to_plane):.4f} mm') 120 | print(f'Fitting deviation max = {np.max(distance_to_plane):.4f} mm') 121 | print(f'Fitting deviation std = {np.std(distance_to_plane):.4f} mm') 122 | 123 | # plt.hist(distance_to_plane, 30) 124 | # plt.show() 125 | 126 | # Filter outliers by reprojection error 127 | reproj_err_threshold = 1.0 # pixel 128 | 129 | print('\nTry to filter outliers with reprojection error threshold...') 130 | filter_condition = (reproj_err1 < reproj_err_threshold) & (reproj_err2 < reproj_err_threshold) 131 | x = points_3d[filter_condition, 0] 132 | y = points_3d[filter_condition, 1] 133 | z = points_3d[filter_condition, 2] 134 | points_2d_1 = points_2d_1[filter_condition,:] 135 | points_2d_2 = points_2d_2[filter_condition,:] 136 | print(f'Found {points_3d.shape[0] - x.shape[0]} outliers') 137 | 138 | print('\nCalculate 3D points with triangulation without outliers...') 139 | points_3d, rms1, rms2, reproj_err1, reproj_err2 = triangulate_points(calibration_data, points_2d_1, points_2d_2) 140 | print(f'Reprojected RMS for camera 1 = {rms1:.3f}') 141 | print(f'Reprojected RMS for camera 2 = {rms2:.3f}') 142 | print('Done') 143 | 144 | print('\nFit points to plane without outliers') 145 | fit2 = fit_to_plane(x, y, z) 146 | distance_to_plane = np.abs(z - (fit2[0] * x + fit2[1] * y + fit2[2])) 147 | print(f'Fitting deviation mean = {np.mean(distance_to_plane):.4f} mm') 148 | print(f'Fitting deviation max = {np.max(distance_to_plane):.4f} mm') 149 | print(f'Fitting deviation std = {np.std(distance_to_plane):.4f} mm') 150 | 151 | print('\nTry to filter outliers with distance to fitted surface...') 152 | filter_condition = distance_to_plane < 3*np.std(distance_to_plane) 153 | x = points_3d[filter_condition, 0] 154 | y = points_3d[filter_condition, 1] 155 | z = points_3d[filter_condition, 2] 156 | points_2d_1 = points_2d_1[filter_condition,:] 157 | points_2d_2 = points_2d_2[filter_condition,:] 158 | print(f'Found {points_3d.shape[0] - x.shape[0]} outliers') 159 | 160 | print('\nFit points to plane without outliers') 161 | fit2 = fit_to_plane(x, y, z) 162 | distance_to_plane = np.abs(z - (fit2[0] * x + fit2[1] * y + fit2[2])) 163 | print(f'Fitting deviation mean = {np.mean(distance_to_plane):.4f} mm') 164 | print(f'Fitting deviation max = {np.max(distance_to_plane):.4f} mm') 165 | 
print(f'Fitting deviation std = {np.std(distance_to_plane):.4f} mm\n') 166 | 167 | # plt.hist(distance_to_plane, 30) 168 | # plt.show() 169 | 170 | # Plot 3D point cloud 171 | fig = plt.figure() 172 | ax = fig.add_subplot(111, projection='3d') 173 | 174 | ax.scatter(x, y, z) 175 | 176 | ax.set_xlabel('X, mm') 177 | ax.set_ylabel('Y, mm') 178 | ax.set_zlabel('Z, mm') 179 | 180 | ax.view_init(elev=-75, azim=-89) 181 | 182 | plt.show() 183 | 184 | plt.tricontourf(x, y, distance_to_plane, levels=100) 185 | plt.colorbar() 186 | plt.show() 187 | 188 | 189 | if __name__ == '__main__': 190 | 191 | # Load FPPMeasurements from files 192 | print('Load FPPMeasurements from files...', end='', flush=True) 193 | measurement = load_fpp_measurements(config.LAST_MEASUREMENT_PATH + r'\fpp_measurement.json') 194 | print('Done') 195 | 196 | process_with_phasogrammetry(measurement) -------------------------------------------------------------------------------- /fpp_structures.py: -------------------------------------------------------------------------------- 1 | '''Module to store FPP data structures''' 2 | 3 | from __future__ import annotations 4 | from typing import Optional 5 | 6 | import enum 7 | from dataclasses import dataclass, field 8 | 9 | import numpy as np 10 | 11 | 12 | class PhaseShiftingAlgorithm(enum.IntEnum): 13 | '''Enum for phase shift algorithm type''' 14 | n_step = 1 15 | double_three_step = 2 16 | 17 | 18 | @dataclass 19 | class CameraMeasurement: 20 | ''' 21 | Class to store result of measurement for one camera 22 | ''' 23 | fringe_orientation: Optional[str] = 'vertical' 24 | imgs_list: Optional[list[list[np.ndarray]]] = field(default_factory=lambda:list()) 25 | imgs_file_names: Optional[list[list[str]]] = field(default_factory=lambda:list()) 26 | 27 | # Calculated attributes 28 | phases: Optional[list[np.ndarray]] = field(init=False) 29 | unwrapped_phases: Optional[list[np.ndarray]] = field(init=False) 30 | average_intensities: Optional[list[np.ndarray]] = field(init=False) 31 | modulated_intensities: Optional[list[np.ndarray]] = field(init=False) 32 | signal_to_noise_mask: Optional[np.ndarray] = field(init=False) 33 | ROI: Optional[np.ndarray] = field(init=False) 34 | ROI_mask: Optional[np.ndarray] = field(init=False) 35 | use_ROI_mask: bool = field(init=False, default=True) 36 | 37 | 38 | @dataclass 39 | class FPPMeasurement: 40 | ''' 41 | Class to store FPP measurement data 42 | ''' 43 | phase_shifting_type: PhaseShiftingAlgorithm 44 | frequencies: list[float] 45 | shifts: list[float] 46 | 47 | camera_results: list[CameraMeasurement] = field(default_factory=lambda:list()) 48 | 49 | @property 50 | def frequency_counts(self) -> int: 51 | return len(self.frequencies) 52 | 53 | @property 54 | def shifts_count(self) -> int: 55 | return len(self.shifts)
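
A small sketch of how these dataclasses compose (values are illustrative only):

```python
import numpy as np
from fpp_structures import CameraMeasurement, FPPMeasurement, PhaseShiftingAlgorithm

measurement = FPPMeasurement(
    phase_shifting_type=PhaseShiftingAlgorithm.n_step,
    frequencies=[1, 4, 16],
    shifts=[i * np.pi / 2 for i in range(4)],  # classic 4-step shifts
    camera_results=[CameraMeasurement(fringe_orientation='vertical') for _ in range(2)],
)
assert measurement.frequency_counts == 3 and measurement.shifts_count == 4
```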
'cam', 0, 100, on_exposure_change)
24 | cv2.createTrackbar('Brightness', 'cam', 0, 200, on_brightness_change)
25 | cv2.createTrackbar('Gamma', 'cam', 0, 100, on_gamma_change)
26 | 
27 | while True:
28 | img = camera.get_image()
29 | cv2.imshow('cam', img)
30 | k = cv2.waitKey(1)
31 | if k == 27:
32 | break
33 | 
34 | focus = cv2.getTrackbarPos('Focus', 'cam')
35 | exposure = cv2.getTrackbarPos('Exposure', 'cam') * -0.1
36 | brightness = cv2.getTrackbarPos('Brightness', 'cam')
37 | gamma = (cv2.getTrackbarPos('Gamma', 'cam') - 50) * 0.1
38 | 
39 | cv2.destroyAllWindows()
40 | return focus, exposure, brightness, gamma
41 | 
42 | 
43 | def camera_baumer_adjust(camera):
44 | 
45 | def on_exposure_change(value):
46 | camera.exposure = 5000 + 150 * value
47 | 
48 | def on_gamma_change(value):
49 | camera.gamma = 1 + (value - 50) * 0.01
50 | 
51 | def on_gain_change(value):
52 | camera.gain = 1 + 0.02 * value
53 | 
54 | cv2.namedWindow('cam', cv2.WINDOW_NORMAL)
55 | cv2.resizeWindow('cam', 800, 600)
56 | 
57 | cv2.createTrackbar('Exposure', 'cam', 0, 200, on_exposure_change)
58 | cv2.createTrackbar('Gain', 'cam', 0, 100, on_gain_change)
59 | cv2.createTrackbar('Gamma', 'cam', 0, 100, on_gamma_change)
60 | 
61 | while True:
62 | img = camera.get_image()
63 | cv2.imshow('cam', img)
64 | k = cv2.waitKey(1)
65 | if k == 27:
66 | break
67 | 
68 | # cv2.destroyAllWindows()
69 | 
70 | exposure = 5000 + 150 * cv2.getTrackbarPos('Exposure', 'cam')
71 | gamma = 1 + (cv2.getTrackbarPos('Gamma', 'cam') - 50) * 0.01
72 | gain = 1 + 0.02 * cv2.getTrackbarPos('Gain', 'cam')
73 | 
74 | cv2.destroyAllWindows()
75 | return exposure, gamma, gain
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | import os
4 | import json
5 | from datetime import datetime
6 | from typing import List, Tuple
7 | 
8 | import cv2
9 | import numpy as np
10 | from scipy import optimize
11 | from matplotlib import pyplot as plt
12 | 
13 | import config
14 | from camera import Camera
15 | from projector import Projector
16 | from camera_web import CameraWeb
17 | from camera_baumer import CameraBaumer
18 | from camera_simulated import CameraSimulated
19 | from create_patterns import create_psp_templates
20 | from hand_set_up_camera import camera_adjust, camera_baumer_adjust
21 | from min_max_projector_calibration import MinMaxProjectorCalibration
22 | from fpp_structures import FPPMeasurement, PhaseShiftingAlgorithm, CameraMeasurement
23 | 
24 | 
25 | from examples.test_plate_phasogrammetry import process_with_phasogrammetry
26 | 
27 | def initialize_cameras(
28 | camera_type: str,
29 | projector: Projector = None,
30 | cam_to_found_number: int = 2,
31 | cameras_serial_numbers: List[str] = []
32 | ) -> list[Camera]:
33 | '''
34 | Search for connected cameras of the specified type, link them with a projector instance and return a list of detected cameras
35 | 
36 | Args:
37 | camera_type (str): type of cameras to search
38 | projector (Projector): projector instance to link with camera instances
39 | cam_to_found_number (int): number of cameras to search
40 | cameras_serial_numbers (List[str]): list of cameras' serial numbers to search
41 | 
42 | Returns:
43 | cameras (list[Camera]): list of detected cameras
44 | '''
45 | if camera_type == 'web':
46 | cameras = CameraWeb.get_available_cameras(cam_to_found_number)
47 | elif camera_type == 'baumer':
48 | cameras = CameraBaumer.get_available_cameras(cam_to_found_number, cameras_serial_numbers)
49 | elif camera_type == 'simulated':
50 | cameras = CameraSimulated.get_available_cameras(cam_to_found_number)
51 | # Set projector for simulated cameras
52 | if projector is not None:
53 | for camera in cameras:
54 | camera.projector = projector
55 | return cameras
56 | 
57 | 
58 | def adjust_cameras(cameras: list[Camera]) -> None:
59 | '''
60 | Adjust camera capture parameters (focus length, exposure time, etc)
61 | with visual control
62 | '''
63 | for i, camera in enumerate(cameras):
64 | if camera.type == "web":
65 | camera_adjust(camera)
66 | elif camera.type == "baumer":
67 | exposure, gamma, gain = camera_baumer_adjust(camera)
68 | config.CAMERA_EXPOSURE[i] = exposure
69 | config.CAMERA_GAIN[i] = gain
70 | config.CAMERA_GAMMA[i] = gamma
71 | # Save calibration data to file
72 | config.save_calibration_data()
73 | 
74 | 
75 | def calibrate_projector(cameras: list[Camera], projector: Projector) -> None:
76 | '''
77 | Calibrate projector image with gamma correction
78 | 
79 | Args:
80 | cameras (list[Camera]): list of available cameras to capture measurement images
81 | projector (Projector): projector to project patterns
82 | '''
83 | brightness, _ = get_brightness_vs_intensity(cameras, projector, use_correction=False)
84 | 
85 | # Calculate gamma coefficient
86 | # Make an intensity linspace matching the captured brightness sequence
87 | intensity = np.linspace(0, np.max(brightness), len(brightness))
88 | 
89 | # Find saturation level
90 | saturation_level = 0.95
91 | k, saturation = 0, len(intensity)  # saturation defaults to the full sequence if no plateau is found
92 | for i in range(len(intensity)):
93 | if brightness[i] > np.max(brightness) * saturation_level:
94 | k = k + 1
95 | if k > 3:
96 | saturation = i - 2
97 | break
98 | 
99 | # Truncate the sequence at the saturation level
100 | int_reduced = intensity[:saturation]
101 | brt_reduced = brightness[:saturation]
102 | 
103 | # Gamma function to fit
104 | lam = lambda x, a, b, c: a * (x + c) ** b
105 | 
106 | # Fit gamma function parameters for reduced brightness vs intensity sequence
107 | popt, pcov = optimize.curve_fit(lam, int_reduced, brt_reduced, p0=(1, 1, 1))
108 | print(
109 | f"Fitted gamma function - Iout = {popt[0]:.3f} * (Iin + {popt[2]:.3f}) ^ {popt[1]:.3f}"
110 | )
111 | 
112 | # Draw fitted gamma function
113 | gg = lam(intensity, *popt)
114 | 
115 | plt.plot(intensity, brightness, "b+")
116 | plt.plot(intensity, gg, "r-")
117 | plt.xlabel("Intensity, relative units")
118 | plt.ylabel("Brightness, relative units")
119 | plt.xlim((0, 1))
120 | plt.ylim((0, 1))
121 | plt.grid()
122 | plt.show()
123 | 
124 | # Store new gamma correction coefficients
125 | config.PROJECTOR_GAMMA_A = popt[0]
126 | config.PROJECTOR_GAMMA_B = popt[1]
127 | config.PROJECTOR_GAMMA_C = popt[2]
128 | config.save_calibration_data()
129 | 
130 | # Check gamma correction
131 | brt_corrected, _ = get_brightness_vs_intensity(
132 | cameras, projector, use_correction=True
133 | )
134 | 
135 | # Draw corrected brightness vs intensity
136 | plt.plot(intensity, brt_corrected, "b+")
137 | plt.xlabel("Intensity, relative units")
138 | plt.ylabel("Brightness, relative units")
139 | plt.xlim((0, 1))
140 | plt.ylim((0, 1))
141 | plt.grid()
142 | plt.show()
143 | 
144 | 
145 | def get_brightness_vs_intensity(cameras: List[Camera], projector: Projector, use_correction: bool) -> Tuple[List[float], List[float]]:
146 | '''
147 | Get the brightness vs intensity dependence by projecting a constant intensity
148 | on the screen and capturing images with the cameras. Brightness is averaged over a small
149 | region for several captured images.
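The two returned lists hold the normalized mean brightness for the first and second
camera at each projected intensity level; calibrate_projector fits the gamma curve
to the first of them.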
150 | 151 | Args: 152 | cameras (list[Camera]): list of available cameras to capture measurement images 153 | projector (Projector): porjector to project patterns 154 | use_correction (bool): use correction to project patterns 155 | ''' 156 | cv2.namedWindow('cam1', cv2.WINDOW_NORMAL) 157 | cv2.resizeWindow('cam1', 600, 400) 158 | cv2.namedWindow('cam2', cv2.WINDOW_NORMAL) 159 | cv2.resizeWindow('cam2', 600, 400) 160 | 161 | # TODO: Add parameters to config 162 | win_size_x = 50 163 | win_size_y = 50 164 | max_intensity = 1024 165 | average_num = 5 166 | border_width = 20 167 | 168 | projector.set_up_window() 169 | 170 | # TODO: Make generic to number of cameras 171 | brightness1 = [] 172 | brightness2 = [] 173 | 174 | # Make thin black and white borders 175 | image = np.zeros((projector.height, projector.width)) 176 | image[border_width:-border_width, border_width:-border_width] = max_intensity 177 | 178 | temp_img = cameras[0].get_image() 179 | 180 | for intensity in range(max_intensity): 181 | image[2 * border_width: -2 * border_width, 2 * border_width: -2 * border_width] = intensity / max_intensity 182 | projector.project_pattern(image, use_correction) 183 | 184 | img1 = np.zeros(temp_img.shape, dtype=np.float64) 185 | img2 = np.zeros(temp_img.shape, dtype=np.float64) 186 | 187 | for _ in range(average_num): 188 | cv2.waitKey(config.MEASUREMENT_CAPTURE_DELAY) 189 | 190 | img1 = img1 + cameras[0].get_image() 191 | img2 = img2 + cameras[1].get_image() 192 | 193 | img1 = img1 / average_num 194 | img2 = img2 / average_num 195 | roi_x = slice(int(img1.shape[1] / 2 - win_size_x), int(img1.shape[1] / 2 + win_size_x)) 196 | roi_y = slice(int(img1.shape[0] / 2 - win_size_y), int(img1.shape[0] / 2 + win_size_y)) 197 | brt1 = np.mean(img1[roi_y, roi_x]) / max_intensity 198 | brt2 = np.mean(img2[roi_y, roi_x]) / max_intensity 199 | 200 | brightness1.append(brt1) 201 | brightness2.append(brt2) 202 | 203 | img_to_display1 = img1.astype(np.uint16) 204 | cv2.rectangle( 205 | img_to_display1, 206 | (roi_x.start, roi_y.start), 207 | (roi_x.stop, roi_y.stop), 208 | (255, 0, 0), 3, 209 | ) 210 | cv2.putText( 211 | img_to_display1, 212 | f"{intensity = }", 213 | (50, 50), 214 | cv2.FONT_HERSHEY_PLAIN, 215 | 5, (255, 0, 0), 2, 216 | ) 217 | cv2.putText( 218 | img_to_display1, 219 | f"Brightness = {brt1:.3f}", 220 | (50, 100), 221 | cv2.FONT_HERSHEY_PLAIN, 222 | 5, (255, 0, 0), 2, 223 | ) 224 | cv2.imshow('cam1', img_to_display1) 225 | 226 | img_to_display2 = img2.astype(np.uint16) 227 | cv2.rectangle( 228 | img_to_display2, 229 | (roi_x.start, roi_y.start), 230 | (roi_x.stop, roi_y.stop), 231 | (255, 0, 0), 3, 232 | ) 233 | cv2.putText( 234 | img_to_display2, 235 | f"{intensity = }", 236 | (50, 50), 237 | cv2.FONT_HERSHEY_PLAIN, 238 | 5, (255, 0, 0), 2, 239 | ) 240 | cv2.putText( 241 | img_to_display2, 242 | f"Brightness = {brt2:.3f}", 243 | (50, 100), 244 | cv2.FONT_HERSHEY_PLAIN, 245 | 5, (255, 0, 0), 2, 246 | ) 247 | cv2.imshow('cam2', img_to_display2) 248 | 249 | projector.close_window() 250 | cv2.destroyWindow('cam1') 251 | cv2.destroyWindow('cam2') 252 | 253 | return brightness1, brightness2 254 | 255 | 256 | def capture_measurement_images( 257 | cameras: List[Camera], 258 | projector: Projector, 259 | phase_shift_type: PhaseShiftingAlgorithm = PhaseShiftingAlgorithm.n_step 260 | ) -> FPPMeasurement: 261 | ''' 262 | Do fringe projection measurement. Generate pattern, project them via projector and capture images with cameras. 
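Patterns are projected for two fringe orientations (vertical, then horizontal), so the
returned measurement holds four CameraMeasurement results: two cameras times two orientations.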
263 | 264 | Args: 265 | cameras (list[Camera]): list of available cameras to capture measurement images 266 | projector (Projector): porjector to project patterns 267 | vertical (bool): create vertical patterns, if False create horizontal 268 | 269 | Returns: 270 | meas (FPPMeasurement): measurement for first and second camera 271 | ''' 272 | # Create OpenCV GUI windows to show captured images 273 | cv2.namedWindow('cam1', cv2.WINDOW_NORMAL) 274 | cv2.resizeWindow('cam1', 600, 400) 275 | cv2.namedWindow('cam2', cv2.WINDOW_NORMAL) 276 | cv2.resizeWindow('cam2', 600, 400) 277 | 278 | shift_num = 4 279 | frequencies = [1, 4, 12, 48, 90] 280 | 281 | # Create phase shift profilometry patterns 282 | patterns_v, _ = create_psp_templates( 283 | config.PROJECTOR_WIDTH, 284 | config.PROJECTOR_HEIGHT, 285 | frequencies, 286 | phase_shift_type, 287 | shifts_number=shift_num, 288 | vertical=True, 289 | ) 290 | patterns_h, phase_shifts = create_psp_templates( 291 | config.PROJECTOR_WIDTH, 292 | config.PROJECTOR_HEIGHT, 293 | frequencies, 294 | phase_shift_type, 295 | shifts_number=shift_num, 296 | vertical=False, 297 | ) 298 | 299 | patterns_vh = {'vertical': patterns_v, 'horizontal': patterns_h} 300 | 301 | cam_results = [ 302 | CameraMeasurement(fringe_orientation='vertical'), 303 | CameraMeasurement(fringe_orientation='vertical'), 304 | CameraMeasurement(fringe_orientation='horizontal'), 305 | CameraMeasurement(fringe_orientation='horizontal'), 306 | ] 307 | 308 | # Create FPPMeasurement instance with results 309 | meas = FPPMeasurement(phase_shift_type, frequencies, phase_shifts, cam_results) 310 | 311 | # Create folders to save measurement results if defined in config 312 | if config.SAVE_MEASUREMENT_IMAGE_FILES: 313 | measure_name = f'{datetime.now():%d-%m-%Y_%H-%M-%S}' 314 | last_measurement_path = f'{config.DATA_PATH}/{measure_name}' 315 | os.makedirs(f'{last_measurement_path}/') 316 | os.makedirs(f'{last_measurement_path}/{config.CAMERAS_FOLDER_NAMES[0]}/') 317 | os.makedirs(f'{last_measurement_path}/{config.CAMERAS_FOLDER_NAMES[1]}/') 318 | 319 | # Set up projector 320 | projector.set_up_window() 321 | 322 | for res1, res2 in ((cam_results[0], cam_results[1]), (cam_results[2], cam_results[3])): 323 | 324 | orientation = res1.fringe_orientation 325 | patterns = patterns_vh[orientation] 326 | 327 | # Iter thru generated patterns 328 | for i in range(len(patterns)): 329 | 330 | if config.SAVE_MEASUREMENT_IMAGE_FILES: 331 | res1.imgs_file_names.append([]) 332 | res2.imgs_file_names.append([]) 333 | else: 334 | res1.imgs_list.append([]) 335 | res2.imgs_list.append([]) 336 | 337 | for j in range(len(patterns[i])): 338 | projector.project_pattern(patterns[i][j]) 339 | 340 | # Capture one frame before measurement for wecams 341 | if cameras[0].type == 'web': 342 | cameras[0].get_image() 343 | if cameras[1].type == 'web': 344 | cameras[1].get_image() 345 | 346 | # Wait delay time before pattern projected and images captures 347 | cv2.waitKey(config.MEASUREMENT_CAPTURE_DELAY) 348 | 349 | # Capture images 350 | frames_1 = [] 351 | frames_2 = [] 352 | for _ in range(1): 353 | frames_1.append(cameras[0].get_image()) 354 | frames_2.append(cameras[1].get_image()) 355 | 356 | frame_1 = np.mean(frames_1, axis=0).astype(np.uint8) 357 | frame_2 = np.mean(frames_2, axis=0).astype(np.uint8) 358 | 359 | cv2.imshow('cam1', frame_1) 360 | cv2.imshow('cam2', frame_2) 361 | 362 | # Save images if defined in config 363 | if config.SAVE_MEASUREMENT_IMAGE_FILES: 364 | filename1 = 
f'{last_measurement_path}/{config.CAMERAS_FOLDER_NAMES[0]}/' + config.IMAGES_FILENAME_MASK.format(i, j, orientation) 365 | filename2 = f'{last_measurement_path}/{config.CAMERAS_FOLDER_NAMES[1]}/' + config.IMAGES_FILENAME_MASK.format(i, j, orientation) 366 | saved1 = cv2.imwrite(filename1, frame_1) 367 | saved2 = cv2.imwrite(filename2, frame_2) 368 | 369 | # Store saved images filenames 370 | if saved1 and saved2: 371 | res1.imgs_file_names[-1].append(filename1) 372 | res2.imgs_file_names[-1].append(filename2) 373 | else: 374 | raise Exception('Error during image saving!') 375 | else: 376 | res1.imgs_list[-1].append(frame_1) 377 | res2.imgs_list[-1].append(frame_2) 378 | 379 | # Stop projector 380 | projector.close_window() 381 | 382 | # Close OpenCV GUI windows 383 | cv2.destroyWindow('cam1') 384 | cv2.destroyWindow('cam2') 385 | 386 | # Save results of measurement in json file if defined in config 387 | if config.SAVE_MEASUREMENT_IMAGE_FILES: 388 | with open(f'{last_measurement_path}/' + config.MEASUREMENT_FILENAME_MASK.format(measure_name), 'x') as f: 389 | json.dump(meas, f, ensure_ascii=False, indent=4, default=vars) 390 | config.LAST_MEASUREMENT_PATH = last_measurement_path 391 | config.save_calibration_data() 392 | 393 | return meas 394 | 395 | 396 | if __name__ == '__main__': 397 | 398 | projector = Projector( 399 | config.PROJECTOR_WIDTH, 400 | config.PROJECTOR_HEIGHT, 401 | config.PROJECTOR_MIN_BRIGHTNESS, 402 | config.PROJECTOR_MAX_BRIGHTNESS, 403 | ) 404 | 405 | cameras = initialize_cameras(config.CAMERA_TYPE, projector, cam_to_found_number=2) 406 | 407 | choices = {i for i in range(6)} 408 | 409 | while True: 410 | print(f"Connected {len(cameras)} camera(s)") 411 | print("==========================================================") 412 | print("1 - Adjust cameras") 413 | print("2 - Projector gamma correction calibration") 414 | print("3 - Check brightness profile") 415 | print("4 - Take measurements") 416 | print("==========================================================") 417 | print("0 - Exit script") 418 | answer = input("Type something from the suggested list above: ") 419 | 420 | try: 421 | if int(answer) not in choices: 422 | raise Exception() 423 | except: 424 | continue 425 | else: 426 | choice = int(answer) 427 | 428 | if choice == 0: 429 | break 430 | 431 | elif choice == 1: 432 | adjust_cameras(cameras) 433 | 434 | elif choice == 2: 435 | calibrate_projector(cameras, projector) 436 | 437 | elif choice == 3: 438 | frequencies = [1, 4, 16, 64, 100, 120] 439 | test_pattern, _ = create_psp_templates( 440 | config.PROJECTOR_WIDTH, 441 | config.PROJECTOR_HEIGHT, 442 | frequencies, 443 | PhaseShiftingAlgorithm.n_step, 444 | 1, 445 | vertical=False, 446 | ) 447 | MinMaxProjectorCalibration(test_pattern, cameras, projector) 448 | 449 | elif choice == 4: 450 | measurement = capture_measurement_images( 451 | cameras, projector, phase_shift_type=PhaseShiftingAlgorithm.n_step 452 | ) 453 | process_with_phasogrammetry(measurement) 454 | -------------------------------------------------------------------------------- /min_max_projector_calibration.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import json 3 | matplotlib.use('TkAgg') 4 | # from numpy import arange, sin, pi 5 | from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk 6 | # Implement the default mpl key bindings 7 | from matplotlib.backend_bases import key_press_handler 8 | 9 | 10 | from matplotlib.figure import Figure 11 | 
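# This module embeds matplotlib figures in a Tk window: it live-plots the fringe
# profiles captured by both cameras and provides two sliders to tune the projector
# minimum and maximum image brightness.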
12 | import sys 13 | if sys.version_info[0] < 3: 14 | import Tkinter as Tk 15 | else: 16 | import tkinter as Tk 17 | from tkinter import ttk 18 | 19 | import numpy as np 20 | import cv2 21 | 22 | 23 | def MinMaxProjectorCalibration(patterns, cameras, projector): 24 | end = False 25 | pattern_num = 0 26 | 27 | def quit(root): 28 | nonlocal end 29 | end = True 30 | root.quit() # Stops mainloop 31 | root.destroy() # This is necessary on Windows to prevent 32 | # Fatal Python Error: PyEval_RestoreThread: NULL tstate 33 | 34 | root = Tk.Tk() 35 | root.wm_title("Embedding in TK") 36 | 37 | # Callback function to show next pattern 38 | def next_pattern(root): 39 | nonlocal pattern_num 40 | pattern_num = pattern_num + 1 41 | if pattern_num == len(patterns): 42 | pattern_num = 0 43 | 44 | f = Figure(figsize=(5, 4), dpi=100) 45 | 46 | # A tk.DrawingArea 47 | canvas = FigureCanvasTkAgg(f, master=root) 48 | canvas.draw() 49 | canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) 50 | 51 | toolbar = NavigationToolbar2Tk(canvas, root) 52 | toolbar.update() 53 | canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) 54 | 55 | button = Tk.Button(master=root, text='Quit', command=lambda: quit(root)) 56 | button.pack(side=Tk.BOTTOM) 57 | button = Tk.Button(master=root, text='Next', command=lambda: next_pattern(root)) 58 | button.pack(side=Tk.BOTTOM) 59 | 60 | def slider_max_changed(event): 61 | projector.max_image_brightness = current_max_brightness.get() 62 | 63 | def slider_min_changed(event): 64 | projector.min_image_brightness = current_min_brightness.get() 65 | 66 | current_max_brightness = Tk.DoubleVar(value=projector.max_image_brightness) 67 | current_min_brightness = Tk.DoubleVar(value=projector.min_image_brightness) 68 | scale_max = Tk.Scale(root, orient="horizontal", 69 | from_=0, to=1.0, 70 | digits=4, 71 | resolution=0.01, 72 | variable=current_max_brightness, 73 | command=slider_max_changed, 74 | length=300) 75 | scale_min = Tk.Scale(root, orient="horizontal", 76 | from_=0, to=1.0, 77 | digits=4, 78 | resolution=0.01, 79 | variable=current_min_brightness, 80 | command=slider_min_changed, 81 | length=300) 82 | 83 | label_1 = Tk.Label(root, text="Max Brightness") 84 | label_2 = Tk.Label(root, text="Min Brightness") 85 | 86 | label_1.pack(side=Tk.TOP) 87 | scale_max.pack(side=Tk.TOP) 88 | label_2.pack(side=Tk.TOP) 89 | scale_min.pack(side=Tk.TOP) 90 | 91 | projector.set_up_window() 92 | 93 | while True: 94 | projector.project_pattern(patterns[pattern_num][0]) 95 | 96 | if cameras[0].type == 'web': 97 | _1 = cameras[0].get_image() 98 | if cameras[1].type == 'web': 99 | _2 = cameras[1].get_image() 100 | 101 | frame_1 = cameras[0].get_image() 102 | frame_2 = cameras[1].get_image() 103 | 104 | if cameras[0].type == 'web': 105 | frame_1 = cv2.cvtColor(frame_1, cv2.COLOR_BGR2GRAY) 106 | if cameras[1].type == 'web': 107 | frame_2 = cv2.cvtColor(frame_2, cv2.COLOR_BGR2GRAY) 108 | 109 | delta_height = 50 110 | ROI = slice(int(frame_1.shape[0] / 2 - delta_height), int(frame_1.shape[0] / 2 + delta_height)) 111 | 112 | a1 = f.add_subplot(221) 113 | a1.plot(np.mean(frame_1[ROI, :], axis=0)) 114 | # a1.set_ylim((0, 4096)) 115 | b1 = f.add_subplot(222) 116 | b1.imshow(frame_1) 117 | 118 | a2 = f.add_subplot(223) 119 | a2.plot(np.mean(frame_2[ROI, :], axis=0)) 120 | # a2.set_ylim((0, 4096)) 121 | b2 = f.add_subplot(224) 122 | b2.imshow(frame_2) 123 | root.update() 124 | 125 | if (end): 126 | # Save results of calibration 127 | with open('config.json') as f: 128 | data = json.load(f) 129 | 130 | 
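# config.json is read, patched and rewritten below; the code relies on a nested
# layout of the form {"projector": {"min_brightness": ..., "max_brightness": ...}}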
data['projector']["min_brightness"] = projector.min_image_brightness 131 | data['projector']["max_brightness"] = projector.max_image_brightness 132 | 133 | with open('config.json', 'w') as f: 134 | json.dump(data, f, ensure_ascii=False, indent=4) 135 | 136 | projector.close_window() 137 | 138 | break 139 | canvas.draw() 140 | f.delaxes(a1) 141 | f.delaxes(b1) 142 | f.delaxes(a2) 143 | f.delaxes(b2) -------------------------------------------------------------------------------- /processing.py: -------------------------------------------------------------------------------- 1 | '''Module to process FPP images''' 2 | 3 | from __future__ import annotations 4 | import multiprocessing 5 | from multiprocessing import Pool 6 | from typing import Optional, Tuple 7 | 8 | import cv2 9 | import numpy as np 10 | from scipy import signal 11 | from scipy.optimize import fsolve 12 | 13 | import config 14 | from fpp_structures import FPPMeasurement, PhaseShiftingAlgorithm 15 | 16 | 17 | def calculate_phase_generic(images: list[np.ndarray], phase_shifts: Optional[list[float]]=None, frequency: Optional[float]=None, phase_shifting_type: PhaseShiftingAlgorithm = PhaseShiftingAlgorithm.n_step, direct_formula: bool = False) -> tuple[np.ndarray, np.ndarray, np.ndarray] : 18 | ''' 19 | Calculate wrapped phase from several Phase Shifting Profilometry images by 20 | generic formula (8) in https://doi.org/10.1016/j.optlaseng.2018.04.019 21 | 22 | Args: 23 | images (list of numpy arrays): the list of Phase Shifting Profilometry images 24 | phase_shifts = None (list): the list of phase shifts for each image from images, 25 | if phase_shifts is not defined, its calculated automatical for uniform step 26 | frequency = None (float): the frequency of measurement to add PI for unity frequency images 27 | phase_shifting_type = n_step (enum(int)): type of phase shifting algorithm should be used for phase calculating 28 | direct_formula = False (bool): use direct formulas to calculate phases for 3- and 4-step phase shifts 29 | 30 | Returns: 31 | result_phase (2D numpy array): wrapped phase from images 32 | average_intensity (2D numpy array): average intensity on images 33 | modulated_intensity (2D numpy array): modulated intensity on images 34 | ''' 35 | def calculate_n_step_phase(imgs: list[np.ndarray], phase_shifts: list[float]): 36 | # Use specific case for phase shifts length 37 | if direct_formula and len(phase_shifts) == 3: 38 | # Calculate formula (14-16) in https://doi.org/10.1016/j.optlaseng.2018.04.019 39 | sum12 = imgs[1] - imgs[2] 40 | sum012 = 2 * imgs[0] - imgs[1] - imgs[2] 41 | result_phase = np.arctan2(np.sqrt(3) * (sum12), sum012) 42 | average_intensity = (imgs[0] + imgs[1] + imgs[2]) / 3 43 | modulated_intensity = 1 / 3 * np.sqrt(3 * (sum12) ** 2 + (sum012) ** 2) 44 | elif direct_formula and len(phase_shifts) == 4: 45 | # Calculate formula (21-23) in https://doi.org/10.1016/j.optlaseng.2018.04.019 46 | sum13 = imgs[1] - imgs[3] 47 | sum02 = imgs[0] - imgs[2] 48 | result_phase = np.arctan2(sum13, sum02) 49 | average_intensity = (imgs[0] + imgs[1] + imgs[2] + imgs[3]) / 4 50 | modulated_intensity = 0.5 * np.sqrt(sum13**2 + sum02**2) 51 | else: 52 | # Reshape phase shifts for broadcasting multiplying 53 | phase_shifts = np.array(phase_shifts).reshape((-1,) + (1, 1)) 54 | 55 | # Add suplementary phase to get phase for unity frequency measurment 56 | phase_sup = 0 57 | if frequency is not None and frequency == 1: 58 | phase_sup = np.pi 59 | 60 | # Calculate formula (8) in 
https://doi.org/10.1016/j.optlaseng.2018.04.019
61 | temp1 = np.multiply(imgs, np.sin(phase_shifts + phase_sup))
62 | temp2 = np.multiply(imgs, np.cos(phase_shifts + phase_sup))
63 | 
64 | sum1 = np.sum(temp1, 0)
65 | sum2 = np.sum(temp2, 0)
66 | 
67 | result_phase = np.arctan2(sum1, sum2)
68 | 
69 | # Calculate formula (9-10) in https://doi.org/10.1016/j.optlaseng.2018.04.019
70 | average_intensity = np.mean(imgs, 0)
71 | modulated_intensity = 2 * np.sqrt(np.power(sum1, 2) + np.power(sum2, 2)) / len(images)
72 | return result_phase, average_intensity, modulated_intensity
73 | 
74 | # Calculate shifts if they are not defined
75 | if phase_shifts is None:
76 | phase_shifts = [2 * np.pi / len(images) * n for n in range(len(images))]
77 | 
78 | # Form numpy array for broadcasting
79 | imgs = np.zeros((len(images), images[0].shape[0], images[0].shape[1]))
80 | 
81 | # Add images to formed numpy array
82 | for i in range(len(images)):
83 | imgs[i] = images[i]
84 | 
85 | # Depending on phase shift algorithm calculate wrapped phase field
86 | if phase_shifting_type == PhaseShiftingAlgorithm.n_step:
87 | # Classic N-step approach
88 | result_phase, average_intensity, modulated_intensity = calculate_n_step_phase(imgs, phase_shifts)
89 | elif phase_shifting_type == PhaseShiftingAlgorithm.double_three_step:
90 | # Double three-step approach - average of two 3-step phases (second shifted by PI/3)
91 | # Calculate formula (26-31) from section 3.2 in https://doi.org/10.1016/j.optlaseng.2018.04.019
92 | result_phase1, average_intensity1, modulated_intensity1 = calculate_n_step_phase(imgs[:3,:,:], phase_shifts[:3])
93 | result_phase2, average_intensity2, modulated_intensity2 = calculate_n_step_phase(imgs[3:,:,:], phase_shifts[3:])
94 | 
95 | result_phase = (result_phase1 + result_phase2) / 2
96 | average_intensity = (average_intensity1 + average_intensity2) / 2
97 | modulated_intensity = (modulated_intensity1 + modulated_intensity2) / 2
98 | 
99 | return result_phase, average_intensity, modulated_intensity
100 | 
101 | 
102 | def calculate_unwraped_phase(phase_l: np.ndarray, phase_h: np.ndarray, lamb_l: float, lamb_h: float) -> np.ndarray:
103 | '''
104 | Calculate unwrapped phase from two sets of Phase Shifting Profilometry images by
105 | formula (94-95) in https://doi.org/10.1016/j.optlaseng.2018.04.019
106 | with standard temporal phase unwrapping (TPU) algorithm
107 | 
108 | Args:
109 | phase_l (2D numpy array): The calculated phase for the set of PSP images with low spatial frequency
110 | phase_h (2D numpy array): The calculated phase for the set of PSP images with high spatial frequency
111 | lamb_l (float): The fringe wavelength (inverse frequency) for the first phase array (phase_l)
112 | lamb_h (float): The fringe wavelength (inverse frequency) for the second phase array (phase_h)
113 | 
114 | Returns:
115 | unwrapped_phase (2D numpy array): unwrapped phase
116 | '''
117 | assert phase_h.shape == phase_l.shape, \
118 | 'Shapes of phase_l and phase_h must be equal'
119 | 
120 | # Formula (95) in https://doi.org/10.1016/j.optlaseng.2018.04.019
121 | k = np.round(((lamb_l / lamb_h) * phase_l - phase_h) / (2 * np.pi)).astype(int)
122 | 
123 | # Formula (94) in https://doi.org/10.1016/j.optlaseng.2018.04.019
124 | unwrapped_phase = phase_h + 2 * np.pi * k
125 | 
126 | return unwrapped_phase
127 | 
128 | 
129 | def calculate_phase_for_fppmeasurement(measurement: FPPMeasurement):
130 | '''
131 | Calculate unwrapped phase for FPP measurement instance with the help
132 | of calculate_phase_generic and calculate_unwraped_phase functions.
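The wrapped phase for the unit frequency is used as the starting unwrapped phase, and each
higher-frequency phase is then unwrapped against the previous one (hierarchical temporal phase unwrapping).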
133 | Calculated phase fields will be stored in the input measurement argument.
134 | 
135 | Args:
136 | measurement (FPPMeasurement): FPP measurement with images
137 | '''
138 | # Load measurement data
139 | frequencies = measurement.frequencies
140 | shifts = measurement.shifts
141 | frequency_counts = len(measurement.frequencies)
142 | 
143 | for cam_result in measurement.camera_results:
144 | # Empty lists for phase, unwrapped phase, average and modulated intensity
145 | phases = []
146 | unwrapped_phases = []
147 | avg_ints = []
148 | mod_ints = []
149 | 
150 | # Get images for one camera
151 | images = cam_result.imgs_list
152 | 
153 | # Calculate fields for each frequency
154 | for i in range(frequency_counts):
155 | 
156 | images_for_one_frequency = images[i]
157 | 
158 | phase, avg_int, mod_int = calculate_phase_generic(images_for_one_frequency, shifts, frequencies[i], phase_shifting_type=measurement.phase_shifting_type)
159 | 
160 | # Filter phase field with modulated intensity threshold
161 | mask = np.where(mod_int > 5, 1, 0)
162 | phase = phase * mask
163 | 
164 | phases.append(phase)
165 | avg_ints.append(avg_int)
166 | mod_ints.append(mod_int)
167 | 
168 | if i == 0:
169 | # First phase field should be unit-frequency without ambiguity
170 | unwrapped_phases.append(phase)
171 | else:
172 | # Next phase fields should be unwrapped
173 | unwrapped_phase = calculate_unwraped_phase(unwrapped_phases[i-1], phases[i], 1 / frequencies[i-1], 1 / frequencies[i])
174 | unwrapped_phases.append(unwrapped_phase)
175 | 
176 | # Set current camera results instance with calculated fields
177 | cam_result.phases = phases
178 | cam_result.unwrapped_phases = unwrapped_phases
179 | cam_result.average_intensities = avg_ints
180 | cam_result.modulated_intensities = mod_ints
181 | 
182 | 
183 | def create_polygon(shape: Tuple[int], vertices: np.ndarray) -> np.ndarray:
184 | '''
185 | Creates a 2D numpy array with a polygon defined by vertices.
186 | Points inside the polygon are filled with ones, all others with zeros.
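For example (illustrative), create_polygon((480, 640), np.array([[10, 10], [100, 10], [100, 80], [10, 80]]))
gives a 480x640 array with ones inside the rectangle and zeros outside.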
187 | 
188 | From https://stackoverflow.com/a/37123933
189 | 
190 | Args:
191 | shape (tuple of int): The shape of 2D numpy array to generate
192 | vertices (2D numpy array): The 2D numpy array with shape (N, 2) and coordinates of polygon vertices in it ([[x0, y0], ..., [xn, yn]])
193 | 
194 | Returns:
195 | base_array (2D numpy array): 2D numpy array with polygon filled by ones
196 | '''
197 | 
198 | def check(p1, p2, arr):
199 | """
200 | Uses the line defined by p1 and p2 to check array of
201 | input indices against interpolated value
202 | 
203 | Returns boolean array, with True inside and False outside of shape
204 | """
205 | # Create 3D array of indices
206 | idxs = np.indices(arr.shape)
207 | 
208 | p1 = p1[::-1].astype(float)
209 | p2 = p2[::-1].astype(float)
210 | 
211 | # Calculate max column idx for each row idx based on interpolated line between two points
212 | if p1[0] == p2[0]:
213 | max_col_idx = (idxs[0] - p1[0]) * idxs.shape[1]
214 | sign = np.sign(p2[1] - p1[1])
215 | else:
216 | max_col_idx = (idxs[0] - p1[0]) / (p2[0] - p1[0]) * (p2[1] - p1[1]) + p1[1]
217 | sign = np.sign(p2[0] - p1[0])
218 | 
219 | return idxs[1] * sign <= max_col_idx * sign
220 | 
221 | base_array = np.zeros(shape, dtype=float) # Initialize the output array with zeros
222 | 
223 | # Initialize boolean array defining shape fill
224 | fill = np.ones(base_array.shape, dtype=bool)
225 | 
226 | # Create check array for each edge segment, combine into fill array
227 | for k in range(vertices.shape[0]):
228 | fill = np.all([fill, check(vertices[k - 1], vertices[k], base_array)], axis=0)
229 | 
230 | # Set all values inside polygon to one
231 | base_array[fill] = 1
232 | 
233 | return base_array
234 | 
235 | 
236 | def point_inside_polygon(x: int, y: int, poly: list[tuple[int, int]], include_edges: bool = True) -> bool:
237 | '''
238 | Test if point (x,y) is inside polygon poly
239 | 
240 | Point is inside polygon if a horizontal ray to the right
241 | of the point crosses the polygon boundary an odd number of times. Works fine for non-convex polygons.
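Example (illustrative):
>>> point_inside_polygon(2, 2, [(0, 0), (4, 0), (4, 4), (0, 4)])
True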
242 | From: https://stackoverflow.com/questions/39660851/deciding-if-a-point-is-inside-a-polygon 243 | 244 | Args: 245 | x (int): horizontal point coordinate 246 | y (int): vertical point coordinate 247 | poly (list[tuple(int, int)]): N-vertices polygon defined as [(x1,y1),...,(xN,yN)] or [(x1,y1),...,(xN,yN),(x1,y1)] 248 | 249 | Returns: 250 | inside (bool): if point inside the polygon 251 | ''' 252 | n = len(poly) 253 | 254 | inside = False 255 | 256 | p1x, p1y = poly[0] 257 | 258 | for i in range(1, n + 1): 259 | p2x, p2y = poly[i % n] 260 | if p1y == p2y: 261 | if y == p1y: 262 | if min(p1x, p2x) <= x <= max(p1x, p2x): 263 | # point is on horisontal edge 264 | inside = include_edges 265 | break 266 | # point is to the left from current edge 267 | elif x < min(p1x, p2x): 268 | inside = not inside 269 | else: # p1y!= p2y 270 | if min(p1y, p2y) <= y <= max(p1y, p2y): 271 | xinters = (y - p1y) * (p2x - p1x) / float(p2y - p1y) + p1x 272 | 273 | # point is right on the edge 274 | if x == xinters: 275 | inside = include_edges 276 | break 277 | 278 | # point is to the left from current edge 279 | if x < xinters: 280 | inside = not inside 281 | 282 | p1x, p1y = p2x, p2y 283 | 284 | return inside 285 | 286 | 287 | def triangulate_points(calibration_data: dict, image1_points: np.ndarray, image2_points: np.ndarray) -> tuple[np.ndarray, float, float, np.ndarray, np.ndarray]: 288 | ''' 289 | Triangulate two set of 2D point in one set of 3D points 290 | 291 | Args: 292 | calibration_data (dictionary): calibration data used for triangulating 293 | image1_points (numpy arrray [N, 2]): first set of 2D points 294 | image2_points (numpy arrray [N, 2]): second set of 2D points 295 | Returns: 296 | points_3d (numpy arrray [N, 3]): triangulated 3D points 297 | rms1 (float): overall reprojection error for first camera 298 | rms2 (float): overall reprojection error for second camera 299 | reproj_err1, reproj_err2 (numpy arrray [N]): reprojected error for each triangulated point for first and second camera 300 | ''' 301 | # Calculate the projective matrices according to the stereo calibration data 302 | cam1_mtx = np.array(calibration_data['camera_0']['mtx']) 303 | cam2_mtx = np.array(calibration_data['camera_1']['mtx']) 304 | dist1_mtx = np.array(calibration_data['camera_0']['dist']) 305 | dist2_mtx = np.array(calibration_data['camera_1']['dist']) 306 | 307 | # Calculate projective matrices for cameras 308 | proj_mtx_1 = np.dot(cam1_mtx, np.hstack((np.identity(3), np.zeros((3,1))))) 309 | proj_mtx_2 = np.dot(cam2_mtx, np.hstack((calibration_data['R'], calibration_data['T']))) 310 | 311 | # Undistort 2d points 312 | points_2d_1 = np.array(image1_points, dtype=np.float32) 313 | points_2d_2 = np.array(image2_points, dtype=np.float32) 314 | undist_points_2d_1 = cv2.undistortPoints(points_2d_1, cam1_mtx, dist1_mtx, P=cam1_mtx) 315 | undist_points_2d_2 = cv2.undistortPoints(points_2d_2, cam2_mtx, dist2_mtx, P=cam2_mtx) 316 | 317 | # Calculate the triangulation of 3D points 318 | points_hom = cv2.triangulatePoints(proj_mtx_1, proj_mtx_2, undist_points_2d_1, undist_points_2d_2) 319 | points_3d = cv2.convertPointsFromHomogeneous(points_hom.T) 320 | 321 | points_3d = np.reshape(points_3d, (points_3d.shape[0], points_3d.shape[2])) 322 | 323 | # Reproject triangulated points 324 | reproj_points, _ = cv2.projectPoints(points_3d, np.identity(3), np.zeros((3,1)), cam1_mtx, dist1_mtx) 325 | reproj_points2, _ = cv2.projectPoints(points_3d, np.array(calibration_data['R']), np.array(calibration_data['T']), cam2_mtx, dist2_mtx) 
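    # Note: the per-point reprojection error computed below is the squared pixel
    # distance err_i = (u_i - u'_i)**2 + (v_i - v'_i)**2 between a measured point
    # and its reprojection, and the per-camera RMS is sqrt(sum(err_i) / N)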
326 | 327 | # Calculate reprojection error 328 | reproj_err1 = np.sum(np.square(points_2d_1[:,np.newaxis,:] - reproj_points), axis=2) 329 | rms1 = np.sqrt(np.sum(reproj_err1)/reproj_points.shape[0]) 330 | 331 | reproj_err2 = np.sum(np.square(points_2d_2[:,np.newaxis,:] - reproj_points2), axis=2) 332 | rms2 = np.sqrt(np.sum(reproj_err2)/reproj_points.shape[0]) 333 | 334 | reproj_err1 = np.reshape(reproj_err1, (reproj_err1.shape[0])) 335 | reproj_err2 = np.reshape(reproj_err2, (reproj_err2.shape[0])) 336 | 337 | return points_3d, rms1, rms2, reproj_err1, reproj_err2 338 | 339 | 340 | def calculate_bilinear_interpolation_coeficients(points: tuple[tuple]) -> np.ndarray: 341 | ''' 342 | Calculate coeficients for bilinear interploation of 2d data. Bilinear interpolation is defined as 343 | polinomal fit f(x0, y0) = a0 + a1 * x0 + a2 * y0 + a3 * x0 * y0. Equations is used from wiki: 344 | https://en.wikipedia.org/wiki/Bilinear_interpolation 345 | 346 | Args: 347 | points (tuple[tuple]): four elements in format (x, y, f(x, y)) 348 | 349 | Returns: 350 | bilinear_coeficients (numpy array): four coeficients for bilinear interploation for input points 351 | ''' 352 | # Sort points 353 | points = sorted(points) 354 | 355 | # Get x, y coordinates and values for this points 356 | (x1, y1, q11), (_, y2, q12), (x2, _, q21), (_, _, q22) = points 357 | 358 | # Get matrix A 359 | A = np.array( 360 | [ 361 | [x2 * y2, -x2 * y1, -x1 * y2, x1 * y1], 362 | [-y2, y1, y2, -y1], 363 | [-x2, x2, x1, -x1], 364 | [1, -1, -1, 1], 365 | ] 366 | ) 367 | 368 | # Get vector B 369 | B = np.array([q11, q12, q21, q22]) 370 | 371 | # Calculate coeficients for bilinear interploation 372 | bilinear_coeficients = (1 / ((x2 - x1) * (y2 - y1))) * A.dot(B) 373 | return bilinear_coeficients 374 | 375 | 376 | def bilinear_phase_fields_approximation(p: tuple[float, float], *data: tuple) -> tuple[float, float]: 377 | ''' 378 | Calculate residiuals for bilinear interploation of horizontal and vertical phase fields. 379 | Function is used in find_phasogrammetry_corresponding_point fsolve function. 380 | 381 | Args: 382 | p (tuple[float, float]): x and y coordinates of point in which residiual is calculated 383 | data (tuple): data to calculate residiuals 384 | - a (numpy array): four coeficients which defines linear interploation for horizontal phase field 385 | - b (numpy array): four coeficients which defines linear interploation for vertical phase field 386 | - p_h (float): horizontal phase to match in interplotated field 387 | - p_v (float): vertical phase to match in interplotated field 388 | 389 | Returns: 390 | res_h, res_v (tuple[float, float]): residiuals for horizontal and vertical field in point (x, y) 391 | ''' 392 | x, y = p 393 | 394 | a, b, p_h, p_v = data 395 | 396 | return ( 397 | a[0] + a[1] * x + a[2] * y + a[3] * x * y - p_h, 398 | b[0] + b[1] * x + b[2] * y + b[3] * x * y - p_v, 399 | ) 400 | 401 | 402 | def find_phasogrammetry_corresponding_point(p1_h: np.ndarray, p1_v: np.ndarray, p2_h: np.ndarray, p2_v: np.ndarray, x: int, y: int, LUT:list[list[list[int]]]=None) -> tuple[float, float]: 403 | ''' 404 | Finds the corresponding point coordinates for the second image using the phasogrammetry approach 405 | 406 | For the given coordinates x and y, the phase values on the fields for the vertical and horizontal fringes 407 | for the images of the first camera are determined. Then two isolines with defined values of the phase on 408 | the corresponding fields for the second camera are found. 
The intersection of the isolines gives the
409 | coordinates of the corresponding point on the image from the second camera.
410 | 
411 | Args:
412 | p1_h (numpy array): phase field for horizontal fringes for first camera
413 | p1_v (numpy array): phase field for vertical fringes for first camera
414 | p2_h (numpy array): phase field for horizontal fringes for second camera
415 | p2_v (numpy array): phase field for vertical fringes for second camera
416 | x (int): horizontal coordinate of point for first camera
417 | y (int): vertical coordinate of point for first camera
418 | LUT (list[list[list[int]]]) = None: optional lookup table (see get_phase_field_LUT) to speed up the search
419 | Returns:
420 | x2, y2, residuals: coordinates of the corresponding point for the second camera and the bilinear-fit residuals for the horizontal and vertical phase; (-1, -1, [np.inf, np.inf]) if no point is found
421 | '''
422 | # Get the phase values on vertical and horizontal phase fields
423 | phase_h = p1_h[y, x]
424 | phase_v = p1_v[y, x]
425 | 
426 | retval = [np.inf, np.inf]
427 | 
428 | # If LUT available calculate corresponding points with it
429 | if LUT is not None:
430 | # Get value for x, y coordinate from LUT as first approximation
431 | try:
432 | phase_h_index = np.argmin(np.abs(LUT[-2] - phase_h))
433 | phase_v_index = np.argmin(np.abs(LUT[-1] - phase_v))
434 | except Exception:
435 | # phase values not found in LUT
436 | return -1, -1, retval
437 | 
438 | cor_points = LUT[phase_v_index][phase_h_index]
439 | cor_points = np.array(cor_points)
440 | 
441 | if len(cor_points) > 0 and len(cor_points) < 20:
442 | # Take the LUT point with minimum phase difference as a starting approximation
443 | p2_h_d = np.abs(p2_h[cor_points[:,1], cor_points[:,0]] - phase_h)
444 | p2_v_d = np.abs(p2_v[cor_points[:,1], cor_points[:,0]] - phase_v)
445 | x0, y0 = cor_points[np.argmin(p2_h_d + p2_v_d)]
446 | else:
447 | # No reliable starting point in the LUT for these phase values
448 | return -1, -1, retval
449 | iter_num = 0
450 | 
451 | # Iterate through variants of x and y where fields are near to phase_v and phase_h
452 | while iter_num < 5:
453 | # Get nearest coords to current values of x and y
454 | if int(np.round(x0)) - x0 == 0:
455 | x1 = int(x0 - 1)
456 | x2 = int(x0 + 1)
457 | else:
458 | x1 = int(np.floor(x0))
459 | x2 = int(np.ceil(x0))
460 | 
461 | if int(np.round(y0)) - y0 == 0:
462 | y1 = int(y0 - 1)
463 | y2 = int(y0 + 1)
464 | else:
465 | y1 = int(np.floor(y0))
466 | y2 = int(np.ceil(y0))
467 | 
468 | # Check if coords are on the second camera field (positive and less than field shape)
469 | if x1 > 0 and x2 > 0 and y1 > 0 and y2 > 0 and x1 < p2_h.shape[1] and x2 < p2_h.shape[1] and y1 < p2_h.shape[0] and y2 < p2_h.shape[0]:
470 | 
471 | # Get coefficients for bilinear interpolation for horizontal phase
472 | aa = calculate_bilinear_interpolation_coeficients(((x1, y1, p2_h[y1, x1]), (x1, y2, p2_h[y2, x1]),
473 | (x2, y2, p2_h[y2, x2]), (x2, y1, p2_h[y1, x2])))
474 | # Get coefficients for bilinear interpolation for vertical phase
475 | bb = calculate_bilinear_interpolation_coeficients(((x1, y1, p2_v[y1, x1]), (x1, y2, p2_v[y2, x1]),
476 | (x2, y2, p2_v[y2, x2]), (x2, y1, p2_v[y1, x2])))
477 | 
478 | # Find where the bilinear interpolation equals phase_h and phase_v
479 | x0, y0 = fsolve(bilinear_phase_fields_approximation, (x1, y1), args=(aa, bb, phase_h, phase_v))
480 | # x0, y0 = minimize(
481 | # bilinear_phase_fields_approximation,
482 | # (x0, y0),
483 | # args=(aa, bb, phase_h, phase_v),
484 | # bounds=Bounds([x1, y1], [x2, y2]),
485 | # method="Powell",
486 | # ).x
487 | 
488 | # Calculate residuals
489 | h_res, v_res = bilinear_phase_fields_approximation((x0, y0), aa, bb, phase_h, phase_v)
490 | 
491 | # Check if x and y are between x1, x2, y1 and y2
492 | if x2 >= x0 >= x1 and y2 >= y0 >= y1:
493 | return x0, y0, [h_res, v_res]
494 | else:
495 | iter_num = iter_num + 1
496 | else:
497 | return -1, -1, [np.inf, np.inf]
498 | 
499 | return -1, -1, [np.inf, np.inf]
500 | else:
501 | return -1, -1, [np.inf, np.inf]
502 | 
503 | # Fallback: search the isophase curves directly (note: unreachable while the LUT-less branch above returns early)
504 | y_h, x_h = np.where(np.isclose(p2_h, phase_h, atol=10**-1))
505 | y_v, x_v = np.where(np.isclose(p2_v, phase_v, atol=10**-1))
506 | 
507 | # Break if isoline not found
508 | if y_h.size == 0 or y_v.size == 0:
509 | return -1, -1, [np.inf, np.inf]
510 | 
511 | # A faster way to calculate using a flatten array
512 | # _, yx_h = np.unravel_index(np.where(np.isclose(p2_h, p1_h[y, x], atol=10**-1)), p2_h.shape)
513 | # _, yx_v = np.unravel_index(np.where(np.isclose(p2_v, p1_v[y, x], atol=10**-1)), p2_v.shape)
514 | 
515 | # Find ROI of coords for intersection
516 | y_h_min = np.min(y_h)
517 | y_h_max = np.max(y_h)
518 | x_v_min = np.min(x_v)
519 | x_v_max = np.max(x_v)
520 | 
521 | # Apply ROI for coords of isophase curves
522 | y_h = y_h[(x_h >= x_v_min) & (x_h <= x_v_max)]
523 | x_h = x_h[(x_h >= x_v_min) & (x_h <= x_v_max)]
524 | x_v = x_v[(y_v >= y_h_min) & (y_v <= y_h_max)]
525 | y_v = y_v[(y_v >= y_h_min) & (y_v <= y_h_max)]
526 | 
527 | # Break if too many points in isophase line
528 | if len(y_h) > 500 or len(y_v) > 500:
529 | return -1, -1, [np.inf, np.inf]
530 | 
531 | # Break if no points found
532 | if x_h.size == 0 or x_v.size == 0:
533 | return -1, -1, [np.inf, np.inf]
534 | 
535 | # Reshape coords to use broadcasting
536 | x_h = x_h[:, np.newaxis]
537 | y_h = y_h[:, np.newaxis]
538 | y_v = y_v[np.newaxis, :]
539 | x_v = x_v[np.newaxis, :]
540 | 
541 | # Calculate distance between points in coords
542 | distance = np.sqrt((x_h - x_v) ** 2 + (y_h - y_v) ** 2)
543 | 
544 | # Find indices of minimum distance
545 | i_h_min, i_v_min = np.where(distance == distance.min())
546 | i_v_min = i_v_min[0]
547 | i_h_min = i_h_min[0]
548 | 
549 | # A faster way to calculate using a flatten array
550 | # i_h_min, i_v_min = np.unravel_index(np.where(distance.ravel()==distance.min()), distance.shape)
551 | # i_v_min = i_v_min[0][0]
552 | # i_h_min = i_h_min[0][0]
553 | 
554 | x2, y2 = ((x_v[0, i_v_min] + x_h[i_h_min, 0]) / 2, (y_v[0, i_v_min] + y_h[i_h_min, 0]) / 2)
555 | 
556 | return x2, y2, [0.0, 0.0]  # no bilinear residual is available for the isoline search
557 | 
558 | 
559 | def get_phasogrammetry_correlation(p1_h: np.ndarray, p1_v: np.ndarray, p2_h: np.ndarray, p2_v: np.ndarray, x1: int, y1: int, x2: int, y2: int, window_size: int) -> tuple[float, float]:
560 | '''
561 | Calculate correlation function for horizontal and vertical phase fields
562 | 
563 | Args:
564 | p1_h (numpy array): phase field for horizontal fringes for first camera
565 | p1_v (numpy array): phase field for vertical fringes for first camera
566 | p2_h (numpy array): phase field for horizontal fringes for second camera
567 | p2_v (numpy array): phase field for vertical fringes for second camera
568 | x1, y1 (int): coordinates of the point for the first camera
569 | x2, y2 (int): initial guess of the corresponding point for the second camera
570 | window_size (int): size of window to calculate correlation function
571 | 
572 | Returns:
573 | x, y (tuple[float, float]): refined coordinates of the correlation maximum for the second camera, or (-1, -1) on failure
574 | '''
575 | p1_h_ij = p1_h[int(y1 - window_size // 2):int(y1 + window_size // 2), int(x1 - window_size // 2):int(x1 + window_size // 2)]
576 | p1_v_ij = p1_v[int(y1 - window_size // 2):int(y1 + window_size // 2), int(x1 - window_size // 2):int(x1 + window_size // 2)]
577 | p1_h_m = np.mean(p1_h_ij)
578 | p1_v_m = 
np.mean(p1_v_ij)
579 | t1_h = (p1_h_ij - p1_h_m) ** 2
580 | t1_v = (p1_v_ij - p1_v_m) ** 2
581 | 
582 | correlation_field = np.zeros((window_size, window_size))
583 | 
584 | xx = np.linspace(x2 - window_size // 2, x2 + window_size // 2, window_size)
585 | yy = np.linspace(y2 - window_size // 2, y2 + window_size // 2, window_size)
586 | 
587 | for j in range(yy.shape[0]):
588 | for i in range(xx.shape[0]):
589 | x0 = xx[i]
590 | y0 = yy[j]
591 | p2_h_ij = p2_h[int(y0 - window_size // 2):int(y0 + window_size // 2), int(x0 - window_size // 2):int(x0 + window_size // 2)]
592 | p2_v_ij = p2_v[int(y0 - window_size // 2):int(y0 + window_size // 2), int(x0 - window_size // 2):int(x0 + window_size // 2)]
593 | p2_h_m = np.mean(p2_h_ij)
594 | p2_v_m = np.mean(p2_v_ij)
595 | t2_h = (p2_h_ij - p2_h_m) ** 2
596 | t2_v = (p2_v_ij - p2_v_m) ** 2
597 | 
598 | if p2_h_ij.size == p1_h_ij.size and p2_v_ij.size == p1_v_ij.size:
599 | t = np.sum(t1_h * t1_v * t2_h * t2_v) / np.sqrt(np.sum(t1_h * t1_v) * np.sum(t2_h * t2_v))
600 | # if t < 1:
601 | correlation_field[j, i] = t
602 | 
603 | # Find maximum indices for x and y
604 | maximum = np.unravel_index(correlation_field.argmax(), correlation_field.shape)
605 | 
606 | # Get neighborhood pixels of maximum at X axis
607 | cx0 = np.fabs(correlation_field[maximum[0], maximum[1] - 1])
608 | cx1 = np.fabs(correlation_field[maximum[0], maximum[1]])
609 | 
610 | if maximum[1] < correlation_field.shape[1] - 1:
611 | cx2 = np.fabs(correlation_field[maximum[0], maximum[1] + 1])
612 | else:
613 | cx2 = np.fabs(correlation_field[maximum[0], 0])
614 | 
615 | # Get neighborhood pixels of maximum at Y axis
616 | cy0 = np.fabs(correlation_field[maximum[0] - 1, maximum[1]])
617 | cy1 = np.fabs(correlation_field[maximum[0], maximum[1]])
618 | 
619 | if maximum[0] < correlation_field.shape[0] - 1:
620 | cy2 = np.fabs(correlation_field[maximum[0] + 1, maximum[1]])
621 | else:
622 | cy2 = np.fabs(correlation_field[0, maximum[1]])
623 | 
624 | # 3-point gauss fit
625 | try:
626 | x_max = maximum[1] + (np.log(np.abs(cx0)) - np.log(np.abs(cx2)))/(2 * np.log(np.abs(cx0)) - 4 * np.log(np.abs(cx1)) + 2 * np.log(np.abs(cx2)))
627 | except (ZeroDivisionError, ValueError):
628 | x_max = 0
629 | try:
630 | y_max = maximum[0] + (np.log(np.abs(cy0)) - np.log(np.abs(cy2)))/(2 * np.log(np.abs(cy0)) - 4 * np.log(np.abs(cy1)) + 2 * np.log(np.abs(cy2)))
631 | except (ZeroDivisionError, ValueError):
632 | y_max = 0
633 | 
634 | # Shift maximum due to periodicity of the correlation function
635 | if x_max > correlation_field.shape[1] / 2:
636 | x_max = x_max - correlation_field.shape[1]
637 | elif np.fabs(x_max) < 0.01:
638 | x_max = 0
639 | 
640 | # Shift maximum due to periodicity of the correlation function
641 | if y_max > correlation_field.shape[0] / 2:
642 | y_max = y_max - correlation_field.shape[0]
643 | elif np.fabs(y_max) < 0.01:
644 | y_max = 0
645 | 
646 | if not np.isnan(x_max) and not np.isnan(y_max):
647 | return x2 + x_max, y2 + y_max
648 | else:
649 | return -1, -1
650 | 
651 | 
652 | def get_phase_field_ROI(fpp_measurement: FPPMeasurement, signal_to_nose_threshold: float = 0.25):
653 | '''
654 | Get ROI for FPP measurement with the help of signal-to-noise thresholding.
655 | ROI is stored as a mask (fpp_measurement.signal_to_noise_mask) with values 0 for points
656 | with signal-to-noise ratio below threshold and 1 for points with ratio above threshold.
657 | Additionally ROI is stored as a quadrangle defined by four points consisting of minimum
658 | and maximum x and y coordinates for points with signal-to-noise ratio above the threshold.
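The ratio modulated_intensity / average_intensity is close to zero where no fringes
are projected, so thresholding it keeps only the well-modulated fringe area.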
659 | Calculated ROI will be stored in the input fpp_measurement argument.
660 | 
661 | Args:
662 | fpp_measurement (FPPMeasurement): FPP measurement for calculating the ROI
663 | signal_to_nose_threshold (float) = 0.25: threshold for the signal-to-noise ratio used to calculate the ROI
664 | '''
665 | # For each camera result
666 | for cam_result in fpp_measurement.camera_results:
667 | # Calculate signal to noise ratio
668 | signal_to_noise = cam_result.modulated_intensities[-1] / cam_result.average_intensities[-1]
669 | # Threshold signal to noise with defined threshold level
670 | thresholded_coords = np.argwhere(signal_to_noise > signal_to_nose_threshold)
671 | 
672 | # Store ROI mask
673 | cam_result.signal_to_noise_mask = np.zeros(signal_to_noise.shape, dtype=int)
674 | cam_result.signal_to_noise_mask[signal_to_noise > signal_to_nose_threshold] = 1
675 | 
676 | # Determine four points around thresholded area
677 | x_min = np.min(thresholded_coords[:, 1])
678 | x_max = np.max(thresholded_coords[:, 1])
679 | y_min = np.min(thresholded_coords[:, 0])
680 | y_max = np.max(thresholded_coords[:, 0])
681 | 
682 | # Store determined ROI
683 | cam_result.ROI = np.array([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]])
684 | 
685 | 
686 | def get_phase_field_LUT(measurement: FPPMeasurement) -> list[list[list]]:
687 | '''
688 | Get a LUT for the horizontal and vertical phase fields to increase phasogrammetry calculation speed.
689 | LUT is a two-dimensional array of coordinates whose indices correspond to the values of horizontal
690 | and vertical phase in these coordinates. Knowing the values of the horizontal and vertical phase,
691 | you can quickly find the coordinates of points with these values.
692 | The LUT is a list of lists of lists of two-dimensional coordinates.
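Illustrative indexing: LUT[v][h] is the list of [x, y] pixel coordinates whose rounded
vertical and horizontal phases fall at v_range[v] and h_range[h]; the two trailing
entries, LUT[-2] and LUT[-1], store the h_range and v_range arrays themselves.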
693 | 694 | Args: 695 | measurement (FPPMeasurement): FPP measurment with horizontal and vertical fringes 696 | Returns: 697 | LUT (list[list[list]]): LUT structure containing the coordinates of points for the horizontal and vertical phase values 698 | ''' 699 | assert len(measurement.camera_results) == 4, 'It should be four (4) camera results in camera_results of fpp_measurement' 700 | 701 | # Get horizontal and vertical unwrapped phases for second camera 702 | cam1_meas_h = measurement.camera_results[2] 703 | cam1_meas_v = measurement.camera_results[0] 704 | cam2_meas_h = measurement.camera_results[3] 705 | cam2_meas_v = measurement.camera_results[1] 706 | 707 | p_h = cam2_meas_h.unwrapped_phases[-1] 708 | p_v = cam2_meas_v.unwrapped_phases[-1] 709 | 710 | # Find range for horizontal and vertical phase 711 | if cam1_meas_h.ROI_mask is not None: 712 | # If ROI mask defined - get min max phases from ROI area for first camera 713 | p_h1 = cam1_meas_h.unwrapped_phases[-1][cam1_meas_h.ROI_mask == 1] 714 | p_v1 = cam1_meas_v.unwrapped_phases[-1][cam1_meas_v.ROI_mask == 1] 715 | ph_max = np.max(p_h1) 716 | ph_min = np.min(p_h1) 717 | pv_max = np.max(p_v1) 718 | pv_min = np.min(p_v1) 719 | else: 720 | ph_max = np.max(p_h) 721 | ph_min = np.min(p_h) 722 | pv_max = np.max(p_v) 723 | pv_min = np.min(p_v) 724 | 725 | step = 1.0 726 | 727 | h_range = np.arange(ph_min, ph_max, step) 728 | v_range = np.arange(pv_min, pv_max, step) 729 | 730 | # Determine size of LUT structure 731 | w, h = h_range.shape[0] + 1, v_range.shape[0] + 1 732 | 733 | # Create LUT structure 734 | LUT = [[[] for x in range(w)] for y in range(h)] 735 | 736 | w = p_h.shape[1] 737 | h = p_h.shape[0] 738 | 739 | # Phase rounding with an offset so that they start from zero 740 | p_h_r = np.round((p_h - ph_min) / step).astype(int).tolist() 741 | p_v_r = np.round((p_v - pv_min) / step).astype(int).tolist() 742 | 743 | # Fill LUT with coordinates of points with horizontal and vertical values as indicies 744 | for y in range(h): 745 | for x in range(w): 746 | if cam2_meas_h.signal_to_noise_mask[y, x] == 1 and cam2_meas_v.signal_to_noise_mask[y, x] == 1: 747 | if (pv_max - pv_min) / step >= p_v_r[y][x] >= 0 and (ph_max - ph_min) / step >= p_h_r[y][x] >= 0: 748 | LUT[p_v_r[y][x]][p_h_r[y][x]].append([x, y]) 749 | 750 | # Add range of horizontal and vertical phases at the end of LUT 751 | LUT.append(h_range) 752 | LUT.append(v_range) 753 | 754 | return LUT 755 | 756 | 757 | def process_fppmeasurement_with_phasogrammetry(measurement: FPPMeasurement, step_x: float, step_y: float, LUT:list[list[list[int]]]=None) -> tuple[np.ndarray, np.ndarray]: 758 | ''' 759 | Find 2D corresponding points for two phase fields sets with phasogrammetry approach 760 | 761 | Args: 762 | measurement (FPPMeasurement): FPPMeasurement instances with horizontal and vertical phase field for two cameras 763 | step_x, step_y (float): horizontal and vertical steps to calculate corresponding points 764 | LUT (list[list[list]]): LUT structure containing the coordinates of points for the horizontal and vertical phase values 765 | Returns: 766 | points_1 (numpy array [N, 2]): corresponding 2D points from first camera 767 | points_2 (numpy array [N, 2]): corresponding 2D points from second camera 768 | ''' 769 | # Take phases with highest frequencies 770 | p1_h = measurement.camera_results[2].unwrapped_phases[-1] 771 | p2_h = measurement.camera_results[3].unwrapped_phases[-1] 772 | 773 | p1_v = measurement.camera_results[0].unwrapped_phases[-1] 774 | p2_v = 
measurement.camera_results[1].unwrapped_phases[-1]
775 | 
776 | # Get ROI from measurement object
777 | ROI1 = measurement.camera_results[0].ROI
778 | 
779 | # Take full-size slices of the second camera phase fields (the slice offsets are added back to the found coordinates below)
780 | ROIx = slice(0, measurement.camera_results[1].unwrapped_phases[-1].shape[1])
781 | ROIy = slice(0, measurement.camera_results[1].unwrapped_phases[-1].shape[0])
782 | 
783 | p2_h = p2_h[ROIy, ROIx]
784 | p2_v = p2_v[ROIy, ROIx]
785 | 
786 | # Calculation of the coordinate grid on first image
787 | xx = np.arange(0, p1_h.shape[1], step_x, dtype=np.int32)
788 | yy = np.arange(0, p1_h.shape[0], step_y, dtype=np.int32)
789 | 
790 | coords1 = []
791 | 
792 | for y in yy:
793 | for x in xx:
794 | if measurement.camera_results[0].ROI_mask[y, x] == 1:
795 | coords1.append((x, y))
796 | 
797 | coords2 = []
798 | errors = []
799 | 
800 | coords_to_delete = []
801 | 
802 | if config.USE_MULTIPROCESSING:
803 | # Use parallel calculation to increase processing speed
804 | with multiprocessing.Pool(config.POOLS_NUMBER) as p:
805 | coords2 = p.starmap(find_phasogrammetry_corresponding_point, [(p1_h, p1_v, p2_h, p2_v, coords1[i][0], coords1[i][1], LUT) for i in range(len(coords1))])
806 | 
807 | # Mark points for which no corresponding point was found
808 | for i in range(len(coords2)):
809 | if coords2[i][0] < 0 and coords2[i][1] < 0:
810 | coords_to_delete.append(i)
811 | else:
812 | errors.append(coords2[i][2])
813 | coords2[i] = (coords2[i][0] + ROIx.start, coords2[i][1] + ROIy.start)  # add ROI left and top offsets
814 | 
815 | # If no point found, delete coordinate from grid
816 | for index in reversed(coords_to_delete):
817 | coords1.pop(index)
818 | coords2.pop(index)
819 | else:
820 | for i in range(len(coords1)):
821 | # Find corresponding point coordinate on second image
822 | x, y, err = find_phasogrammetry_corresponding_point(p1_h, p1_v, p2_h, p2_v, coords1[i][0], coords1[i][1], LUT)
823 | # If no point found, delete coordinate from grid
824 | if (x == -1 and y == -1) or (abs(err[0]) > 0.1 or abs(err[1]) > 0.1):
825 | coords_to_delete.append(i)
826 | else:
827 | coords2.append((x + ROIx.start, y + ROIy.start))
828 | errors.append(err)
829 | 
830 | # Delete points in grid with no corresponding point on second image
831 | for index in reversed(coords_to_delete):
832 | coords1.pop(index)
833 | 
834 | # Form a set of coordinates of corresponding points on the first and second images
835 | image1_points = []
836 | image2_points = []
837 | distance = []
838 | 
839 | for point1, point2 in zip(coords1, coords2):
840 | image1_points.append([point1[0], point1[1]])
841 | image2_points.append([point2[0], point2[1]])
842 | distance.append(((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)**0.5)
843 | 
844 | # Remove outliers
845 | std_d = np.std(distance)
846 | indices_to_delete = [i for i in range(len(distance)) if distance[i] > std_d * 10]
847 | for index in reversed(indices_to_delete):
848 | image1_points.pop(index)
849 | image2_points.pop(index)
850 | errors.pop(index)
851 | 
852 | # Convert list to array before returning result from function
853 | image1_points = np.array(image1_points, dtype=np.float32)
854 | image2_points = np.array(image2_points, dtype=np.float32)
855 | errors = np.array(errors, dtype=np.float32)
856 | 
857 | return image1_points, image2_points, errors
858 | 
859 | 
860 | def calculate_displacement_field(field1: np.ndarray, field2: np.ndarray, win_size_x: int, win_size_y: int, step_x: int, step_y: int) -> np.ndarray:
861 | '''
862 | Calculate the displacement field between two scalar fields through correlation.
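Each pair of interrogation windows is weighted with a Gaussian kernel, cross-correlated
through the FFT, and the correlation peak is refined to subpixel accuracy with a
three-point Gaussian fit.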


def calculate_displacement_field(field1: np.ndarray, field2: np.ndarray, win_size_x: int, win_size_y: int, step_x: int, step_y: int) -> tuple[np.ndarray, np.ndarray]:
    '''
    Calculate displacement field between two scalar fields through correlation.

    Args:
        field1 (2D numpy array): first scalar field
        field2 (2D numpy array): second scalar field
        win_size_x (int): interrogation window horizontal size
        win_size_y (int): interrogation window vertical size
        step_x (int): horizontal step for dividing into interrogation windows
        step_y (int): vertical step for dividing into interrogation windows
    Returns:
        coords (numpy array [M, 2]): centers of the interrogation windows
        maximums (numpy array [M, 3]): subpixel displacements and correlation peak values
    '''
    assert field1.shape == field2.shape, 'Shapes of field1 and field2 must be equal'
    assert win_size_x > 4 and win_size_y > 4, 'Size of interrogation windows should be greater than 4 pixels'
    assert step_x > 0 and step_y > 0, 'Horizontal and vertical steps should be greater than zero'

    # Get interrogation windows
    list_of_windows = [[], []]
    list_of_coords = []

    width = field1.shape[1]
    height = field1.shape[0]
    num_win_x = range(int(np.floor((width - win_size_x) / step_x + 1)))
    num_win_y = range(int(np.floor((height - win_size_y) / step_y + 1)))

    for i in num_win_x:
        start_x = step_x * i
        end_x = step_x * i + win_size_x
        center_x = np.round(end_x - win_size_x / 2)

        for j in num_win_y:
            start_y = step_y * j
            end_y = step_y * j + win_size_y
            center_y = np.round(end_y - win_size_y / 2)

            window1 = field1[start_y:end_y, start_x:end_x]
            window2 = field2[start_y:end_y, start_x:end_x]
            list_of_windows[0].append(window1)
            list_of_windows[1].append(window2)
            list_of_coords.append([center_x, center_y])

    # Calculate correlation function
    correlation_list = []

    # Create 2D Gauss kernel (rows along Y, columns along X, to match the window shape)
    gauss = np.outer(signal.windows.gaussian(win_size_y, win_size_y / 2),
                     signal.windows.gaussian(win_size_x, win_size_x / 2))

    for i in range(len(list_of_windows[0])):
        # Apply Gaussian windowing to interrogation windows
        list_of_windows[0][i] = list_of_windows[0][i] * gauss
        list_of_windows[1][i] = list_of_windows[1][i] * gauss
        mean1 = np.mean(list_of_windows[0][i])
        std1 = np.std(list_of_windows[0][i])
        mean2 = np.mean(list_of_windows[1][i])
        std2 = np.std(list_of_windows[1][i])
        # Guard against division by zero for constant windows
        if std1 == 0:
            std1 = 1
        if std2 == 0:
            std2 = 1
        # Circular cross-correlation via FFT
        a = np.fft.rfft2(list_of_windows[0][i] - mean1, norm='ortho')
        b = np.fft.rfft2(list_of_windows[1][i] - mean2, norm='ortho')
        c = np.multiply(a, b.conjugate())
        d = np.fft.irfft2(c)
        e = d / (std1 * std2)
        correlation_list.append(e)
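
    # Illustrative note (not part of the original module): with the 'ortho' forward
    # transform and the default inverse, each entry of e equals the Pearson correlation
    # coefficient between window1 and a circularly shifted copy of window2, so the
    # peak is close to 1.0 for a perfect match. A brute-force equivalent for one
    # shift (dy, dx), with assumed names w1 and w2 for a windowed pair:
    #
    #   shifted = np.roll(w2, (dy, dx), axis=(0, 1))
    #   e_dy_dx = np.mean((w1 - w1.mean()) * (shifted - w2.mean())) / (w1.std() * w2.std())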

    # Find maximums
    maximums_list = []

    for i in range(len(correlation_list)):

        # Find maximum indexes for x and y
        maximum = np.unravel_index(correlation_list[i].argmax(), correlation_list[i].shape)

        # Get neighborhood pixels of the maximum along the X axis (wrap around at the border)
        cx0 = np.fabs(correlation_list[i][maximum[0], maximum[1] - 1])
        cx1 = np.fabs(correlation_list[i][maximum[0], maximum[1]])

        if maximum[1] == correlation_list[i].shape[1] - 1:
            cx2 = np.fabs(correlation_list[i][maximum[0], 0])
        else:
            cx2 = np.fabs(correlation_list[i][maximum[0], maximum[1] + 1])

        # Get neighborhood pixels of the maximum along the Y axis (wrap around at the border)
        cy0 = np.fabs(correlation_list[i][maximum[0] - 1, maximum[1]])
        cy1 = np.fabs(correlation_list[i][maximum[0], maximum[1]])

        if maximum[0] == correlation_list[i].shape[0] - 1:
            cy2 = np.fabs(correlation_list[i][0, maximum[1]])
        else:
            cy2 = np.fabs(correlation_list[i][maximum[0] + 1, maximum[1]])

        # 3-point Gaussian subpixel fit (degenerate peaks produce nan or inf)
        with np.errstate(divide='ignore', invalid='ignore'):
            x_max = maximum[1] + (np.log(cx0) - np.log(cx2)) / (2 * np.log(cx0) - 4 * np.log(cx1) + 2 * np.log(cx2))
            y_max = maximum[0] + (np.log(cy0) - np.log(cy2)) / (2 * np.log(cy0) - 4 * np.log(cy1) + 2 * np.log(cy2))
        if not np.isfinite(x_max):
            x_max = 0
        if not np.isfinite(y_max):
            y_max = 0

        # Wrap the maximum position due to the periodicity of the circular correlation
        if x_max > correlation_list[i].shape[1] / 2:
            x_max = x_max - correlation_list[i].shape[1]
        elif np.fabs(x_max) < 0.01:
            x_max = 0

        if y_max > correlation_list[i].shape[0] / 2:
            y_max = y_max - correlation_list[i].shape[0]
        elif np.fabs(y_max) < 0.01:
            y_max = 0

        # The correlation value is taken at the integer peak, not at the fitted subpixel maximum
        maximums_list.append([x_max, y_max, np.max(correlation_list[i])])

    # Return window centers and subpixel displacements with correlation peak values
    return np.array(list_of_coords), np.array(maximums_list)
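
# Illustrative sketch (assumed data, not part of the original module): computing a
# displacement field between two scalar fields and visualizing it with
# utils.get_quiverplot, where field_before and field_after are hypothetical inputs:
#
#   coords, maximums = calculate_displacement_field(field_before, field_after,
#                                                   win_size_x=32, win_size_y=32,
#                                                   step_x=16, step_y=16)
#   from utils import get_quiverplot
#   get_quiverplot(coords, maximums)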

--------------------------------------------------------------------------------
/projector.py:
--------------------------------------------------------------------------------
'''Module for Projector class'''
from __future__ import annotations

import cv2
import numpy as np

import config


class Projector:
    '''
    Class to control the projector during an experiment
    '''
    def __init__(self, width: int, height: int, min_brightness: float = 0, max_brightness: float = 255):
        self.width = width
        self.height = height
        self.__min_image_brightness = min_brightness
        self.__max_image_brightness = max_brightness
        self.window_exist: bool = False

    def set_up_window(self) -> None:
        '''
        Open a new window through the OpenCV GUI and show it on the second extended screen
        '''
        cv2.namedWindow('Projector window', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('Projector window', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        # TODO: replace the magic window shift with the actual screen resolution for the multimonitor case
        # https://stackoverflow.com/questions/3129322/how-do-i-get-monitor-resolution-in-python
        cv2.moveWindow('Projector window', config.PROJECTOR_WINDOW_SHIFT, 0)
        self.window_exist = True

    def project_pattern(self, pattern: np.ndarray, correction: bool = True) -> None:
        '''
        Project a pattern through the OpenCV GUI window; the pattern is intensity-corrected before projection

        Args:
            pattern (numpy array): image to project with the OpenCV imshow method
            correction (bool): do intensity correction before projecting the pattern
        '''
        # Open the OpenCV GUI window if it has not already been opened
        if not self.window_exist:
            self.set_up_window()

        # Correct image with calibration coefficients
        if correction:
            self._corrected_pattern = self.image_brightness_rescale_factor * ((pattern / config.PROJECTOR_GAMMA_A) ** (1 / config.PROJECTOR_GAMMA_B)) + self.min_image_brightness
        else:
            self._corrected_pattern = pattern
        # Show the image in the OpenCV GUI window
        cv2.imshow('Projector window', self._corrected_pattern)

    def project_black_background(self) -> None:
        '''
        Project a black pattern through the OpenCV GUI window to emulate a switched-off projector
        '''
        # Open the OpenCV GUI window if it has not already been opened
        if not self.window_exist:
            self.set_up_window()

        # Create black background image
        background = np.zeros((self.height, self.width))

        # Show the image in the OpenCV GUI window
        cv2.imshow('Projector window', background)
        cv2.waitKey(200)

    def project_white_background(self) -> None:
        '''
        Project a white pattern through the OpenCV GUI window to use the projector as a light source
        '''
        # Open the OpenCV GUI window if it has not already been opened
        if not self.window_exist:
            self.set_up_window()

        # Create white background image
        background = np.ones((self.height, self.width)) * 255

        # Show the image in the OpenCV GUI window
        cv2.imshow('Projector window', background)
        cv2.waitKey(200)

    def close_window(self) -> None:
        '''
        Close the opened OpenCV GUI window on the second extended screen
        '''
        cv2.destroyWindow('Projector window')
        self.window_exist = False

    @property
    def corrected_pattern(self) -> np.ndarray:
        '''
        Return the last projected corrected pattern as a numpy array
        '''
        return self._corrected_pattern

    @property
    def resolution(self) -> tuple[int, int]:
        return self.width, self.height

    @property
    def min_image_brightness(self) -> float:
        return self.__min_image_brightness

    @min_image_brightness.setter
    def min_image_brightness(self, value: float):
        self.__min_image_brightness = value

    @property
    def max_image_brightness(self) -> float:
        return self.__max_image_brightness

    @max_image_brightness.setter
    def max_image_brightness(self, value: float):
        self.__max_image_brightness = value

    @property
    def image_brightness_rescale_factor(self) -> float:
        return (self.max_image_brightness - self.min_image_brightness)
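
# Illustrative sketch (assumed resolution and pattern, not part of the original
# module): projecting a single pattern and closing the window afterwards:
#
#   projector = Projector(width=1280, height=720)
#   projector.project_pattern(pattern)
#   cv2.waitKey(500)  # give the OpenCV GUI time to draw the frame
#   projector.close_window()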
First argument 23 | in mask is frequeincy number, second is phase shift number 24 | shifts_count (int): The count of phase shifts used during images capturing 25 | frequencies (list): The list of frequencies used during images capturing 26 | 27 | Returns: 28 | measurement (FPPMeasurement): FPPMeasurement instance 29 | ''' 30 | # Filenames for image files 31 | filenames = [] 32 | 33 | for i in range(len(frequencies)): 34 | # List of filenames for one frequency 35 | one_frequency_files = [] 36 | for j in range(shifts_count): 37 | one_frequency_files.append(files_path + file_mask.format(i, j)) 38 | filenames.append(one_frequency_files) 39 | 40 | # Calculate phase shifts 41 | shifts = [2 * np.pi / len(shifts_count) * i for i in range(shifts_count)] 42 | 43 | # Create new FPPMeasurement instance 44 | measurement = FPPMeasurement( 45 | shifts = shifts, 46 | frequencies = frequencies, 47 | imgs_file_names = filenames, 48 | ) 49 | 50 | return measurement 51 | 52 | 53 | def load_fpp_measurements(file: str) -> FPPMeasurement: 54 | ''' 55 | Load FPPMeasurements from json file 56 | 57 | Args: 58 | file (str): the path to FPPMeasurements json file 59 | 60 | Returns: 61 | measurements (FPPMeasurement): FPPMeasurement instances loaded from file 62 | ''' 63 | with open(file, 'r') as fp: 64 | data = json.load(fp) 65 | 66 | measurement = FPPMeasurement( 67 | phase_shifting_type = PhaseShiftingAlgorithm(data.get('phase_shifting_type', 1)), 68 | shifts = data['shifts'], 69 | frequencies = data['frequencies'], 70 | camera_results= [ 71 | CameraMeasurement( 72 | fringe_orientation=data['camera_results'][0]['fringe_orientation'], 73 | imgs_list=get_images_from_config(data['camera_results'][0]['imgs_file_names']) 74 | ), 75 | CameraMeasurement( 76 | fringe_orientation=data['camera_results'][1]['fringe_orientation'], 77 | imgs_list=get_images_from_config(data['camera_results'][1]['imgs_file_names']) 78 | ), 79 | CameraMeasurement( 80 | fringe_orientation=data['camera_results'][2]['fringe_orientation'], 81 | imgs_list=get_images_from_config(data['camera_results'][2]['imgs_file_names']) 82 | ), 83 | CameraMeasurement( 84 | fringe_orientation=data['camera_results'][3]['fringe_orientation'], 85 | imgs_list=get_images_from_config(data['camera_results'][3]['imgs_file_names']) 86 | ) 87 | ] 88 | ) 89 | 90 | return measurement 91 | 92 | 93 | def get_images_from_config(paths: list[list[str]]) -> list[list[np.ndarray]]: 94 | ''' 95 | Load images from files for CameraMeasurement instance. 


def get_images_from_config(paths: list[list[str]]) -> list[list[np.ndarray]]:
    '''
    Load images from files for a CameraMeasurement instance.

    Args:
        paths (list[list[str]]): list of lists of paths to image files

    Returns:
        images (list[list[np.ndarray]]): list of lists of loaded images
    '''
    images = []

    for one_frequency_path in paths:
        images.append([])
        for image_path in one_frequency_path:
            image = cv2.imread(image_path)
            # If the image is loaded
            if image is not None:
                # If the image is a 3D array
                if len(image.shape) > 2:
                    # Transform to grayscale
                    images[-1].append(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
                else:
                    images[-1].append(image)

    return images


def get_quiverplot(coords: np.ndarray, maximums: np.ndarray, image: Optional[np.ndarray] = None, stretch_factor: float = 5.0):
    '''
    Draw a vector field of displacements on the input image (if defined)

    Args:
        coords (np.ndarray): array of vector start points
        maximums (np.ndarray): array of displacement vectors with correlation maxima
        image (Optional[np.ndarray]): image to draw the vector field on
        stretch_factor (float): magnification factor for vector length
    '''
    x = [coord[0] for coord in coords]
    y = [coord[1] for coord in coords]
    u = [maximum[0] * stretch_factor for maximum in maximums]
    v = [maximum[1] * stretch_factor for maximum in maximums]
    c = [maximum[2] for maximum in maximums]

    plt.quiver(x, y, u, v, c, scale=1, units='xy', cmap='jet')
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Normal correlation function maximum')

    plt.xlabel('X, pixels')
    plt.ylabel('Y, pixels')

    if image is not None:
        plt.imshow(image, cmap='gray')

    plt.show()
--------------------------------------------------------------------------------