├── ToF
│   ├── tof.jpg
│   ├── kitchen.jpg
│   ├── create_rgbd.py
│   ├── lib
│   │   └── depth_utils.py
│   └── depth.py
├── 3D-print
│   ├── hardware.jpg
│   ├── Screenshot.jpg
│   ├── sensorpack.3mf
│   ├── sensorpack.max
│   └── textures
│       ├── tof_back.jpg
│       ├── tof_front.jpg
│       ├── im519_front.jpg
│       └── imx519_back.jpg
├── alignment
│   ├── synthetic
│   │   ├── fov.png
│   │   ├── cubic.png
│   │   ├── pattern.png
│   │   └── synthetic.max
│   ├── images
│   │   └── alignment.jpg
│   ├── undistort.py
│   ├── calibrate.py
│   ├── alignment.py
│   └── imx519_calibration.json
├── BNO055
│   ├── pico
│   │   ├── lib
│   │   │   └── adafruit_bno055.mpy
│   │   └── main.py
│   └── receiver
│       └── receive.py
├── .gitmodules
├── requirements.txt
├── .gitignore
├── RGB
│   ├── capture-sequence.py
│   ├── opencv-test.py
│   └── RGB-cam.py
├── LICENSE
└── readme.md
/ToF/tof.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/ToF/tof.jpg
--------------------------------------------------------------------------------
/ToF/kitchen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/ToF/kitchen.jpg
--------------------------------------------------------------------------------
/3D-print/hardware.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/hardware.jpg
--------------------------------------------------------------------------------
/3D-print/Screenshot.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/Screenshot.jpg
--------------------------------------------------------------------------------
/3D-print/sensorpack.3mf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/sensorpack.3mf
--------------------------------------------------------------------------------
/3D-print/sensorpack.max:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/sensorpack.max
--------------------------------------------------------------------------------
/alignment/synthetic/fov.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/alignment/synthetic/fov.png
--------------------------------------------------------------------------------
/3D-print/textures/tof_back.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/textures/tof_back.jpg
--------------------------------------------------------------------------------
/3D-print/textures/tof_front.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/textures/tof_front.jpg
--------------------------------------------------------------------------------
/alignment/images/alignment.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/alignment/images/alignment.jpg
--------------------------------------------------------------------------------
/alignment/synthetic/cubic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/alignment/synthetic/cubic.png
--------------------------------------------------------------------------------
/alignment/synthetic/pattern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/alignment/synthetic/pattern.png
--------------------------------------------------------------------------------
/3D-print/textures/im519_front.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/textures/im519_front.jpg
--------------------------------------------------------------------------------
/3D-print/textures/imx519_back.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/3D-print/textures/imx519_back.jpg
--------------------------------------------------------------------------------
/alignment/synthetic/synthetic.max:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/alignment/synthetic/synthetic.max
--------------------------------------------------------------------------------
/BNO055/pico/lib/adafruit_bno055.mpy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LaserBorg/Sensorpack/HEAD/BNO055/pico/lib/adafruit_bno055.mpy
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "pyJBU"]
2 | path = pyJBU
3 | url = https://github.com/LaserBorg/pyJBU.git
4 | [submodule "Thermal"]
5 | path = Thermal
6 | url = https://github.com/LaserBorg/MLX90640-pico.git
7 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python
2 | open3d
3 | numpy
4 |
5 | ArducamDepthCamera
6 |
7 | # not working on rpi venv, passthrough system package instead
8 | #picamera[array]
9 | #rpi-libcamera
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # temp
2 | Thumbs.db
3 | __pycache__/
4 |
5 | _docs/
6 | _drivers/
7 | _old/
8 |
9 | ToF/output/*
10 | !ToF/output/ficus.jpg
11 | !ToF/output/kitchen.jpg
12 |
13 | RGB/output/*
14 |
15 | alignment/images/*
16 | !alignment/images/alignment.jpg
--------------------------------------------------------------------------------
/RGB/capture-sequence.py:
--------------------------------------------------------------------------------
1 | import os
2 | import datetime
3 | import subprocess
4 |
5 | output_dir = "RGB/output"
6 |
7 | for x in range(20):
8 | timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
9 | output_path = os.path.join(output_dir, f"image_{timestamp}.jpg")
10 |
11 | command = f"rpicam-still --autofocus-mode auto --autofocus-range normal --width 4656 --height 3496 --gain 4 -o {output_path}"
12 | subprocess.run(command, shell=True)
--------------------------------------------------------------------------------
/RGB/opencv-test.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://forums.raspberrypi.com/viewtopic.php?t=369522
3 | '''
4 |
5 | from picamera2 import Picamera2
6 | import cv2
7 |
8 | size = (1920, 1080)
9 | center =((size[0]//2),(size[1]//2))
10 |
11 | camera_mode = {
12 | "size": size,
13 | "format": 'XRGB8888',
14 | 'preserve_ar': True
15 | }
16 |
17 | cam_id = 0
18 |
19 | picam2 = Picamera2(cam_id)
20 |
21 | configuration = picam2.create_still_configuration(main=camera_mode) # create_video_configuration
22 | picam2.configure(configuration)
23 | picam2.start()
24 |
25 | while True:
26 | frame = picam2.capture_array()
27 | cv2.circle(frame, center, 10, (255, 0 , 255), -1)
28 | cv2.imshow('frame', frame)
29 |
30 | key = cv2.waitKey(1)
31 | if key == ord("q"):
32 | break
33 | cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Philip Gutjahr
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/alignment/undistort.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import json
3 | import numpy as np
4 | import os
5 | import glob
6 |
7 | def undistort_image(filename, mtx, dist):
8 | image = cv2.imread(filename)
9 | image_undistorted = cv2.undistort(image, mtx, dist)
10 | return image_undistorted
11 |
12 |
13 | # Load the parameters from the JSON file
14 | with open('alignment/imx519_calibration.json', 'r') as json_file:
15 | calibration_data = json.load(json_file)
16 |
17 | mtx = np.array(calibration_data['camera_matrix'])
18 | dist = np.array(calibration_data['distortion_coefficients'])
19 | rvecs = [np.array(r_vec) for r_vec in calibration_data['rotation_vectors']]
20 | tvecs = [np.array(t_vec).reshape(3, 1) for t_vec in calibration_data['translation_vectors']]
21 |
22 |
23 | input_dir = 'RGB/output' # 'alignment/images/distorted'
24 | output_dir = 'alignment/images/undistorted'
25 | os.makedirs(output_dir, exist_ok=True)
26 |
27 | images = glob.glob(os.path.join(input_dir, '*.jpg'))
28 |
29 | for filename in images:
30 | image_undistorted = undistort_image(filename, mtx, dist)
31 |
32 | # Save the undistorted image
33 | filename = os.path.splitext(os.path.basename(filename))[0]
34 | savepath = os.path.join(output_dir, filename + '.jpg')
35 | cv2.imwrite(savepath, image_undistorted)
36 |
37 | cv2.imshow('image_undistorted', image_undistorted)
38 | if cv2.waitKey(100) & 0xFF == ord('q'):
39 | break
40 |
41 | cv2.destroyAllWindows()
42 |
--------------------------------------------------------------------------------
/ToF/create_rgbd.py:
--------------------------------------------------------------------------------
1 | from lib.depth_utils import load_frame, get_intrinsic, convert_distance_to_zdepth, create_rgbd, filter_by_luminance, create_visualizer
2 |
3 |
4 | if __name__ == "__main__":
5 | import cv2
6 | import open3d as o3d
7 | import os
8 |
9 | os.environ["LIBGL_ALWAYS_SOFTWARE"] = "1"
10 |
11 | print_stats = True
12 | save_pcd = True
13 | bilateralfilter = False
14 |
15 | color_path = "ToF/output/kitchen_amplitude.png"
16 | depth_path = "ToF/output/kitchen_int16.png"
17 | # color_path = "ToF/output/synthetic/amplitude.png"
18 | # depth_path = "ToF/output/synthetic/depth.png"
19 |
20 | shape = (240,180)
21 | fov = 70
22 |
23 | max_depth = 4000
24 | confidence = 20
25 |
26 | color, depth = load_frame(color_path, depth_path, max_depth)
27 |
28 | if print_stats:
29 | print("color", color.shape, color.dtype, (color.min(), color.max()))
30 | print("depth", depth.shape, depth.dtype, (depth.min(), depth.max()))
31 | # cv2.imshow("color", color)
32 | # cv2.imshow("depth", depth)
33 |
34 | if bilateralfilter:
35 | depth = cv2.bilateralFilter(depth, d=5, sigmaColor=75, sigmaSpace=75)
36 |
37 |
38 | camera_intrinsic = get_intrinsic(shape, fov)
39 | #print(camera_intrinsic.intrinsic_matrix)
40 |
41 | zdepth = convert_distance_to_zdepth(depth, camera_intrinsic)
42 | rgbd_image = create_rgbd(color, zdepth)
43 | pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, camera_intrinsic)
44 |
45 | pcd = filter_by_luminance(pcd, confidence)
46 |
47 | if save_pcd:
48 | o3d.io.write_point_cloud("ToF/output/pcd_rgbd.ply", pcd, write_ascii=False)
49 |
50 | vis = create_visualizer()
51 | vis.add_geometry(pcd)
52 | vis.run()
53 |
54 | cv2.destroyAllWindows()
55 | vis.destroy_window()
56 |
--------------------------------------------------------------------------------
/BNO055/pico/main.py:
--------------------------------------------------------------------------------
1 | '''
2 | based on Adafruit Demo:
3 | https://github.com/adafruit/Adafruit_CircuitPython_BNO055/blob/main/examples/bno055_simpletest.py
4 |
5 | https://github.com/adafruit/Adafruit_BNO055
6 | https://github.com/adafruit/Adafruit_CircuitPython_BNO055
7 |
8 | https://learn.adafruit.com/adafruit-bno055-absolute-orientation-sensor/python-circuitpython
9 | '''
10 |
11 | import struct
12 | import asyncio
13 | import busio # type: ignore
14 | import board # type: ignore
15 | import adafruit_bno055 # type: ignore
16 | from usb_cdc import data as ser # type: ignore
17 |
18 |
19 | SCL_pin = board.GP19
20 | SDA_pin = board.GP18
21 |
22 | refresh_rate = 100 # Hz
23 | FRAME_MARKER = b'\xAA\xAA'
24 |
25 | i2c = busio.I2C(SCL_pin, SDA_pin, frequency=400000)
26 | bno055 = adafruit_bno055.BNO055_I2C(i2c)
27 |
28 | async def read_sensor_data():
29 | # print("Temperature:", bno055.temperature)
30 | # print("Acceleration:", bno055.acceleration, "Length:", len(bno055.acceleration))
31 | # print("Magnetic:", bno055.magnetic, "Length:", len(bno055.magnetic))
32 | # print("Gyro:", bno055.gyro, "Length:", len(bno055.gyro))
33 | # print("Euler:", bno055.euler, "Length:", len(bno055.euler))
34 | # print("Quaternion:", bno055.quaternion, "Length:", len(bno055.quaternion))
35 | # print("Linear acceleration:", bno055.linear_acceleration, "Length:", len(bno055.linear_acceleration))
36 | # print("Gravity:", bno055.gravity, "Length:", len(bno055.gravity))
37 |
38 | # Pack sensor data into a bytearray
39 | data = struct.pack('f'*23, # Format string for struct.pack (23 floating point numbers)
40 | float(bno055.temperature),
41 | *bno055.acceleration,
42 | *bno055.magnetic,
43 | *bno055.gyro,
44 | *bno055.euler,
45 | *bno055.quaternion,
46 | *bno055.linear_acceleration,
47 | *bno055.gravity)
48 | return data
49 |
50 |
51 | async def send_data(data):
52 | """Send data and return the number of bytes sent."""
53 | data = FRAME_MARKER + data
54 | ser.write(data)
55 | return len(data)
56 |
57 |
58 | async def main():
59 | while True:
60 | sensor_data = await read_sensor_data()
61 | await send_data(sensor_data)
62 | await asyncio.sleep(1/refresh_rate)
63 |
64 |
65 | if __name__ == "__main__":
66 | try:
67 | asyncio.run(main())
68 | except Exception as e:
69 | print(f"An error occurred: {e}")
70 |
--------------------------------------------------------------------------------
/alignment/calibrate.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://www.geeksforgeeks.org/camera-calibration-with-python-opencv/
3 | https://docs.opencv.org/4.x/da/d0d/tutorial_camera_calibration_pattern.html
4 | '''
5 |
6 | import cv2
7 | import numpy as np
8 | import os
9 | import glob
10 | import json
11 |
12 | def save_coefficients_as_json(mtx, dist, rvecs, tvecs, path):
13 | calibration_data = {
14 | 'camera_matrix': mtx.tolist(),
15 | 'distortion_coefficients': dist.tolist(),
16 | 'rotation_vectors': [r_vec.flatten().tolist() for r_vec in rvecs],
17 | 'translation_vectors': [t_vec.flatten().tolist() for t_vec in tvecs]
18 | }
19 | with open(path, 'w') as outfile:
20 | json.dump(calibration_data, outfile, indent=4)
21 |
22 |
23 | input_dir = 'ToF/output/v1' # 'alignment/images/distorted'
24 | ext = '.png' # '.jpg'
25 | CHECKERBOARD = (7, 7) # (6, 9)
26 | epsilon = 0.001
27 | iterations = 30
28 |
29 |
30 | # stop when specified accuracy or number of iterations is reached.
31 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, iterations, epsilon)
32 |
33 | points_3D = []
34 | points_2D = []
35 |
36 | # 3D points real world coordinates
37 | objectp3d = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
38 | objectp3d[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
39 | prev_img_shape = None
40 |
41 | images = glob.glob(os.path.join(input_dir, '*' + ext))
42 |
43 | for filename in images:
44 | image = cv2.imread(filename)
45 | grayColor = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
46 |
47 | # Find the chessboard corners; ret is True if the desired number of corners was found
48 | ret, corners = cv2.findChessboardCorners( grayColor, CHECKERBOARD,
49 | cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
50 |
51 | # If the corners were detected, refine their pixel coordinates and draw them on the checkerboard image
52 | if ret:
53 | points_3D.append(objectp3d)
54 |
55 | # Refining pixel coordinates for given 2d points.
56 | corners_refined = cv2.cornerSubPix(grayColor, corners, (11, 11), (-1, -1), criteria)
57 | points_2D.append(corners_refined)
58 |
59 | # Draw and display the corners
60 | image = cv2.drawChessboardCorners(image, CHECKERBOARD, corners_refined, ret)
61 |
62 | cv2.imshow('img', image)
63 | cv2.waitKey(100)
64 | cv2.destroyAllWindows()
65 |
66 | # Perform camera calibration by passing points_3D and its corresponding pixel coordinates (points_2D)
67 | ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(points_3D, points_2D, grayColor.shape[::-1], None, None)
68 |
69 | json_path = 'alignment/ToF_calibration.json'
70 | save_coefficients_as_json(mtx, dist, rvecs, tvecs, json_path)
71 |
72 | # print(" Camera matrix:", mtx)
73 | # print("\n Distortion coefficient:", dist)
74 | # print("\n Rotation Vectors:", rvecs)
75 | # print("\n Translation Vectors:", tvecs)
76 |
--------------------------------------------------------------------------------
/alignment/alignment.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import os
4 | import json
5 |
6 |
7 | def align_img(img, position, rotation, scale, size, interpolate=False):
8 |
9 | def calculate_position(imgsize, size, position, scale):
10 |
11 | def translate_centered(imgsize, size, translate=(0, 0)):
12 | x = (size[0] - imgsize[0]) // 2 + translate[0]
13 | y = (size[1] - imgsize[1]) // 2 + translate[1]
14 | return x, y
15 |
16 | scaled_imgsize = (int(imgsize[0] * scale[0]), int(imgsize[1] * scale[1]))
17 | center_offset = np.array(position) - np.array(size) / 2
18 | x, y = translate_centered(scaled_imgsize, size, translate=center_offset.astype(int))
19 | return scaled_imgsize, x, y
20 |
21 | imgsize = (img.shape[1], img.shape[0])
22 |
23 | scaled_imgsize, x, y = calculate_position(imgsize, size, position, scale)
24 |
25 | interpolation = cv2.INTER_LINEAR if interpolate else cv2.INTER_NEAREST
26 | img = cv2.resize(img, scaled_imgsize, interpolation=interpolation)
27 |
28 | # Rotate the image
29 | center = (scaled_imgsize[0] // 2, scaled_imgsize[1] // 2)
30 | rotation_matrix = cv2.getRotationMatrix2D(center, rotation, 1.0)
31 | img_rotated = cv2.warpAffine(img, rotation_matrix, scaled_imgsize, flags=cv2.INTER_LINEAR)
32 |
33 | # resize canvas and place the rotated image centered with offset
34 | img_aligned = np.zeros((size[1], size[0], 3), dtype=np.uint8)
35 | img_aligned[y:y+scaled_imgsize[1], x:x+scaled_imgsize[0]] = img_rotated
36 |
37 | return img_aligned
38 |
39 |
40 | if __name__ == '__main__':
41 |
42 | input_dir = "alignment/images/"
43 | output_dir = "alignment/images/aligned/"
44 | json_path = "alignment/alignment.json"
45 | os.makedirs(output_dir, exist_ok=True)
46 | # Load transforms from JSON
47 | with open(json_path, 'r') as file:
48 | transformations = json.load(file)
49 |
50 | canvas_size = np.array(transformations['Size'])
51 |
52 |
53 | # Load images
54 | depth = cv2.imread(os.path.join(input_dir, 'depth.png'))
55 | thermal = cv2.imread(os.path.join(input_dir,'thermal.png'))
56 | color = cv2.imread(os.path.join(input_dir, 'RGB.jpg'))
57 | color = cv2.resize(color, canvas_size)
58 |
59 |
60 | # Align
61 | d = transformations['Depth']
62 | depth_aligned = align_img(depth, np.array(d['position']), d['rotation'], np.array(d['scale']), canvas_size)
63 |
64 | t = transformations['Thermal']
65 | thermal_aligned = align_img(thermal, np.array(t['position']), t['rotation'], np.array(t['scale']), canvas_size)
66 |
67 |
68 | # Save and display
69 | cv2.imwrite(os.path.join(output_dir,'color_aligned.jpg'), color)
70 | cv2.imwrite(os.path.join(output_dir,'depth_aligned.jpg'), depth_aligned)
71 | cv2.imwrite(os.path.join(output_dir,'thermal_aligned.jpg'), thermal_aligned)
72 |
73 | cv2.imshow('Color', color)
74 | cv2.imshow('Depth aligned', depth_aligned)
75 | cv2.imshow('Thermal aligned', thermal_aligned)
76 |
77 | cv2.waitKey(0)
78 | cv2.destroyAllWindows()
79 |
--------------------------------------------------------------------------------
/RGB/RGB-cam.py:
--------------------------------------------------------------------------------
1 | from picamera2 import Picamera2, Preview
2 | import datetime
3 | import time
4 | import os
5 |
6 | class HDRCamera:
7 | def __init__(self, cam_id, fstops=1, output_dir="./"):
8 | self.fstops = fstops
9 | self.output_dir = output_dir
10 |
11 | self.picam2 = Picamera2(cam_id)
12 | self.picam2.start_preview(Preview.QTGL)
13 |
14 | self.preview_config = self.picam2.create_preview_configuration()
15 | self.image_config = self.picam2.create_still_configuration()
16 |
17 | self.ctrls_autofocus = {"AfMode": 1, "AfTrigger": 0, "AfSpeed": 1} # AfModeEnum.Auto, AfSpeedEnum.Fast
18 | self.ctrls_fixedfocus = {"AfMode": 0} # AfModeEnum.Manual
19 |
20 | self.picam2.configure(self.preview_config)
21 | self.picam2.set_controls(self.ctrls_autofocus)
22 | self.picam2.start()
23 | time.sleep(3)
24 |
25 | self.metadata = self.picam2.capture_metadata()
26 | self.get_aeb_params()
27 |
28 | def get_aeb_params(self):
29 | exp = int(self.metadata["ExposureTime"] * self.metadata["AnalogueGain"] * self.metadata["DigitalGain"])
30 | self.aeb_params = {"ColourGains": self.metadata["ColourGains"],
31 | "AEB": [{"exp": exp//2**self.fstops, "gain": 1},
32 | {"exp": exp//2, "gain": 2},
33 | {"exp": exp, "gain": 2**self.fstops}]}
34 |
35 | def capture(self):
36 | # Lock the focus
37 | self.picam2.set_controls(self.ctrls_fixedfocus)
38 |
39 | # take AEB exposures
40 | if self.fstops > 0:
41 | for i in range(3):
42 | ctrls_aeb = {'AnalogueGain': self.aeb_params["AEB"][i]["gain"],
43 | 'ExposureTime': self.aeb_params["AEB"][i]["exp"],
44 | 'ColourGains': self.aeb_params["ColourGains"]}
45 | self.picam2.set_controls(ctrls_aeb)
46 | time.sleep(0.5)
47 |
48 | timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
49 | output_path = os.path.join(self.output_dir, f"image{i}_{timestamp}.jpg")
50 | self.picam2.switch_mode_and_capture_file(self.image_config, output_path)
51 | time.sleep(1)
52 |
53 | # Switch back to continuous autofocus
54 | self.picam2.set_controls(self.ctrls_autofocus)
55 | #time.sleep(2) # Wait for the autofocus to complete
56 |
57 | # take a single exposure
58 | else:
59 | timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
60 | output_path = os.path.join(self.output_dir, f"image_{timestamp}.jpg")
61 | self.picam2.switch_mode_and_capture_file(self.image_config, output_path)
62 |
63 | def close(self):
64 | self.picam2.close()
65 |
66 |
67 | if __name__ == "__main__":
68 | output_dir = "RGB/output"
69 | os.makedirs(output_dir, exist_ok=True)
70 |
71 | hdrcamera = HDRCamera(0, fstops=0, output_dir=output_dir)
72 | hdrcamera.capture()
73 | hdrcamera.close()
74 |
--------------------------------------------------------------------------------
/ToF/lib/depth_utils.py:
--------------------------------------------------------------------------------
1 | import open3d as o3d
2 | import numpy as np
3 | import cv2
4 | import datetime
5 | import os
6 |
7 |
8 | def load_frame(color_path, depth_path, max_depth=4000):
9 | color = cv2.imread(color_path, -1)
10 |
11 | depth_img = cv2.imread(depth_path, -1)
12 | depth = (depth_img / 65536 * max_depth).astype(np.float32)
13 |
14 | return color, depth
15 |
16 |
17 | def save_frame(frame, output_dir, is_depth=False, max_depth=4000):
18 | '''save as 16-bit PNG images'''
19 | timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
20 |
21 | if is_depth:
22 | frame = frame / max_depth * 65536
23 | dtype = np.uint16
24 | filepath = os.path.join(output_dir, f"depth_{timestamp}.png")
25 | else:
26 | dtype = np.uint8
27 | filepath = os.path.join(output_dir, f"amplitude_{timestamp}.png")
28 |
29 | cv2.imwrite(filepath, frame.astype(dtype))
30 |
31 |
32 | def get_intrinsic(shape=(240,180), fov=70):
33 |
34 | def calculate_fov(shape, fov):
35 | aspect_ratio = shape[0] / shape[1]
36 | fov_rad = np.deg2rad(fov)
37 |
38 | hfov_rad = 2 * np.arctan(np.tan(fov_rad / 2) / np.sqrt(1 + (1 / aspect_ratio**2)))
39 | vfov_rad = 2 * np.arctan(np.tan(fov_rad / 2) / np.sqrt(1 + aspect_ratio**2))
40 |
41 | hfov = np.rad2deg(hfov_rad)
42 | vfov = np.rad2deg(vfov_rad)
43 | return [hfov, vfov]
44 |
45 | fovs = calculate_fov(shape, fov)
46 | width, height = shape
47 |
48 | fx = width / (2 * np.tan(0.5 * np.pi * fovs[0] / 180))
49 | fy = height / (2 * np.tan(0.5 * np.pi * fovs[1] / 180))
50 | cx = width / 2
51 | cy = height / 2
52 |
53 | # K = np.array([[fx, 0, cx],
54 | # [0, fy, cy],
55 | # [0, 0, 1]])
56 |
57 | camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
58 | return camera_intrinsic
59 |
60 |
61 | def create_frustum(height=4000, fov=65, aspect_ratio=4/3):
62 | # frustum = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-2000, -1500, 0), max_bound=(2000, 1500, -4000))
63 |
64 | half_height = height
65 | half_width = half_height * np.tan(np.radians(fov / 2))
66 | half_depth = half_width / aspect_ratio
67 |
68 | vertices = [
69 | [0, 0, 0], # Tip of the pyramid
70 | [-half_width, -half_depth, -half_height], # Base vertices
71 | [half_width, -half_depth, -half_height],
72 | [half_width, half_depth, -half_height],
73 | [-half_width, half_depth, -half_height]
74 | ]
75 |
76 | lines = [
77 | [0, 1], [0, 2], [0, 3], [0, 4], # Edges from the tip to the base
78 | [1, 2], [2, 3], [3, 4], [4, 1] # Edges around the base
79 | ]
80 |
81 | colors = [[1, 0, 0] for _ in range(len(lines))] # Red color for all lines
82 |
83 | line_set = o3d.geometry.LineSet()
84 | line_set.points = o3d.utility.Vector3dVector(vertices)
85 | line_set.lines = o3d.utility.Vector2iVector(lines)
86 | line_set.colors = o3d.utility.Vector3dVector(colors)
87 | return line_set
88 |
89 |
90 | def convert_distance_to_zdepth(depth, intrinsic):
91 | height, width = depth.shape
92 | fx = intrinsic.intrinsic_matrix[0, 0]
93 | fy = intrinsic.intrinsic_matrix[1, 1]
94 | cx = intrinsic.intrinsic_matrix[0, 2]
95 | cy = intrinsic.intrinsic_matrix[1, 2]
96 |
97 | x, y = np.meshgrid(np.arange(width), np.arange(height))
98 | x = (x - cx) / fx
99 | y = (y - cy) / fy
100 |
101 | zdepth = (depth / np.sqrt(x**2 + y**2 + 1)).astype(np.float32)
102 | return zdepth
103 |
104 |
105 | def create_rgbd(color, depth):
106 | color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
107 |
108 | depth_image = o3d.geometry.Image(depth)
109 | color_image = o3d.geometry.Image(color)
110 |
111 | rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
112 | color_image, depth_image, depth_scale=1.0, depth_trunc=4000.0, convert_rgb_to_intensity=False)
113 |
114 | return rgbd_image
115 |
116 |
117 | def filter_by_luminance(pcd, confidence=20):
118 | green_channel = np.asarray(pcd.colors)[:, 1]
119 | mask = green_channel >= (confidence / 255.0)
120 | pcd = pcd.select_by_index(np.where(mask)[0])
121 | return pcd
122 |
123 |
124 | def create_visualizer(shape=(640,480), pointsize=2., bgcolor=(0, 0, 0)):
125 | # vis = o3d.visualization.Visualizer()
126 | vis = o3d.visualization.VisualizerWithKeyCallback()
127 |
128 | vis.create_window("Point Cloud", shape[0], shape[1])
129 | render_option = vis.get_render_option()
130 | render_option.point_size = pointsize
131 | render_option.background_color = bgcolor
132 |
133 | view_control = vis.get_view_control()
134 | view_control.set_up([0, -1, 0])
135 | view_control.set_front([0, 0, -1])
136 | view_control.set_lookat([0, 0, 0])
137 | return vis
138 |
--------------------------------------------------------------------------------
/BNO055/receiver/receive.py:
--------------------------------------------------------------------------------
1 | '''
2 | receive BNO055 sensor data from USB
3 | '''
4 |
5 | import serial
6 | import numpy as np
7 | import open3d as o3d
8 | import time
9 |
10 |
11 | class IMU_Visualizer:
12 | def __init__(self):
13 | # Initialize Open3D visualizer
14 | self.vis = o3d.visualization.Visualizer()
15 | self.vis.create_window(width=640, height=480)
16 |
17 | # Create a 3D box to represent the sensor
18 | self.box = o3d.geometry.TriangleMesh.create_box(width=1, height=1, depth=1)
19 | self.box.compute_vertex_normals()
20 | self.box.paint_uniform_color([0.5, 0.5, 0.5]) # Gray color
21 | self.vis.add_geometry(self.box)
22 |
23 | # Create a coordinate frame
24 | self.frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
25 | self.vis.add_geometry(self.frame)
26 |
27 | # Set the camera parameters
28 | ctr = self.vis.get_view_control()
29 | ctr.set_zoom(1.5) # Adjust this value to move the camera further away
30 |
31 | self.prev_acceleration = np.array([0.0, 0.0, 0.0])
32 | self.velocity = np.array([0.0, 0.0, 0.0])
33 | self.position = np.array([0.0, 0.0, 0.0])
34 | self.euler = np.array([0, 0, 0])
35 | self.dt = 0.01
36 |
37 |
38 | def integrate_acceleration(self, acceleration):
39 | acceleration = np.array(acceleration)
40 | self.velocity += 0.5 * (acceleration + self.prev_acceleration) * self.dt
41 | self.position += self.velocity * self.dt
42 | self.prev_acceleration = acceleration
43 | return self.position
44 |
45 |
46 | def update_visualization(self, pos=(0,0,0), euler=(0,0,0)):
47 | # swap the axes to match the Open3D coordinate system
48 | euler = (euler[2], -euler[0], -euler[1])
49 |
50 | euler = np.array(euler)
51 | delta_euler = euler - self.euler
52 | self.euler = euler
53 |
54 | R = self.box.get_rotation_matrix_from_xyz(np.radians(delta_euler))
55 | self.box.rotate(R, center=self.box.get_center())
56 |
57 | self.box.translate(pos, relative=False)
58 |
59 | # Update the visualizer
60 | self.vis.update_geometry(self.box)
61 | self.vis.poll_events()
62 | self.vis.update_renderer()
63 |
64 |
65 | def read_BNO055_from_serial(ser):
66 | package_len = 23 * 4 # 23 floats, each 4 bytes
67 |
68 | # Read bytes until we find the frame marker
69 | while True:
70 | data = ser.read(2)
71 | if data == FRAME_MARKER:
72 | break
73 |
74 | # Preallocate a numpy array for the frame data
75 | data = np.empty(package_len, dtype=np.uint8)  # package_len is already in bytes (23 floats * 4)
76 |
77 | # Read the frame data into the numpy array
78 | for i in range(package_len):
79 | data[i] = ser.read(1)[0]
80 |
81 | # Convert the bytes to a numpy array of floats
82 | values = np.frombuffer(data, dtype=np.float32)
83 |
84 | # Decode the values
85 | temperature = values[0]
86 | acceleration = tuple(values[1:4])
87 | magnetic = tuple(values[4:7])
88 | gyro = tuple(values[7:10])
89 | euler = tuple(values[10:13])
90 | quaternion = tuple(values[13:17])
91 | linear_acceleration = tuple(values[17:20])
92 | gravity = tuple(values[20:23])
93 |
94 | return temperature, acceleration, magnetic, gyro, euler, quaternion, linear_acceleration, gravity
95 |
96 |
97 | if __name__ == "__main__":
98 |
99 | port = 'COM3'
100 | FRAME_MARKER = b'\xAA\xAA'
101 |
102 | imu_visualizer = IMU_Visualizer()
103 |
104 | with serial.Serial(port, 921600, timeout=1) as ser:
105 | while True:
106 | values = read_BNO055_from_serial(ser)
107 |
108 | if values is not None:
109 | temperature, acceleration, magnetic, gyro, euler, quaternion, linear_acceleration, gravity = values
110 |
111 | # print("Temperature:", temperature)
112 | # print("Acceleration:", acceleration, "Length:", len(acceleration))
113 | # print("Magnetic:", magnetic, "Length:", len(magnetic))
114 | # print("Gyro:", gyro, "Length:", len(gyro))
115 | # print("Euler:", euler, "Length:", len(euler))
116 | # print("Quaternion:", quaternion, "Length:", len(quaternion))
117 | # print("Linear acceleration:", linear_acceleration, "Length:", len(linear_acceleration))
118 | # print("Gravity:", gravity, "Length:", len(gravity))
119 |
120 |
121 | # # Integrate the acceleration to get the position
122 | # position = imu_visualizer.integrate_acceleration(acceleration)
123 | # print("Position:", position)
124 |
125 | imu_visualizer.update_visualization(euler=euler)
126 | time.sleep(imu_visualizer.dt)
127 |
128 | else:
129 | print("Resynchronizing...")
130 |
--------------------------------------------------------------------------------
/ToF/depth.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import ArducamDepthCamera as ac
4 | import open3d as o3d
5 |
6 | from lib.depth_utils import save_frame, get_intrinsic, create_frustum, convert_distance_to_zdepth, create_rgbd, filter_by_luminance, create_visualizer
7 |
8 | import os
9 | os.environ["LIBGL_ALWAYS_SOFTWARE"] = "1"
10 |
11 |
12 | def filter_buffer(buffer: np.ndarray, amplitude: np.ndarray) -> np.ndarray:
13 | buffer = np.nan_to_num(buffer)
14 | buffer[amplitude < amplitude_thres] = 0
15 | return buffer
16 |
17 | def on_amplitude_changed(value):
18 | global amplitude_thres
19 | amplitude_thres = value
20 |
21 |
22 | amplitude_thres = 20
23 | colormap_amplitude = False
24 |
25 | output_dir = "ToF/output/"
26 | os.makedirs(output_dir, exist_ok=True)
27 |
28 |
29 | def main(cam_id=0, frame_average=0, save_maps=False, max_depth=4000, fov=70, confidence=20):
30 | cam = ac.ArducamCamera()
31 |
32 | bilateralfilter = False
33 |
34 | ret = 0
35 | ret = cam.open(ac.TOFConnect.CSI, cam_id)
36 | if ret != 0:
37 | print("Failed to open camera. Error code:", ret)
38 | return
39 |
40 | ret = cam.start(ac.TOFOutput.DEPTH)
41 | if ret != 0:
42 | print("Failed to start camera. Error code:", ret)
43 | cam.close()
44 | return
45 |
46 | r = cam.getControl(ac.TOFControl.RANGE)
47 |
48 | info = cam.getCameraInfo()
49 | camera_intrinsic = get_intrinsic(shape=(info.width, info.height), fov=fov)  # get_intrinsic expects (width, height)
50 | print(f"Camera resolution: {info.width}x{info.height}")
51 |
52 | cv2.namedWindow("depth", cv2.WINDOW_AUTOSIZE)
53 | cv2.createTrackbar("amplitude", "depth", amplitude_thres, 255, on_amplitude_changed)
54 |
55 | # Open3D visualization
56 | vis = create_visualizer(pointsize=1.5)
57 |
58 | # Define the camera frustum
59 | frustum = create_frustum()
60 | vis.add_geometry(frustum)
61 |
62 | pcd = o3d.geometry.PointCloud()
63 | vis.add_geometry(pcd)
64 |
65 | exit_flag = False
66 |
67 | def exit_callback(vis):
68 | nonlocal exit_flag  # exit_flag lives in main(), not at module level
69 | exit_flag = True
70 |
71 | vis.register_key_callback(ord("q"), exit_callback)
72 |
73 | frame = cam.requestFrame(2000)
74 | depth_mean = frame.getDepthData()
75 | amplitude_mean = frame.getConfidenceData()
76 | frame_count = 0
77 |
78 | while not exit_flag:
79 | frame = cam.requestFrame(2000)
80 | if frame is not None and isinstance(frame, ac.DepthData):
81 | depth = frame.getDepthData()
82 | amplitude_buf = frame.getConfidenceData()
83 |
84 | depth_mean = (depth_mean * frame_count + depth) / (frame_count + 1)
85 | amplitude_mean = (amplitude_mean * frame_count + amplitude_buf) / (frame_count + 1)
86 | frame_count += 1
87 |
88 | depth = filter_buffer(depth_mean, amplitude_mean)
89 | amplitude_buf = amplitude_mean
90 |
91 | # normalized amplitude buffer
92 | amplitude_clipped = np.clip(amplitude_buf, 0, np.percentile(amplitude_buf, 99))
93 | amplitude_normalized = cv2.normalize(amplitude_clipped, None, 0, 1, cv2.NORM_MINMAX)
94 | amplitude_normalized = (amplitude_normalized * 255.0).astype(np.uint8)
95 | cv2.imshow("amplitude", amplitude_normalized)
96 |
97 | # colorized amplitude
98 | if colormap_amplitude:
99 | amplitude_normalized = cv2.applyColorMap(amplitude_normalized, cv2.COLORMAP_VIRIDIS)
100 |
101 | # colorized depth buffer
102 | colorized_depth = (depth * (255.0 / r)).astype(np.uint8)
103 | colorized_depth = cv2.bitwise_not(colorized_depth)
104 | colorized_depth = filter_buffer(colorized_depth, amplitude_buf)
105 | cv2.imshow("depth", colorized_depth)
106 |
107 | if bilateralfilter:
108 | depth = cv2.bilateralFilter(depth, d=5, sigmaColor=75, sigmaSpace=75)
109 |
110 | # create point cloud
111 | zdepth = convert_distance_to_zdepth(depth, camera_intrinsic)
112 | rgbd_image = create_rgbd(amplitude_normalized, zdepth)
113 | current_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, camera_intrinsic)
114 |
115 | # Filter points based on amplitude
116 | current_pcd = filter_by_luminance(current_pcd, confidence)
117 |
118 | pcd.points = current_pcd.points
119 | pcd.colors = current_pcd.colors
120 |
121 | # Reset after some iterations
122 | if frame_count >= frame_average:
123 | # save 16bit depth and 8bit amplitude frames
124 | if save_maps:
125 | save_frame(amplitude_normalized, output_dir)
126 | save_frame(depth, output_dir, is_depth=True, max_depth=max_depth)
127 |
128 | # save point cloud
129 | pcd_path = os.path.join(output_dir, "pcd.ply")
130 | o3d.io.write_point_cloud(pcd_path, pcd, write_ascii=False)
131 |
132 | depth_mean = depth
133 | amplitude_mean = amplitude_buf
134 | frame_count = 0
135 |
136 | vis.update_geometry(pcd)
137 | vis.poll_events()
138 | vis.update_renderer()
139 | cam.releaseFrame(frame)
140 |
141 | key = cv2.waitKey(1)
142 | if key == ord("q"):
143 | break
144 |
145 | vis.destroy_window()
146 | cv2.destroyAllWindows()
147 |
148 |
149 | if __name__ == "__main__":
150 | main(cam_id=8, frame_average=20, save_maps=False)
151 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Sensorpack
2 |
3 | **! Work in progress !**
4 |
5 | A Raspberry Pi camera array containing depth, thermal and RGB cameras.
6 | The device contains a Pi Pico / Pico 2 to preprocess the thermal sensor data. It might also receive a BNO055 absolute orientation sensor later.
7 |
8 | This is a personal project. I want to gain some experience with the sensors and their calibration and registration.
9 |
10 | I used a Pi 5 as host since two CSI ports are needed, but you might also succeed with a CM4 or a CSI multiplexer.
11 |
12 | ## Hardware
13 |
14 |
15 |
16 | Devices:
17 | - Arducam ToF Camera ([info](https://www.arducam.com/time-of-flight-camera-raspberry-pi/))
18 | - Arducam IMX519 16MP Autofocus ([info](https://www.arducam.com/16mp-autofocus-camera-for-raspberry-pi/), [shop](https://www.arducam.com/product/imx519-autofocus-camera-module-for-raspberry-pi-arducam-b0371/))
19 | - Pimoroni MLX90640 Thermal Camera Breakout 55° ([shop](https://shop.pimoroni.com/products/mlx90640-thermal-camera-breakout?variant=12536948654163))
20 | - Raspberry Pi Pico / Pico 2
21 | - Raspberry Pi 5
22 |
23 | The enclosure is designed in 3ds Max and printed in PETG for durability using PrusaSlicer. Project (.max), export (.obj) and slicer (.3mf) files are included.
24 |
25 |
26 |
27 | ## clone including submodules
28 |
29 | ```
30 | git clone https://github.com/LaserBorg/Sensorpack.git
31 | cd Sensorpack
32 | git submodule update --init --recursive
33 | ```
34 |
35 |
36 | ## build the Arducam Pivariety camera driver for ToF and IMX519
37 |
38 | This is the original forum thread from April 2024 where I tried to get the ToF + 16MP combo running on Bookworm:
39 | https://forum.arducam.com/t/installation-tof-camera-fails-on-bookworm-could-not-open-device-node-dev-video0/5883/29
40 |
41 | First, install the Arducam Pivariety driver for the ToF camera as described ([troubleshooting](https://docs.arducam.com/Raspberry-Pi-Camera/Tof-camera/Troubleshooting/#4-cannot-be-used-on-raspberry-pi5)):
42 | https://docs.arducam.com/Raspberry-Pi-Camera/Tof-camera/Getting-Started/
43 |
44 | Then install the IMX519 driver:
45 | https://docs.arducam.com/Raspberry-Pi-Camera/Native-camera/16MP-IMX519/
46 |
47 | With the IMX519 attached to CSI-0 and the ToF camera to CSI-1, make sure your /boot/firmware/config.txt looks like this:
48 |
49 | dtoverlay=imx519,cam0
50 | dtoverlay=arducam-pivariety
51 |
52 | I used these commands to check if the cameras are connected:
53 |
54 | dmesg | grep arducam
55 | media-ctl -p
56 |
57 | My ToF camera is ID 8, which is required to initialize the camera driver.
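
For reference, this ID is what ToF/depth.py passes when opening the camera:

    main(cam_id=8, frame_average=20, save_maps=False)  # ToF/depth.py
    # ... which internally calls cam.open(ac.TOFConnect.CSI, cam_id)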
58 |
59 | ## Thermal
60 | I modified existing implementations to send the image buffer via USB serial connection to a receiver script on the host computer.
61 | The submodule repo contains forked Pico implementations running:
62 | 
63 | - CircuitPython
64 | - MicroPython
65 | - Pico SDK
66 | - Arduino
67 |
68 | Currently I'm sticking with CircuitPython, simply for ease of use.
69 |
70 | #### Performance
71 | Andre Weinand's [Pico SDK implementation](https://github.com/weinand/thermal-imaging-camera) provides ~23 fps, but unfortunately I'm not fluent with C++ and the Pico SDK.
72 |
73 | MicroPython was ~4 fps and CircuitPython is even worse, so I swapped the Pico for a Pico 2, which improved the performance a bit.
74 |
75 | ## Libcamera and Autofocus
76 |
77 | Some info about libcamera commands:
78 | - https://docs.arducam.com/Raspberry-Pi-Camera/Native-camera/Libcamera-User-Guide/#for-arducam-16mp64mp-autofocus-camera
79 | - https://www.raspberrypi.com/documentation/computers/camera_software.html
80 |
81 | Example rpicam (= libcamera) command with autofocus and fixed exposure and gain for the IMX519:
82 |
83 | rpicam-still --autofocus-mode auto --autofocus-range normal --width 4656 --height 3496 --shutter 500000 --gain 2 -e png -o RGB/image.png
84 |
85 | ## Open3D
86 |
87 |
88 |
89 |
90 | #### point cloud rendering
91 |
92 | I tried to replicate the Arducam pointcloud example (C++) using Python and used the Open3D [visualization examples](https://www.open3d.org/html/python_example/visualization/index.html) as a reference.
93 |
94 | It works well so far, but I realized that the depth buffer contains raw distance readings along each ray, **which implies that its image plane is spherical, not planar**.
95 | Since (I think) Open3D doesn't support distortion coefficients, I tried using OpenCV to undistort the map (_cv2.fisheye.initUndistortRectifyMap_), but haven't calibrated the camera yet, so the necessary coefficients are unknown. **It'd be great if someone could help out here.**
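
A minimal sketch of that undistortion step, assuming the fisheye coefficients `D` were already calibrated: `K` is roughly what `get_intrinsic()` yields for 240x180 at 70° diagonal FoV (fx = fy ≈ 214), `D` is a placeholder, and the depth path is the one used by create_rgbd.py.

```
import cv2
import numpy as np

# intrinsics as derived by get_intrinsic() for 240x180 @ 70 deg diagonal FoV
K = np.array([[214.2,   0.0, 120.0],
              [  0.0, 214.2,  90.0],
              [  0.0,   0.0,   1.0]])
D = np.zeros((4, 1))  # placeholder: the real fisheye coefficients are still unknown

depth = cv2.imread("ToF/output/kitchen_int16.png", -1).astype(np.float32)

# build the remap tables once, then remap every depth frame
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (240, 180), cv2.CV_32FC1)
depth_undistorted = cv2.remap(depth, map1, map2, interpolation=cv2.INTER_NEAREST)
```

Nearest-neighbor interpolation avoids blending depth values across object edges.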
96 |
97 | #### OpenGL
98 | Because the Raspberry Pi only supports OpenGL ES, which seems to be incompatible with Open3D, we need to switch to software rendering:
99 |
100 | import os
101 | os.environ["LIBGL_ALWAYS_SOFTWARE"] = "1"
102 |
103 | ## Alignment
104 |
105 |
106 |
107 | So far there is just a script that roughly aligns the resulting images using simple 2D position / rotation / scale operations; the transform file it expects is sketched below.
108 | The next step should be proper calibration including distortion, then unwarping of the ToF camera, whose image plane seems to be spherical rather than planar.
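
alignment.json itself is not included here; judging from how alignment.py reads it, it should look roughly like this (canvas size plus one 2D transform per image; all values illustrative):

```
{
    "Size": [640, 480],
    "Depth": {"position": [320, 240], "rotation": 0.0, "scale": [2.0, 2.0]},
    "Thermal": {"position": [320, 240], "rotation": 0.0, "scale": [10.0, 10.0]}
}
```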
109 |
110 | ## Joint Bilateral Upscaling
111 |
112 | I plan to upscale Depth (240x180) and Thermal (32x24) data using Joint Bilateral Upscaling (JBU) with RGB as reference. The code is forked from [Martin Zurowietz's](https://github.com/mzur) implementation of Johannes Kopf's [publication](https://johanneskopf.de/publications/jbu/).
113 |
114 | It is located in a separate repo:
115 | https://github.com/LaserBorg/pyJBU
116 |
117 | I used Cython, multiprocessing and single-channel kernels (instead of RGB) to significantly improve execution speed.
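
This is not pyJBU's actual API, but the idea can be sketched with the joint bilateral filter from opencv-contrib-python (paths and filter parameters are illustrative):

```
import cv2

# upscale the low-res depth map to RGB resolution first, then let the
# RGB edges guide a joint bilateral filter (JBU in spirit; pyJBU uses its own kernels)
rgb = cv2.imread("alignment/images/RGB.jpg")
depth = cv2.imread("alignment/images/depth.png", cv2.IMREAD_GRAYSCALE)

upscaled = cv2.resize(depth, (rgb.shape[1], rgb.shape[0]), interpolation=cv2.INTER_LINEAR)
refined = cv2.ximgproc.jointBilateralFilter(rgb, upscaled, d=9, sigmaColor=25, sigmaSpace=7)
```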
118 |
119 | ## BNO055 Absolute Orientation Sensor
120 |
121 | I haven't integrated the sensor in the enclosure design yet, but a basic version of the sender code for CircuitPython (Pico) and the receiver script for the host (Pi 5) are already provided for testing purposes.
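
For reference, the frame format both sides agree on is a 2-byte marker followed by 23 packed float32 values (temperature, then acceleration / magnetic / gyro / Euler triples, a quaternion, and linear-acceleration / gravity triples):

```
import struct

FRAME_MARKER = b'\xAA\xAA'
N_FLOATS = 23  # 1 + 3 + 3 + 3 + 3 + 4 + 3 + 3

# a dummy frame shaped exactly like the ones the Pico streams over USB CDC
frame = FRAME_MARKER + struct.pack('f' * N_FLOATS, *([0.0] * N_FLOATS))
assert len(frame) == 2 + N_FLOATS * 4
```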
--------------------------------------------------------------------------------
/alignment/imx519_calibration.json:
--------------------------------------------------------------------------------
1 | {
2 | "camera_matrix": [
3 | [
4 | 3522.8383507978074,
5 | 0.0,
6 | 2349.2008991192065
7 | ],
8 | [
9 | 0.0,
10 | 3545.645310359797,
11 | 1744.4353951423896
12 | ],
13 | [
14 | 0.0,
15 | 0.0,
16 | 1.0
17 | ]
18 | ],
19 | "distortion_coefficients": [
20 | [
21 | 0.09483031602837731,
22 | -0.32372924501149797,
23 | -0.0005473917868680918,
24 | 0.0013327380908414509,
25 | 0.30231761923881606
26 | ]
27 | ],
28 | "rotation_vectors": [
29 | [
30 | 0.2184478722800586,
31 | 0.025371534070473753,
32 | 1.6374791636240513
33 | ],
34 | [
35 | 0.2610353000419025,
36 | 0.1778456058684364,
37 | 1.5554141703553084
38 | ],
39 | [
40 | 0.025801273290273822,
41 | 0.02794285911814414,
42 | 1.6310887717318385
43 | ],
44 | [
45 | -0.23028199509788916,
46 | -0.2712707013672905,
47 | 1.5938522015905465
48 | ],
49 | [
50 | -0.037646625867693226,
51 | -0.17422102766347217,
52 | 1.557152213000911
53 | ],
54 | [
55 | -0.0013801836688718267,
56 | -0.14004144917401515,
57 | 1.5482886261820539
58 | ],
59 | [
60 | 0.029775638854273402,
61 | -0.11694255453658545,
62 | 1.5861545641132169
63 | ],
64 | [
65 | 0.421173843092505,
66 | -0.46654886281282804,
67 | 1.5232032973003324
68 | ],
69 | [
70 | 0.051107318987111204,
71 | -0.07124462781206249,
72 | 1.5551516319913743
73 | ],
74 | [
75 | -0.058062035105332935,
76 | -0.16313541263408196,
77 | 1.5240886334591766
78 | ],
79 | [
80 | 0.45266670552114835,
81 | 0.4139286923059039,
82 | 1.5274261995776344
83 | ],
84 | [
85 | 0.07463234892154494,
86 | -0.1817001415978214,
87 | 1.5645298054228618
88 | ],
89 | [
90 | 0.03817661515025648,
91 | -0.09283993426606292,
92 | 1.5703829700753218
93 | ],
94 | [
95 | 0.2017937031714222,
96 | 0.052106409667481306,
97 | 1.6086606138230097
98 | ],
99 | [
100 | -0.17438405779235822,
101 | -0.018977718279804646,
102 | 1.6246044871147602
103 | ],
104 | [
105 | 0.13143826301002232,
106 | 0.16381379130339602,
107 | 1.5130653383954045
108 | ],
109 | [
110 | 0.003062609932198246,
111 | 0.07301961533378011,
112 | 1.6081268632373462
113 | ],
114 | [
115 | 0.19843464864040938,
116 | 0.1486788962344926,
117 | 1.5430437759728186
118 | ],
119 | [
120 | 0.215667044822895,
121 | 0.16530065458245313,
122 | 1.5027350707005018
123 | ],
124 | [
125 | 0.10203265974392105,
126 | 0.14023552533000894,
127 | 1.531683237129066
128 | ],
129 | [
130 | 0.10252046878431142,
131 | 0.11850554910882019,
132 | 1.5831429923764448
133 | ],
134 | [
135 | 0.06855432572071371,
136 | 0.00031227722723335195,
137 | 1.562713264450092
138 | ],
139 | [
140 | -0.04654223565139739,
141 | -0.22352230238389953,
142 | 1.522245945182987
143 | ],
144 | [
145 | -0.05417831913155387,
146 | -0.1645900220708523,
147 | 1.5496440286903326
148 | ],
149 | [
150 | 0.07143384645986743,
151 | -0.03778526775503384,
152 | 1.6057270155756485
153 | ],
154 | [
155 | 0.03225210013118339,
156 | 0.17361740026601163,
157 | 1.5224868876025637
158 | ],
159 | [
160 | 0.05446016776425682,
161 | 0.18352103431101774,
162 | 1.537108184564017
163 | ],
164 | [
165 | 0.03831712798332153,
166 | 0.1081122353924485,
167 | 1.5580227588779478
168 | ],
169 | [
170 | 0.39486593392926167,
171 | 0.05617396519105121,
172 | 1.6405674879446959
173 | ],
174 | [
175 | -0.16759998423347178,
176 | -0.19123399702108423,
177 | 1.5825226412735327
178 | ],
179 | [
180 | 0.07925143721797046,
181 | 0.12109102969330333,
182 | 1.5508090054575845
183 | ],
184 | [
185 | 0.1679136716613228,
186 | -0.0544910556435319,
187 | 1.6475435608675746
188 | ],
189 | [
190 | 0.001799079448376279,
191 | -0.06474004892130594,
192 | 1.561796677057659
193 | ],
194 | [
195 | 0.09510663830983998,
196 | 0.11507172194671744,
197 | 1.5258044795200967
198 | ],
199 | [
200 | 0.4432820147741939,
201 | 0.33972653973200717,
202 | 1.5195585023999374
203 | ],
204 | [
205 | 0.07721181072090516,
206 | 0.0890081794172243,
207 | 1.5334002713459303
208 | ],
209 | [
210 | 0.0490773373498453,
211 | 0.0985650597311563,
212 | 1.5421681881010274
213 | ],
214 | [
215 | -0.0031322482704218718,
216 | 0.17584866004475372,
217 | 1.5406355484958318
218 | ],
219 | [
220 | -0.16420688924889973,
221 | -0.20551341445371724,
222 | 1.5899059742624735
223 | ],
224 | [
225 | 0.1284545585734674,
226 | 0.18849721448676046,
227 | 1.4879253470133966
228 | ],
229 | [
230 | -0.009239951277085589,
231 | -0.0351285862346868,
232 | 1.5742972513198061
233 | ],
234 | [
235 | 0.23460993791259752,
236 | -0.07452550208265098,
237 | 1.5911100906796116
238 | ],
239 | [
240 | 0.19045520196947047,
241 | 0.00863454358572551,
242 | 1.609070046943184
243 | ],
244 | [
245 | 0.19253895024765083,
246 | -0.00141157159038379,
247 | 1.6040564055121211
248 | ],
249 | [
250 | 0.21754671116786112,
251 | 0.04907965398805894,
252 | 1.6058616561207093
253 | ],
254 | [
255 | 0.042493366841346684,
256 | 0.010342802820209204,
257 | 1.5600366721909116
258 | ],
259 | [
260 | 0.05194699400974186,
261 | 0.07410705751996503,
262 | 1.529456735677103
263 | ],
264 | [
265 | 0.07926650596333487,
266 | 0.13666910303035165,
267 | 1.5363448513694697
268 | ],
269 | [
270 | -0.09945095615544462,
271 | 0.060090477521591974,
272 | 1.5341148591869411
273 | ]
274 | ],
275 | "translation_vectors": [
276 | [
277 | -9.808648452424526,
278 | -11.446794835895679,
279 | 27.413883476436755
280 | ],
281 | [
282 | 9.44524203934138,
283 | 1.4707982501359256,
284 | 16.928716743630186
285 | ],
286 | [
287 | -1.9207558714095556,
288 | 4.074386108043331,
289 | 20.788384405278162
290 | ],
291 | [
292 | -17.240920922532418,
293 | -14.699509131789101,
294 | 44.050749195207565
295 | ],
296 | [
297 | 18.659205316689167,
298 | 0.7130080381105881,
299 | 32.369130926331
300 | ],
301 | [
302 | 6.572944216972625,
303 | -11.263221311017492,
304 | 28.28779044795826
305 | ],
306 | [
307 | 17.39201818590344,
308 | -7.652782020841271,
309 | 30.06673842102489
310 | ],
311 | [
312 | 3.9615220257617576,
313 | -3.32708518027881,
314 | 10.010282719109656
315 | ],
316 | [
317 | 17.345948018292084,
318 | -12.748841142581202,
319 | 29.387710889377693
320 | ],
321 | [
322 | 10.519727588536874,
323 | -12.45720785990421,
324 | 29.59310835719692
325 | ],
326 | [
327 | 5.111394168414121,
328 | -2.2991965768595914,
329 | 10.307093382179003
330 | ],
331 | [
332 | -16.59601373343999,
333 | -17.688693115628993,
334 | 41.54907922439491
335 | ],
336 | [
337 | 18.489049887250083,
338 | -12.212870365803628,
339 | 29.490692123886365
340 | ],
341 | [
342 | -8.80834375882944,
343 | -10.581750288779476,
344 | 26.954706194575405
345 | ],
346 | [
347 | -14.163238167079712,
348 | -3.428230379781759,
349 | 40.59255143727711
350 | ],
351 | [
352 | -13.199224558923062,
353 | 10.196551505941473,
354 | 33.955943105136825
355 | ],
356 | [
357 | 7.7366327385597815,
358 | -11.207829106875156,
359 | 28.01317753383583
360 | ],
361 | [
362 | 11.530794814290404,
363 | 3.7524836851697025,
364 | 20.980743257638377
365 | ],
366 | [
367 | 17.460690176063288,
368 | 8.490117190601628,
369 | 30.018296957683358
370 | ],
371 | [
372 | 11.748689767670255,
373 | 3.9662487012100485,
374 | 20.883711825414878
375 | ],
376 | [
377 | -12.210913594625808,
378 | 10.474455294589884,
379 | 33.735201627419364
380 | ],
381 | [
382 | 18.599750132163983,
383 | 5.632856634943239,
384 | 31.193172216803553
385 | ],
386 | [
387 | -6.861539488453525,
388 | -18.81117650893643,
389 | 43.302467666961505
390 | ],
391 | [
392 | 8.730676894583931,
393 | -21.130034057316944,
394 | 47.740135675821385
395 | ],
396 | [
397 | -3.043842709955344,
398 | -7.663002616530264,
399 | 19.737247268281816
400 | ],
401 | [
402 | 13.955700191071315,
403 | 9.349590430215642,
404 | 32.65256488150614
405 | ],
406 | [
407 | 6.002602166259428,
408 | 8.806327136876936,
409 | 32.377449679113106
410 | ],
411 | [
412 | -4.153737810607182,
413 | 3.660884048189035,
414 | 20.65407472677772
415 | ],
416 | [
417 | 6.808987447376655,
418 | -10.801527848202237,
419 | 24.215937263570662
420 | ],
421 | [
422 | 3.0654145315469035,
423 | -1.168234532935017,
424 | 11.253852400381096
425 | ],
426 | [
427 | -4.190749539603307,
428 | 3.619699426418464,
429 | 20.31692913829535
430 | ],
431 | [
432 | -1.3520453602623486,
433 | -10.828653918640601,
434 | 27.1546061621024
435 | ],
436 | [
437 | 21.12210302060236,
438 | -21.148668961162677,
439 | 47.620398817962084
440 | ],
441 | [
442 | 12.960833133457411,
443 | 4.150193173637869,
444 | 21.2725406909654
445 | ],
446 | [
447 | 4.885085200821552,
448 | -3.8900291132586653,
449 | 10.033251429144478
450 | ],
451 | [
452 | -13.880571448997284,
453 | 4.924915875741314,
454 | 36.494677265063615
455 | ],
456 | [
457 | -14.733905225614409,
458 | 4.666071439463112,
459 | 36.732691525173536
460 | ],
461 | [
462 | 17.892449519391587,
463 | 8.570836148959334,
464 | 31.026589076911048
465 | ],
466 | [
467 | 2.825037150313135,
468 | -1.1642611955703008,
469 | 11.757706019999015
470 | ],
471 | [
472 | -6.44484828056842,
473 | 10.145151573251187,
474 | 33.96501049424585
475 | ],
476 | [
477 | 29.48620892450574,
478 | -21.49360405862826,
479 | 47.33567388494885
480 | ],
481 | [
482 | -2.270297959012244,
483 | -7.076003985612339,
484 | 19.715705751291985
485 | ],
486 | [
487 | -6.411615865668551,
488 | -9.874692009168065,
489 | 24.68309285305613
490 | ],
491 | [
492 | -7.345848241255197,
493 | -9.921085062558635,
494 | 25.078393636780326
495 | ],
496 | [
497 | -4.293564159754064,
498 | -7.975432075496114,
499 | 21.295694490866772
500 | ],
501 | [
502 | 19.90421364053495,
503 | 1.6827863856722185,
504 | 31.909024910370373
505 | ],
506 | [
507 | 2.432453844961699,
508 | -0.5054322397434713,
509 | 11.520014696151451
510 | ],
511 | [
512 | -12.35076051404047,
513 | 10.208645270309084,
514 | 33.978516935770294
515 | ],
516 | [
517 | -16.818952550327086,
518 | -3.928779018010658,
519 | 40.97241544828305
520 | ]
521 | ]
522 | }
--------------------------------------------------------------------------------