├── LICENSE
├── README.md
├── docs
├── alphasense_setup.md
├── jetson_setup.md
└── raw_image_pipeline.png
├── jenkins-pipeline
├── raw_image_pipeline
├── CMakeLists.txt
├── config
│ ├── alphasense_calib_1.6mp_example.yaml
│ ├── alphasense_calib_example.yaml
│ ├── alphasense_color_calib_example.yaml
│ └── pipeline_params_example.yaml
├── include
│ └── raw_image_pipeline
│ │ ├── modules
│ │ ├── color_calibration.hpp
│ │ ├── color_enhancer.hpp
│ │ ├── debayer.hpp
│ │ ├── flip.hpp
│ │ ├── gamma_correction.hpp
│ │ ├── undistortion.hpp
│ │ ├── vignetting_correction.hpp
│ │ └── white_balance.hpp
│ │ ├── raw_image_pipeline.hpp
│ │ └── utils.hpp
├── package.xml
└── src
│ └── raw_image_pipeline
│ ├── modules
│ ├── color_calibration.cpp
│ ├── color_enhancer.cpp
│ ├── debayer.cpp
│ ├── flip.cpp
│ ├── gamma_correction.cpp
│ ├── undistortion.cpp
│ ├── vignetting_correction.cpp
│ └── white_balance.cpp
│ └── raw_image_pipeline.cpp
├── raw_image_pipeline_python
├── CMakeLists.txt
├── config
│ ├── board_gt.png
│ ├── color_calibration_checker.pdf
│ ├── color_calibration_checker.svg
│ └── color_ref.jpg
├── package.xml
├── scripts
│ ├── apply_pipeline.py
│ └── color_calibration.py
├── setup.py
├── src
│ ├── py_raw_image_pipeline
│ │ └── __init__.py
│ └── raw_image_pipeline_python.cpp
└── thirdparty
│ └── cvnp
│ ├── .github
│ └── workflows
│ │ └── ubuntu.yml
│ ├── .gitignore
│ ├── CMakeLists.txt
│ ├── LICENSE
│ ├── Readme.md
│ ├── conanfile.txt
│ ├── conanfile_pybind_only.txt
│ ├── cvnp
│ ├── cvnp.cpp
│ ├── cvnp.h
│ ├── cvnp_shared_mat.h
│ ├── cvnp_synonyms.cpp
│ ├── cvnp_synonyms.h
│ ├── cvnp_test_helper.cpp
│ └── pydef_cvnp.cpp
│ ├── main
│ └── cvnp_main.cpp
│ ├── requirements.txt
│ └── tests
│ ├── test_cvnp.py
│ └── test_cvnp_cpp.cpp
├── raw_image_pipeline_ros
├── CMakeLists.txt
├── config
│ └── rqt
│ │ └── alphasense.perspective
├── include
│ └── raw_image_pipeline_ros
│ │ └── raw_image_pipeline_ros.hpp
├── launch
│ └── raw_image_pipeline_node.launch
├── nodelet_plugins.xml
├── package.xml
└── src
│ ├── raw_image_pipeline_ros.cpp
│ ├── raw_image_pipeline_ros_node.cpp
│ └── raw_image_pipeline_ros_nodelet.cpp
└── raw_image_pipeline_white_balance
├── CMakeLists.txt
├── LICENSE
├── cfg
└── RawImagePipelineWhiteBalance.cfg
├── data
├── alphasense.png
└── gehler_shi.png
├── include
└── raw_image_pipeline_white_balance
│ └── convolutional_color_constancy.hpp
├── model
└── default.bin
├── package.xml
└── src
├── raw_image_pipeline_white_balance
└── convolutional_color_constancy.cpp
└── raw_image_pipeline_white_balance_node.cpp
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021-2023 ETH Zurich - Robotic Systems Lab (RSL)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RAW Image Pipeline
2 |
3 | Image processing utilities used for cameras that provide RAW data, such as the Alphasense Core unit.
4 |
5 | **Maintainers:** Matias Mattamala (matias@robots.ox.ac.uk)
6 |
7 | **Contributors:** Matias Mattamala, Timon Homberger, Marco Tranzatto, Samuel Zimmermann, Lorenz Wellhausen, Shehryar Khattak, Gabriel Waibel
8 |
9 |
10 |
11 |
12 | ## License
13 | This source code is released under a [MIT License](LICENSE).
14 |
15 | `raw_image_pipeline_white_balance` relies on Shane Yuan's [AutoWhiteBalance package](https://github.com/yuanxy92/AutoWhiteBalance) licensed under [GNU](raw_image_pipeline_white_balance/LICENSE).
16 |
17 | `raw_image_pipeline_python` relies on Pascal Thomet's [cvnp](https://github.com/pthom/cvnp), licensed under [MIT](raw_image_pipeline_python/thirdparty/cvnp/LICENSE) as well.
18 |
19 |
20 | ## Build
21 |
22 | [](https://ci.leggedrobotics.com/job/bitbucket_leggedrobotics/job/alphasense_rsl/job/master/)
23 |
24 | ## Overview
25 | ### Packages
26 | 1. **`raw_image_pipeline`**: ROS-independent implementation of the pipeline.
27 | 2. **`raw_image_pipeline_python`**: Python bindings for `raw_image_pipeline`.
28 | 3. **`raw_image_pipeline_ros`**: ROS interface to run the processing pipeline.
29 | 4. **`raw_image_pipeline_white_balance`**: Additional white balance algorithm built upon Shane Yuan's [code](https://github.com/yuanxy92/AutoWhiteBalance), based on Barron's ([1](https://arxiv.org/abs/1507.00410), [2](https://arxiv.org/abs/1611.07596)).
30 |
31 | ## Pipeline
32 |
33 | The package implements different modules that are chained together to process the RAW images. Each can be disabled and the image will be processed by the subsequent modules.
34 | * **Debayer**: `auto`, `bayer_bggr8`, `bayer_gbrg8`, `bayer_grbg8`, `bayer_rggb8`
35 | * **Flip**: Flips the image 180 degrees
36 | * **White balance**: `simple`, `grey_world`, `learned` (from [OpenCV](https://docs.opencv.org/4.x/df/db9/namespacecv_1_1xphoto.html)), `ccc` (from `raw_image_pipeline_white_balance` package), `pca` (custom implementation)
37 | * **Color correction**: Simple color correction based on a mixing BGR 3x3 matrix.
38 | * **Gamma correction**: `default` (from OpenCV), `custom` (custom implementation)
39 | * **Vignetting correction**: Removes the darkening effect of the lens toward the edges of the image by applying a polynomial mask.
40 | * **Color enhancement**: Converts the image to HSV and applies a gain to the S (saturation) channel.
41 | * **Undistortion**: Corrects the image given the camera calibration file.
42 |
43 |
44 |
45 |
46 |
47 | Detailed modules description
48 |
49 |
50 | ### Debayer Module
51 | This module demosaics a Bayer-encoded image into BGR values (following OpenCV's convention). It relies on OpenCV's methods for both CPU and GPU.
52 |
53 | **Parameters**
54 | - `debayer/enabled`: Enables the module. `True` by default
55 | - `debayer/encoding`: Encoding of the incoming image. `auto` uses the encoding field of the ROS message
56 | - Values: `auto`, `bayer_bggr8`, `bayer_gbrg8`, `bayer_grbg8`, `bayer_rggb8`
57 |
58 | ### Flip
59 | This flips the image 180 degrees. Just that.
60 |
61 | **Parameters**
62 | - `flip/enabled`: Enables the module. `False` by default
63 |
64 | ### White Balance
65 | It automatically corrects white balance using different available algorithms.
66 |
67 | **Parameters**
68 | - `white_balance/enabled`: Enables the module. `False` by default
69 | - `white_balance/method`: To select the method used for automatic white balance
70 | - `simple`: from [OpenCV](https://docs.opencv.org/4.x/df/db9/namespacecv_1_1xphoto.html). Tends to saturate colors depending on the clipping percentile.
71 | - `grey_world`: from [OpenCV](https://docs.opencv.org/4.x/df/db9/namespacecv_1_1xphoto.html)
72 | - `learned`: from [OpenCV](https://docs.opencv.org/4.x/df/db9/namespacecv_1_1xphoto.html)
73 | - `ccc`: from `raw_image_pipeline_white_balance` package
74 | - `pca`: custom implementation
75 | - `white_balance/clipping_percentile`: Used in `simple` method
76 | - Values: between 0 and 100
77 | - `white_balance/saturation_bright_thr`: Used in `grey_world`, `learned` and `ccc` methods
78 | - Values: Between 1.0 and 0.0. E.g. 0.8 means that all the values above 0.8*255 (8 bit images) are discarded for white balance determination.
79 | - `white_balance/saturation_dark_thr`: Used in `grey_world`, `learned` and `ccc` methods
80 | - Values: Between 1.0 and 0.0. E.g. 0.2 means that all the values below 0.2*255 (8 bit images) are discarded.
81 | - `white_balance/temporal_consistency`: Only for `ccc`. `False` by default. It uses a Kalman filter to smooth the illuminant estimate.
82 |
83 | ### Color calibration
84 | It applies a fixed affine transformation to each BGR pixel to mix and add a bias term to the color channels. It can be a replacement for the white balance module.
85 |
86 | - `color_calibration/enabled`: Enables the module. `False` by default
87 | - `color_calibration/calibration_file`: A YAML file with the color calibration matrix and bias. Example [file](raw_image_pipeline/config/alphasense_color_calib_example.yaml). This file can be obtained using a script in the `raw_image_pipeline_python` package: [`color_calibration.py`](raw_image_pipeline_python/scripts/color_calibration.py). To run it, we require a set of images capturing a calibration board ([example](raw_image_pipeline_python/config/color_calibration_checker.pdf)): a reference image `ref.png` ([example](raw_image_pipeline_python/config/color_ref.jpg)) and a collection of images from the camera to be calibrated. The usage is:
88 |
89 | ```sh
90 | color_calibration.py [-h] -i INPUT -r REF [-o OUTPUT_PATH] [-p PREFIX] [--loss LOSS] [--compute_bias]
91 |
92 | Performs color calibration between 2 images, using ArUco 4X4
93 |
94 | optional arguments:
95 | -h, --help show this help message and exit
96 | -i INPUT, --input INPUT
97 | Input image (to be calibrated), or folder with reference images
98 | -r REF, --ref REF Reference image to perform the calibration
99 | -o OUTPUT_PATH, --output_path OUTPUT_PATH
100 | Output path to store the file. Default: current path
101 | -p PREFIX, --prefix PREFIX
102 | Prefix for the calibration file. Default: none
103 | --loss LOSS Loss used in the optimization. Options: 'linear', 'soft_l1', 'huber', 'cauchy', 'arctan'
104 | --compute_bias If bias should be computed
105 | ```
106 |
107 | > :warning: This feature is experimental and it is not recommended for 'serious' applications.
108 |
109 | ### Gamma correction
110 | It applies a gamma correction to improve illumination.
111 |
112 | **Parameters**
113 | - `gamma_correction/enabled`: Enables the module. `False` by default
114 | - `gamma_correction/method`: To select the method used for gamma correction
115 | - `default`: correction from OpenCV (CUDA only)
116 | - `custom`: a custom implementation based on a look-up table.
117 | - `gamma_correction/k`: Gamma factor: >1 is a forward gamma correction that makes the image darker; <1 is an inverse correction that increases brightness.
118 | ### Vignetting correction
119 | It applies a polynomial illumination compensation to overcome the barrel effect of wide-angle lenses:
120 | `s * (r^2 * a2 + r^4 * a4)`, with `r` the distance to the image center.
121 |
122 | **Parameters**
123 | - `vignetting_correction/enabled`: Enables the module. `False` by default
124 | - `vignetting_correction/scale`: s value
125 | - `vignetting_correction/a2`: 2nd-order factor
126 | - `vignetting_correction/a4`: 4th-order factor
127 |
128 | > :warning: This feature is experimental and it is not recommended for 'serious' applications.
129 |
130 | ### Color enhancement
131 | It increases the saturation of the colors by transforming the image to HSV and applying a linear gain.
132 |
133 | **Parameters**
134 | - `color_enhancer/enabled`: Enables the module. `False` by default
135 | - `color_enhancer/saturation_gain`: A factor to increase the saturation channel of the HSV channel.
136 |
137 |
138 | ### Undistortion
139 | It undistorts the image following a pinhole model. It requires the intrinsic calibration from Kalibr.
140 |
141 | - `undistortion/enabled`: Enables the module. `False` by default
142 | - `undistortion/calibration_file`: Camera calibration from Kalibr, following the format of the [example file](raw_image_pipeline/config/alphasense_calib_1.6mp_example.yaml)
143 |
144 |
145 |
146 |
147 | ## Requirements and compilation
148 | ### Dependencies
149 |
150 | ```sh
151 | sudo apt install libyaml-cpp-dev
152 | ```
153 |
154 | ```sh
155 | cd ~/git
156 | git clone git@github.com:catkin/catkin_simple.git
157 | git clone git@github.com:ethz-asl/glog_catkin.git
158 | git clone git@github.com:leggedrobotics/pybind11_catkin.git
159 | cd ~/catkin_ws/src
160 | ln -s ../../git/catkin_simple .
161 | ln -s ../../git/glog_catkin .
162 | ln -s ../../git/pybind11_catkin .
163 | ```
164 |
165 | If you need CUDA support, you need to build OpenCV with CUDA. Check the [instructions below](#cuda-support)
166 |
167 | ### Build raw_image_pipeline_ros
168 |
169 | To build the ROS package:
170 | ```sh
171 | catkin build raw_image_pipeline_ros
172 | ```
173 |
174 | If you also need the Python bindings, run:
175 | ```sh
176 | catkin build raw_image_pipeline_python
177 | ```
178 |
179 | ### CUDA support
180 | If you are using a Jetson or another GPU-enabled computer and want to exploit the GPU, you need to compile OpenCV with CUDA support. Clone the [opencv_catkin](https://github.com/ori-drs/opencv_catkin) package, which setups **OpenCV 4.2 by default**.
181 | ```sh
182 | cd ~/git
183 | git clone git@github.com:ori-drs/opencv_catkin.git
184 | cd ~/catkin_ws/src
185 | ln -s ../../git/opencv_catkin .
186 | cd ~/catkin_ws
187 | ```
188 |
189 | > :warning: Before compiling, you need to confirm the _compute capability_ of your NVidia GPU, which you can check in [this website](https://developer.nvidia.com/cuda-gpus) or the [CUDA wikipedia page](https://en.wikipedia.org/wiki/CUDA).
190 |
191 | #### Compilation on Jetson Xavier board (compute capability 7.2)
192 | ```
193 | catkin build opencv_catkin --cmake-args -DCUDA_ARCH_BIN=7.2
194 | source devel/setup.bash
195 | ```
196 |
197 | #### Compilation on Jetson Orin board (compute capability 8.7)
198 | ```
199 | catkin build opencv_catkin --cmake-args -DCUDA_ARCH_BIN=8.7
200 | source devel/setup.bash
201 | ```
202 |
203 | #### Compilation on other platforms (e.g. laptops, desktops)
204 | There are some extra considerations if you plan to compile OpenCV with CUDA in your working laptop/desktop:
205 | 1. **Compute capability may be different for your GPU:** Please check the aforementioned websites to set the flag correctly.
206 | 2. **The opencv_catkin default flags are the minimum:** Graphical support libraries (such as GTK) are disabled, so you cannot use methods such as `cv::imshow`. If you want to enable it, you can check the flags in the CMakeLists of `opencv_catkin`
207 | 3. **Default OpenCV version is 4.2:** The package installs by default OpenCV 4.2, which was the version compatible with ROS melodic. This can be changed by modifying the CMakeLists of `opencv_catkin` as well.
208 |
209 | OpenCV's compilation will take a while - get a coffee in the meantime. When it's done, you can rebuild `raw_image_pipeline_ros`.
210 |
211 |
212 | #### Troubleshooting
213 | * If you get errors due to `glog`, remove `glog_catkin`, compile `opencv_catkin` using the system's glog, and _then_ build `raw_image_pipeline_ros` (which will compile `glog_catkin`)
214 | * If OpenCV fails due to CUDA errors, confirm that you compiled using the right compute capability for your GPU.
215 | * If you are using older versions of CUDA (10.x and before), they may require older GCC versions. For example, to use GCC 7 you can use:
216 | ``` sh
217 | catkin build opencv_catkin --cmake-args -DCUDA_ARCH_BIN= -DCMAKE_C_COMPILER=/usr/bin/gcc-7
218 | ```
219 |
220 | ## Run the node
221 | To run, we use the same launch file as before:
222 | ```sh
223 | roslaunch raw_image_pipeline_ros raw_image_pipeline_node.launch
224 | ```
225 |
226 | This launchfile was setup for Alphasense cameras. The parameters can be inspected in the [launch file itself](raw_image_pipeline_ros/launch/raw_image_pipeline_node.launch).
227 |
228 | ## Alphasense-specific info
229 | ### Setup
230 | Please refer to [**Alphasense Setup**](docs/alphasense_setup.md) for the instructions to setup the host PC where the Alphasense will be connected.
231 | For further information you can refer the [official manual](https://github.com/sevensense-robotics/alphasense_core_manual/).
232 |
--------------------------------------------------------------------------------
/docs/alphasense_setup.md:
--------------------------------------------------------------------------------
1 | # Alphasense software setup (host PC)
2 |
3 | The instructions below must be executed in the host PC (i.e., the PC where you will connect the Alphasense).
4 |
5 |
6 | ## Install Sevensense's Alphasense drivers
7 |
8 | The drivers you install in the host PC must match the firmware version on Alphasense you're using. If not, you'll get:
9 |
10 | 1. Host errors when trying to launch the drivers (usually because the driver is older than the firmware)
11 | 2. Firmware errors asking to update it (when the firmware is older than the drivers)
12 |
13 | Bear this in mind before installing the drivers on your host PC.
14 |
15 | ### Latest version of drivers:
16 |
17 | ```sh
18 | curl -Ls https://deb.7sr.ch/pubkey.gpg | sudo apt-key add -
19 | echo "deb [arch=amd64] https://deb.7sr.ch/alphasense focal main" | sudo tee /etc/apt/sources.list.d/sevensense.list
20 | sudo apt update
21 | sudo apt install alphasense-driver-core alphasense-firmware ros-noetic-alphasense-driver-ros ros-noetic-alphasense-driver
22 | echo -e "net.core.rmem_max=11145728" | sudo tee /etc/sysctl.d/90-increase-network-buffers.conf
23 | ```
24 |
25 | ### SubT/Glimpse-compatible drivers:
26 | We need to change the PPA to `alphasense/1.0` and manually install the compatible version:
27 |
28 | ```sh
29 | curl -Ls https://deb.7sr.ch/pubkey.gpg | sudo apt-key add -
30 | echo "deb [arch=amd64] https://deb.7sr.ch/alphasense/1.0 focal main" | sudo tee /etc/apt/sources.list.d/sevensense.list
31 | sudo apt update
32 | sudo apt install alphasense-driver-core=1.7.2-0~build83~focal \
33 | alphasense-firmware=1.7.2-0~build82~focal \
34 | ros-noetic-alphasense-driver-ros=1.7.2-0~build81~focal \
35 | ros-noetic-alphasense-driver=1.7.2-0~build79~focal
36 | echo -e "net.core.rmem_max=11145728" | sudo tee /etc/sysctl.d/90-increase-network-buffers.conf
37 | ```
38 |
39 | ### Alphasense viewer
40 |
41 | If you also need the alphasense viewer:
42 |
43 | ### Latest
44 | ```sh
45 | sudo apt install alphasense-viewer
46 | ```
47 |
48 | ### SubT/Glimpse
49 | ```sh
50 | sudo apt install alphasense-viewer=1.7.2-0~build81~focal
51 | ```
52 |
53 | ## Network configuration
54 |
55 | ### Identify the ethernet interface
56 | Identify the ethernet interface (we will call it `INTERFACE_NAME_ALPHASENSE` onwards) you'll use to connect to the Alphasense. e.g `enp4s0`, `eno0`, `eth0`
57 |
58 | ### PTP and PHC2SYS Configurations
59 |
60 | Instructions on how to setup PTP time sync; steps to be executed on NPC:
61 |
62 | ```
63 | sudo apt install linuxptp ethtool
64 | ```
65 |
66 | Modify `/etc/linuxptp/ptp4l.conf` and
67 | (A) Change line regarding `clockClass` to:
68 | ```
69 | clockClass 128
70 | ```
71 |
72 | (B) append the following lines at the end of the file, replacing `INTERFACE_NAME_ALPHASENSE` by the interface name:
73 | ```sh
74 | # RSL Customization
75 | boundary_clock_jbod 1
76 | [INTERFACE_NAME_ALPHASENSE]
77 | ```
78 |
79 | Example:
80 | ```sh
81 | # RSL Customization
82 | boundary_clock_jbod 1
83 | [enp4s0]
84 | ```
85 |
86 | Create a systemd drop-in directory to override the system service file:
87 | ```
88 | sudo mkdir -p /etc/systemd/system/ptp4l.service.d
89 | ```
90 |
91 | Create a file at `/etc/systemd/system/ptp4l.service.d/override.conf` with the following contents:
92 | ```sh
93 | [Service]
94 | ExecStart=
95 | ExecStart=/usr/sbin/ptp4l -f /etc/linuxptp/ptp4l.conf
96 | ```
97 |
98 | Restart the ptp4l service so the change takes effect:
99 | ```sh
100 | sudo systemctl daemon-reload
101 | sudo systemctl restart ptp4l
102 | sudo systemctl status ptp4l
103 | ```
104 |
105 | Enable the ptp4l service:
106 | ```sh
107 | sudo systemctl enable ptp4l.service
108 | ```
109 |
110 | ## Alphasense - phc2sys
111 | Create a systemd drop-in directory to override the system service file:
112 | ```sh
113 | sudo mkdir -p /etc/systemd/system/phc2sys.service.d
114 | ```
115 |
116 | Create a file at `/etc/systemd/system/phc2sys.service.d/override.conf` with the following contents, replacing `INTERFACE_NAME_ALPHASENSE` by the interface name:
117 | ```sh
118 | [Service]
119 | ExecStart=
120 | ExecStart=/usr/sbin/phc2sys -c INTERFACE_NAME_ALPHASENSE -s CLOCK_REALTIME -w -O 0
121 | ```
122 |
123 | Restart the phc2sys service so the change takes effect:
124 | ```sh
125 | sudo systemctl daemon-reload
126 | sudo systemctl restart phc2sys
127 | sudo systemctl status phc2sys
128 | ```
129 |
130 | Enable the phc2sys service:
131 | ```sh
132 | sudo systemctl enable phc2sys.service
133 | ```
134 |
135 | ## Recommendations
136 | ### Maximize Network Performance
137 | Follow instructions at: [maximize_network_performance.md](https://github.com/sevensense-robotics/alphasense_core_manual/blob/master/pages/maximize_network_performance.md)
138 |
139 | Hints:
140 | To permanently set 'mtu' to 7260 (see `mtu: XXX`) of ethernet interface where alphasense is connected, netplan config is modified as follow:
141 |
142 | ```sh
143 | network:
144 | version: 2
145 | renderer: networkd
146 | ethernets:
147 | enp4s0:
148 | dhcp4: no
149 | mtu: XXX # this is where you can set mtu in netplan config file
150 | addresses: [192.168.20.1/24]
151 | ```
152 |
153 | ### Increase Network Buffer
154 | ```sh
155 | echo -e "net.core.rmem_max=11145728" | sudo tee /etc/sysctl.d/90-increase-network-buffers.conf
156 | ```
157 | Above command taken from "getting started" github [page](https://github.com/sevensense-robotics/alphasense_core_manual/blob/master/pages/getting_started.md#1-launching-the-standalone-viewer).
158 |
159 | Settings for ANYmal Cerberus (23/11/2020)
160 | ```sh
161 | - net.core.netdev_budget=600
162 | - net.core.netdev_max_backlog=2000
163 | - mtu 7260
164 | - net.core.rmem_max=11145728
165 | ```
166 |
167 | ### Time Synchronization
168 | The settings for [time_synchronization.md](https://github.com/sevensense-robotics/alphasense_core_manual/blob/master/pages/time_synchronization.md) over PTP are included in the section [PTP and PHC2SYS Configurations](#ptp-and-phc2sys-configurations).
169 |
--------------------------------------------------------------------------------
/docs/jetson_setup.md:
--------------------------------------------------------------------------------
1 | > :warning: **The instructions below are not recommended. Use the [`opencv_catkin`](../README.md#requirements-and-compilation) approach instead.**
2 |
3 |
4 | ## Setting up the Jetson
5 |
6 | Instructions on how to set up the Jetson for use with alphasense_rsl resources
7 |
8 | ### Dependencies
9 |
10 | ```sh
11 | sudo apt install ros-melodic-eigen-conversions
12 | ```
13 |
14 | ### Compilation and installation of xphoto from OpenCV 4.2.0
15 |
16 | Note: This is needed if compiling `debayer_cuda` on Jetson machines, that have Jetpack 4.5 with ROS Melodic installed (OpenCV version must be > 4.2.0).
17 |
18 | #### Requirements
19 |
20 | ```sh
21 | git clone git@github.com:opencv/opencv_contrib.git -b 4.2.0
22 | git clone git@github.com:opencv/opencv.git -b 4.2.0
23 | ```
24 |
25 | #### Compilation and installation
26 | This installs only xphoto and its dependencies in `.local_opencv_xphoto`
27 |
28 | ```sh
29 | $ cd ~/git
30 | $ ./git_make_shared.sh
31 | $ mkdir ~/git/opencv/build && cd ~/git/opencv/build
32 | $ mkdir ~/.local_opencv_xphoto
33 | $ cmake -DOPENCV_EXTRA_MODULES_PATH=~/git/opencv_contrib/modules -DBUILD_LIST=xphoto -DCMAKE_INSTALL_PREFIX=~/.local_opencv_xphoto -DENABLE_PRECOMPILED_HEADERS=OFF ..
34 | $ make -j5
35 | $ make install
36 | ```
37 |
38 | Then set the variable `LOCAL_OPENCV_INSTALL` in CMakeLists.txt of `debayer_cuda` to true.
--------------------------------------------------------------------------------
/docs/raw_image_pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leggedrobotics/raw_image_pipeline/556c315a256061ef11bf52a0a74a87736cffe905/docs/raw_image_pipeline.png
--------------------------------------------------------------------------------
/jenkins-pipeline:
--------------------------------------------------------------------------------
1 | library 'continuous_integration_pipeline'
2 | ciPipeline("--environment anymal --cmake-args '-DCMAKE_BUILD_TYPE=Release -DCUDA_ARCH_BIN=7.2' --dependencies 'git@github.com:ori-drs/opencv_catkin.git;master'")
--------------------------------------------------------------------------------
/raw_image_pipeline/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | # Licensed under the MIT license. See LICENSE file in the project root for details.
4 | #
5 |
6 | cmake_minimum_required(VERSION 3.1)
7 | project(raw_image_pipeline)
8 |
9 | set(CMAKE_CXX_STANDARD 14)
10 | set(CMAKE_CXX_FLAGS "-std=c++14 -Wall -Wextra -O3 -DNDEBUG")
11 |
12 | set(PACKAGE_DEPENDENCIES
13 | eigen_conversions
14 | raw_image_pipeline_white_balance
15 | pybind11_catkin
16 | )
17 |
18 | find_package(catkin REQUIRED
19 | COMPONENTS
20 | ${PACKAGE_DEPENDENCIES}
21 | )
22 | # YAML-cpp
23 | find_package(yaml-cpp REQUIRED)
24 |
25 | find_package(Eigen3 REQUIRED)
26 | message("Eigen Version:" ${EIGEN3_VERSION_STRING})
27 | message("Eigen Path:" ${Eigen3_DIR})
28 |
29 | find_package(OpenCV REQUIRED)
30 | message("OpenCV version: ${OpenCV_VERSION}") # fixed: variable exported by OpenCVConfig.cmake is OpenCV_VERSION (case-sensitive); OpenCV_Version is always empty
31 | message("OpenCV path: ${OpenCV_DIR}")
32 |
33 | # Check if OpenCV was compiled with CUDA
34 | if(OpenCV_CUDA_VERSION) # fixed: if(${VAR}) expands to the bare, invalid if() when VAR is unset (CPU-only OpenCV); referencing the variable by name is safe either way
35 | add_definitions (-DHAS_CUDA)
36 | find_package(CUDA)
37 | set(CUDA_npp_LIBRARIES
38 | ${CUDA_nppc_LIBRARY}
39 | ${CUDA_nppial_LIBRARY}
40 | ${CUDA_nppicc_LIBRARY}
41 | ${CUDA_nppidei_LIBRARY}
42 | ${CUDA_nppif_LIBRARY}
43 | ${CUDA_nppig_LIBRARY}
44 | ${CUDA_nppim_LIBRARY}
45 | ${CUDA_nppist_LIBRARY}
46 | ${CUDA_nppisu_LIBRARY}
47 | ${CUDA_nppitc_LIBRARY}
48 | ${CUDA_npps_LIBRARY}
49 | )
50 | else()
51 | set(CUDA_npp_LIBRARIES "")
52 | set(CUDA_INCLUDE_DIRS "")
53 | endif()
54 |
55 | catkin_package(
56 | INCLUDE_DIRS include
57 | LIBRARIES ${PROJECT_NAME}
58 | CATKIN_DEPENDS
59 | ${PACKAGE_DEPENDENCIES}
60 | DEPENDS
61 | OpenCV
62 | YAML_CPP
63 | )
64 |
65 | include_directories(
66 | include
67 | ${catkin_INCLUDE_DIRS}
68 | ${OpenCV_INCLUDE_DIRS}
69 | ${CUDA_INCLUDE_DIRS}
70 | )
71 |
72 | add_library(${PROJECT_NAME} SHARED
73 | src/raw_image_pipeline/modules/color_calibration.cpp
74 | src/raw_image_pipeline/modules/color_enhancer.cpp
75 | src/raw_image_pipeline/modules/debayer.cpp
76 | src/raw_image_pipeline/modules/flip.cpp
77 | src/raw_image_pipeline/modules/gamma_correction.cpp
78 | src/raw_image_pipeline/modules/undistortion.cpp
79 | src/raw_image_pipeline/modules/vignetting_correction.cpp
80 | src/raw_image_pipeline/modules/white_balance.cpp
81 | src/raw_image_pipeline/raw_image_pipeline.cpp
82 | )
83 |
84 | target_link_libraries(${PROJECT_NAME}
85 | ${catkin_LIBRARIES}
86 | ${CUDA_npp_LIBRARIES}
87 | ${OpenCV_LIBS}
88 | ${YAML_CPP_LIBRARIES}
89 | )
90 |
91 | # Mark executables and/or libraries for installation
92 | install(
93 | TARGETS ${PROJECT_NAME}
94 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
95 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
96 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
97 | )
98 |
99 | # Mark cpp header files for installation
100 | install(
101 | DIRECTORY include/${PROJECT_NAME}/
102 | DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
103 | FILES_MATCHING PATTERN "*.hpp"
104 | )
105 |
--------------------------------------------------------------------------------
/raw_image_pipeline/config/alphasense_calib_1.6mp_example.yaml:
--------------------------------------------------------------------------------
1 | # Refer to this link: http://wiki.ros.org/camera_calibration_parsers for the format of this file.
2 | # Created date: Oct 15, 2019.
3 | image_width: 1440
4 | image_height: 1080
5 | camera_name: cam0_sensor_frame
6 | camera_matrix:
7 | rows: 3
8 | cols: 3
9 | data: [699.2284099702, 0.0, 711.8009584441, 0.0, 698.546880367, 524.7993478318, 0.0, 0.0, 1.0]
10 | distortion_model: equidistant
11 | distortion_coefficients:
12 | rows: 1
13 | cols: 4
14 | data: [-0.0480706813, 0.0129997684, -0.0112199955, 0.0026955514]
15 | rectification_matrix:
16 | rows: 3
17 | cols: 3
18 | data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
19 | projection_matrix:
20 | rows: 3
21 | cols: 4
22 | data: [699.2284099702, 0.0, 711.8009584441, 0.0, 0.0, 698.546880367, 524.7993478318, 0.0, 0.0, 0.0, 1.0, 0.0]
23 |
24 |
--------------------------------------------------------------------------------
/raw_image_pipeline/config/alphasense_calib_example.yaml:
--------------------------------------------------------------------------------
1 | # Refer to this link: http://wiki.ros.org/camera_calibration_parsers for the format of this file.
2 | # Created date: Oct 15, 2019.
3 | image_width: 720
4 | image_height: 540
5 | camera_name: cam0_sensor_frame
6 | camera_matrix:
7 | rows: 3
8 | cols: 3
9 | data: [347.548139773951, 0.0, 342.454373227748, 0.0, 347.434712422309, 271.368057185649, 0.0, 0.0, 1.0]
10 | distortion_model: equidistant
11 | distortion_coefficients:
12 | rows: 1
13 | cols: 4
14 | data: [-0.0396482888762527, -0.00367688950406141, 0.00391742438164282, -0.00178738156007817]
15 | rectification_matrix:
16 | rows: 3
17 | cols: 3
18 | data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
19 | projection_matrix:
20 | rows: 3
21 | cols: 4
22 | data: [347.548139773951, 0.0, 342.454373227748, 0.0, 0.0, 347.434712422309, 271.368057185649, 0.0, 0.0, 0.0, 1.0, 0.0]
23 |
--------------------------------------------------------------------------------
/raw_image_pipeline/config/alphasense_color_calib_example.yaml:
--------------------------------------------------------------------------------
1 | matrix:
2 | rows: 3
3 | cols: 3
4 | # data: [1.6016861, -1.0104847, 0.2213647, -0.012442476, 1.0841908, -0.07573348, 0.09123942, -0.8107389, 1.8687756]
5 | # data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
6 | # data: [1.438429, 0.077131405, -0.20208795, -0.5693435, 2.9333502, -0.60181355, -0.3423824, -0.41744354, 2.2011373]
7 | data: [2.4276948, 0.21479778, -0.30818, 0.09277014, 1.1962607, -0.09772757, -0.24436986, -0.22239459, 2.099912]
8 | # data: [1.5758197, -0.0010973621, 0.1050253, -0.9788821, 1.0417756, -0.30694953, 0.21349414, -0.04809823, 1.1955975]
9 |
10 | bias:
11 | rows: 3
12 | cols: 1
13 | data: [0.0, 0.0, 0.0]
--------------------------------------------------------------------------------
/raw_image_pipeline/config/pipeline_params_example.yaml:
--------------------------------------------------------------------------------
1 | # Pipeline options
2 | debayer:
3 | enabled: true
4 | encoding: "auto"
5 |
6 | flip:
7 | enabled: false
8 | angle: 0
9 |
10 | white_balance:
11 | enabled: true
12 | method: "ccc"
13 | clipping_percentile: 20
14 | saturation_bright_thr: 0.8
15 | saturation_dark_thr: 0.2
16 | temporal_consistency: false
17 |
18 | color_calibration:
19 | enabled: false
20 |
21 | gamma_correction:
22 | enabled: false
23 | method: "custom"
24 | k: 0.8
25 |
26 | vignetting_correction:
27 | enabled: false
28 | scale: 1.5
29 | a2: 1e-3
30 | a4: 1e-6
31 |
32 | color_enhancer:
33 | hue_gain: 1.0
34 | saturation_gain: 1.5
35 | value_gain: 1.0
36 |
37 | undistortion:
38 | enabled: true
39 | balance: 0.0
40 | fov_scale: 0.8 #1.2
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/color_calibration.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | #ifdef HAS_CUDA
14 | #include
15 | #include
16 | #include
17 | #endif
18 |
19 | namespace raw_image_pipeline {
20 |
21 | class ColorCalibrationModule {
22 | public:
23 | ColorCalibrationModule(bool use_gpu);
24 | void enable(bool enabled);
25 | bool enabled() const;
26 |
27 | //-----------------------------------------------------------------------------
28 | // Setters
29 | //-----------------------------------------------------------------------------
30 | void setCalibrationMatrix(const std::vector& color_calibration_matrix);
31 | void setCalibrationBias(const std::vector& color_calibration_bias);
32 |
33 | //-----------------------------------------------------------------------------
34 | // Getters
35 | //-----------------------------------------------------------------------------
36 | cv::Mat getCalibrationMatrix() const;
37 | cv::Mat getCalibrationBias() const;
38 |
39 | //-----------------------------------------------------------------------------
40 | // Main interface
41 | //-----------------------------------------------------------------------------
42 | template
43 | bool apply(T& image) {
44 | if (!enabled_) {
45 | return false;
46 | }
47 | if (image.channels() != 3) {
48 | return false;
49 | }
50 | if (!calibration_available_) {
51 | std::cout << "No calibration available!" << std::endl;
52 | return false;
53 | }
54 | colorCorrection(image);
55 | return true;
56 | }
57 |
58 | //-----------------------------------------------------------------------------
59 | // Helper methods (CPU)
60 | //-----------------------------------------------------------------------------
61 | public:
62 | void loadCalibration(const std::string& file_path);
63 |
64 | private:
65 | void initCalibrationMatrix(const cv::Matx33d& matrix);
66 |
67 | void colorCorrection(cv::Mat& image);
68 | #ifdef HAS_CUDA
69 | void colorCorrection(cv::cuda::GpuMat& image);
70 | #endif
71 |
72 | //-----------------------------------------------------------------------------
73 | // Variables
74 | //-----------------------------------------------------------------------------
75 | bool enabled_;
76 | bool use_gpu_;
77 |
78 | // Calibration & undistortion
79 | bool calibration_available_;
80 | cv::Matx33f color_calibration_matrix_;
81 | cv::Scalar color_calibration_bias_;
82 |
83 | #ifdef HAS_CUDA
84 | Npp32f gpu_color_calibration_matrix_[3][4] = {{1.f, 0.f, 0.f, 0.f}, {0.f, 1.f, 0.f, 0.f}, {0.f, 0.f, 1.f, 0.f}};
85 | #endif
86 | };
87 |
88 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/color_enhancer.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 |
10 | #ifdef HAS_CUDA
11 | #include
12 | #include
13 | #endif
14 |
15 | namespace raw_image_pipeline {
16 |
17 | class ColorEnhancerModule {
18 | public:
19 | ColorEnhancerModule(bool use_gpu);
20 | void enable(bool enabled);
21 | bool enabled() const;
22 |
23 | //-----------------------------------------------------------------------------
24 | // Setters
25 | //-----------------------------------------------------------------------------
26 | void setHueGain(const double& gain);
27 | void setSaturationGain(const double& gain);
28 | void setValueGain(const double& gain);
29 |
30 | //-----------------------------------------------------------------------------
31 | // Main interface
32 | //-----------------------------------------------------------------------------
33 | template
34 | bool apply(T& image) {
35 | if (!enabled_) {
36 | return false;
37 | }
38 | if (image.channels() != 3) {
39 | return false;
40 | }
41 | enhance(image);
42 | return true;
43 | }
44 |
45 | //-----------------------------------------------------------------------------
46 | // Helper methods (CPU)
47 | //-----------------------------------------------------------------------------
48 | private:
49 | void enhance(cv::Mat& image);
50 | #ifdef HAS_CUDA
51 | void enhance(cv::cuda::GpuMat& image);
52 | #endif
53 |
54 | //-----------------------------------------------------------------------------
55 | // Variables
56 | //-----------------------------------------------------------------------------
57 | bool enabled_;
58 | bool use_gpu_;
59 |
60 | double value_gain_;
61 | double saturation_gain_;
62 | double hue_gain_;
63 | };
64 |
65 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/debayer.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 |
10 | #ifdef HAS_CUDA
11 | #include
12 | #include
13 | #endif
14 |
15 | namespace raw_image_pipeline {
16 |
17 | class DebayerModule {
18 | public:
19 | DebayerModule(bool use_gpu);
20 | void enable(bool enabled);
21 | bool enabled() const;
22 |
23 | //-----------------------------------------------------------------------------
24 | // Setters
25 | //-----------------------------------------------------------------------------
26 | void setEncoding(const std::string& encoding);
27 |
28 | //-----------------------------------------------------------------------------
29 | // Getters
30 | //-----------------------------------------------------------------------------
31 | cv::Mat getImage() const;
32 |
33 | //-----------------------------------------------------------------------------
34 | // Main interface
35 | //-----------------------------------------------------------------------------
36 | template
37 | bool apply(T& image, std::string& encoding) {
38 | // if (!enabled_) {
39 | // return false;
40 | // }
41 | // Check encoding
42 | std::string input_encoding = encoding_ == "auto" ? encoding : encoding_;
43 | // Run debayer
44 | debayer(image, encoding);
45 | saveDebayeredImage(image);
46 | return true;
47 | }
48 |
49 | //-----------------------------------------------------------------------------
50 | // Helper methods (CPU)
51 | //-----------------------------------------------------------------------------
52 | private:
53 | bool isBayerEncoding(const std::string& encoding) const;
54 |
55 | void debayer(cv::Mat& image, std::string& encoding);
56 | void saveDebayeredImage(cv::Mat& image);
57 |
58 | #ifdef HAS_CUDA
59 | void debayer(cv::cuda::GpuMat& image, std::string& encoding);
60 | void saveDebayeredImage(cv::cuda::GpuMat& image);
61 | #endif
62 |
63 | //-----------------------------------------------------------------------------
64 | // Variables
65 | //-----------------------------------------------------------------------------
66 | bool enabled_;
67 | bool use_gpu_;
68 |
69 | std::string encoding_;
70 |
71 | cv::Mat image_;
72 |
73 | // Types
74 | std::vector BAYER_TYPES = {"bayer_bggr8",
75 | "bayer_gbrg8",
76 | "bayer_grbg8",
77 | "bayer_rggb8"
78 | "bayer_bggr16",
79 | "bayer_gbrg16",
80 | "bayer_grbg16",
81 | "bayer_rggb16"};
82 | };
83 |
84 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/flip.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 |
10 | #ifdef HAS_CUDA
11 | #include
12 | #include
13 | #endif
14 |
15 | namespace raw_image_pipeline {
16 |
17 | class FlipModule {
18 | public:
19 | FlipModule(bool use_gpu);
20 | void enable(bool enabled);
21 | bool enabled() const;
22 |
23 | //-----------------------------------------------------------------------------
24 | // Setters
25 | //-----------------------------------------------------------------------------
26 | void setAngle(const int& angle);
27 |
28 | //-----------------------------------------------------------------------------
29 | // Getters
30 | //-----------------------------------------------------------------------------
31 | cv::Mat getImage() const;
32 |
33 | //-----------------------------------------------------------------------------
34 | // Main interface
35 | //-----------------------------------------------------------------------------
36 | template
37 | bool apply(T& image) {
38 | if (!enabled_) {
39 | saveFlippedImage(image);
40 | return false;
41 | }
42 | flip(image);
43 | saveFlippedImage(image);
44 | return true;
45 | }
46 |
47 | //-----------------------------------------------------------------------------
48 | // Helper methods (CPU)
49 | //-----------------------------------------------------------------------------
50 | private:
51 | void flip(cv::Mat& image);
52 | void saveFlippedImage(cv::Mat& image);
53 |
54 | #ifdef HAS_CUDA
55 | void flip(cv::cuda::GpuMat& image);
56 | void saveFlippedImage(cv::cuda::GpuMat& image);
57 | #endif
58 |
59 | //-----------------------------------------------------------------------------
60 | // Variables
61 | //-----------------------------------------------------------------------------
62 | bool enabled_;
63 | bool use_gpu_;
64 | cv::Mat image_;
65 |
66 | int angle_;
67 | };
68 |
69 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/gamma_correction.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 |
10 | #ifdef HAS_CUDA
11 | #include
12 | #include
13 | #endif
14 |
15 | namespace raw_image_pipeline {
16 |
17 | class GammaCorrectionModule {
18 | public:
19 | GammaCorrectionModule(bool use_gpu);
20 | void enable(bool enabled);
21 | bool enabled() const;
22 |
23 | //-----------------------------------------------------------------------------
24 | // Setters
25 | //-----------------------------------------------------------------------------
26 | void setMethod(const std::string& method);
27 | void setK(const double& k);
28 |
29 | //-----------------------------------------------------------------------------
30 | // Main interface
31 | //-----------------------------------------------------------------------------
32 | template
33 | bool apply(T& image) {
34 | if (!enabled_) {
35 | return false;
36 | }
37 | if (method_ == "custom") {
38 | gammaCorrectCustom(image);
39 | } else {
40 | gammaCorrectDefault(image);
41 | }
42 | return true;
43 | }
44 |
45 | //-----------------------------------------------------------------------------
46 | // Wrapper methods (CPU)
47 | //-----------------------------------------------------------------------------
48 | void gammaCorrectCustom(cv::Mat& image);
49 | void gammaCorrectDefault(cv::Mat& image);
50 |
51 | //-----------------------------------------------------------------------------
52 | // Wrapper methods (GPU)
53 | //-----------------------------------------------------------------------------
54 | #ifdef HAS_CUDA
55 | void gammaCorrectCustom(cv::cuda::GpuMat& image);
56 | void gammaCorrectDefault(cv::cuda::GpuMat& image);
57 | #endif
58 |
59 | //-----------------------------------------------------------------------------
60 | // Helper methods (CPU)
61 | //-----------------------------------------------------------------------------
62 | void init();
63 |
64 | //-----------------------------------------------------------------------------
65 | // Variables
66 | //-----------------------------------------------------------------------------
67 | bool enabled_;
68 | bool use_gpu_;
69 |
70 | std::string method_;
71 | bool is_forward_;
72 | double k_;
73 |
74 | // LUT
75 | cv::Mat cpu_lut_;
76 |
77 | #ifdef HAS_CUDA
78 | cv::Ptr gpu_lut_;
79 | #endif
80 | };
81 |
82 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/undistortion.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | #ifdef HAS_CUDA
14 | #include
15 | #include
16 | #include
17 | #endif
18 |
19 | namespace raw_image_pipeline {
20 |
21 | class UndistortionModule {
22 | public:
23 | UndistortionModule(bool use_gpu);
24 | void enable(bool enabled);
25 | bool enabled() const;
26 |
27 | //-----------------------------------------------------------------------------
28 | // Setters
29 | //-----------------------------------------------------------------------------
30 | void setImageSize(int width, int height);
31 | void setNewImageSize(int width, int height);
32 | void setBalance(double balance);
33 | void setFovScale(double fov_scale);
34 |
35 | void setCameraMatrix(const std::vector& camera_matrix);
36 | void setDistortionCoefficients(const std::vector& coefficients);
37 | void setDistortionModel(const std::string& model);
38 | void setRectificationMatrix(const std::vector& rectification_matrix);
39 | void setProjectionMatrix(const std::vector& projection_matrix);
40 |
41 | //-----------------------------------------------------------------------------
42 | // Getters
43 | //-----------------------------------------------------------------------------
44 | int getDistImageHeight() const;
45 | int getDistImageWidth() const;
46 | std::string getDistDistortionModel() const;
47 | cv::Mat getDistCameraMatrix() const;
48 | cv::Mat getDistDistortionCoefficients() const;
49 | cv::Mat getDistRectificationMatrix() const;
50 | cv::Mat getDistProjectionMatrix() const;
51 |
52 | int getRectImageHeight() const;
53 | int getRectImageWidth() const;
54 | std::string getRectDistortionModel() const;
55 | cv::Mat getRectCameraMatrix() const;
56 | cv::Mat getRectDistortionCoefficients() const;
57 | cv::Mat getRectRectificationMatrix() const;
58 | cv::Mat getRectProjectionMatrix() const;
59 |
60 | cv::Mat getDistImage() const;
61 | cv::Mat getRectMask() const;
62 |
63 | //-----------------------------------------------------------------------------
64 | // Main interface
65 | //-----------------------------------------------------------------------------
66 | template
67 | bool apply(T& image) {
68 | saveUndistortedImage(image);
69 | if (!enabled_) {
70 | return false;
71 | }
72 | if (!calibration_available_) {
73 | std::cout << "No calibration available!" << std::endl;
74 | return false;
75 | }
76 | if (dist_distortion_model_ != "none") {
77 | undistort(image);
78 | }
79 | return true;
80 | }
81 |
82 | //-----------------------------------------------------------------------------
83 | // Helper methods
84 | //-----------------------------------------------------------------------------
85 | public:
86 | void loadCalibration(const std::string& file_path);
87 | void init();
88 |
89 | private:
90 | void undistort(cv::Mat& image);
91 | void saveUndistortedImage(cv::Mat& image);
92 |
93 | #ifdef HAS_CUDA
94 | void undistort(cv::cuda::GpuMat& image);
95 | void saveUndistortedImage(cv::cuda::GpuMat& image);
96 | #endif
97 |
98 | //-----------------------------------------------------------------------------
99 | // Variables
100 | //-----------------------------------------------------------------------------
101 | bool enabled_;
102 | bool use_gpu_;
103 |
104 | // Calibration & undistortion
105 | bool calibration_available_;
106 |
107 | // Original - "distorted" parameters
108 | std::string dist_distortion_model_;
109 | cv::Matx33d dist_camera_matrix_;
110 | cv::Matx14d dist_distortion_coeff_;
111 | cv::Matx33d dist_rectification_matrix_;
112 | cv::Matx34d dist_projection_matrix_;
113 | cv::Size dist_image_size_;
114 |
115 | // Undistorted parameters
116 | std::string rect_distortion_model_;
117 | cv::Matx33d rect_camera_matrix_;
118 | cv::Matx14d rect_distortion_coeff_;
119 | cv::Matx33d rect_rectification_matrix_;
120 | cv::Matx34d rect_projection_matrix_;
121 | cv::Size rect_image_size_;
122 |
123 | double rect_balance_;
124 | double rect_fov_scale_;
125 |
126 | cv::Mat undistortion_map_x_;
127 | cv::Mat undistortion_map_y_;
128 |
129 | #ifdef HAS_CUDA
130 | cv::cuda::GpuMat gpu_undistortion_map_x_;
131 | cv::cuda::GpuMat gpu_undistortion_map_y_;
132 | #endif
133 |
134 | // Original image
135 | cv::Mat dist_image_;
136 | cv::Mat rect_mask_;
137 | };
138 |
139 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/vignetting_correction.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 |
10 | #ifdef HAS_CUDA
11 | #include
12 | #include
13 | #endif
14 |
15 | namespace raw_image_pipeline {
16 |
17 | class VignettingCorrectionModule {
18 | public:
19 | VignettingCorrectionModule(bool use_gpu);
20 | void enable(bool enabled);
21 | bool enabled() const;
22 |
23 | //-----------------------------------------------------------------------------
24 | // Main interface
25 | //-----------------------------------------------------------------------------
26 | template
27 | bool apply(T& image) {
28 | if (!enabled_) {
29 | return false;
30 | }
31 | correct(image);
32 | return true;
33 | }
34 |
35 | //-----------------------------------------------------------------------------
36 | // Setters
37 | //-----------------------------------------------------------------------------
38 | void setParameters(const double& scale, const double& a2, const double& a4);
39 |
40 | //-----------------------------------------------------------------------------
41 | // Helper methods (CPU)
42 | //-----------------------------------------------------------------------------
43 | void precomputeVignettingMask(int height, int width);
44 |
45 | private:
46 | void correct(cv::Mat& image);
47 | #ifdef HAS_CUDA
48 | void correct(cv::cuda::GpuMat& image);
49 | #endif
50 |
51 | //-----------------------------------------------------------------------------
52 | // Variables
53 | //-----------------------------------------------------------------------------
54 | bool enabled_;
55 | bool use_gpu_;
56 |
57 | double vignetting_correction_scale_;
58 | double vignetting_correction_a2_;
59 | double vignetting_correction_a4_;
60 | cv::Mat vignetting_mask_f_;
61 | cv::Mat image_f_;
62 | #ifdef HAS_CUDA
63 | cv::cuda::GpuMat gpu_vignetting_mask_f_;
64 | cv::cuda::GpuMat gpu_image_f_;
65 | #endif
66 | };
67 |
68 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/modules/white_balance.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #pragma once
7 |
8 | #include
9 | #include
10 |
11 | #include
12 | #include
13 |
14 | #include
15 | #include
16 | #include
17 |
18 | #ifdef HAS_CUDA
19 | #include
20 | #include
21 | #include
22 | #endif
23 |
24 | namespace raw_image_pipeline {
25 |
26 | class WhiteBalanceModule {
27 | public:
28 | WhiteBalanceModule(bool use_gpu);
29 | void enable(bool enabled);
30 | bool enabled() const;
31 |
32 | //-----------------------------------------------------------------------------
33 | // Setters
34 | //-----------------------------------------------------------------------------
35 | void setMethod(const std::string& method);
36 | void setSaturationPercentile(const double& percentile);
37 | void setSaturationThreshold(const double& bright_thr, const double& dark_thr);
38 | void setTemporalConsistency(bool enabled);
39 |
40 | void resetTemporalConsistency();
41 |
42 | //-----------------------------------------------------------------------------
43 | // Main interface
44 | //-----------------------------------------------------------------------------
45 | template
46 | bool apply(T& image) {
47 | if (!enabled_) {
48 | return false;
49 | }
50 | if (image.channels() != 3) {
51 | return false;
52 | }
53 | if (method_ == "simple") {
54 | balanceWhiteSimple(image);
55 | return true;
56 |
57 | } else if (method_ == "gray_world" || method_ == "grey_world") {
58 | balanceWhiteGreyWorld(image);
59 | return true;
60 |
61 | } else if (method_ == "learned") {
62 | balanceWhiteLearned(image);
63 | return true;
64 |
65 | } else if (method_ == "ccc") {
66 | // CCC white balancing - this works directly on GPU
67 | // cccWBPtr_->setSaturationThreshold(saturation_bright_thr_, saturation_dark_thr_);
68 | // cccWBPtr_->setTemporalConsistency(temporal_consistency_);
69 | // cccWBPtr_->setDebug(false);
70 | // cccWBPtr_->balanceWhite(image, image);
71 | ccc_->setSaturationThreshold(saturation_bright_thr_, saturation_dark_thr_);
72 | ccc_->setTemporalConsistency(temporal_consistency_);
73 | ccc_->setDebug(false);
74 | ccc_->balanceWhite(image, image);
75 | return true;
76 |
77 | } else if (method_ == "pca") {
78 | balanceWhitePca(image);
79 | return true;
80 |
81 | } else {
82 | throw std::invalid_argument("White Balance method [" + method_ +
83 | "] not supported. "
84 | "Supported algorithms: 'simple', 'gray_world', 'learned', 'ccc', 'pca'");
85 | }
86 | }
87 |
88 | //-----------------------------------------------------------------------------
89 | // White balance wrapper methods (CPU)
90 | //-----------------------------------------------------------------------------
91 | private:
92 | void balanceWhiteSimple(cv::Mat& image);
93 | void balanceWhiteGreyWorld(cv::Mat& image);
94 | void balanceWhiteLearned(cv::Mat& image);
95 | void balanceWhitePca(cv::Mat& image);
96 |
97 | //-----------------------------------------------------------------------------
98 | // White balance wrapper methods (GPU)
99 | //-----------------------------------------------------------------------------
100 | #ifdef HAS_CUDA
101 | void balanceWhiteSimple(cv::cuda::GpuMat& image);
102 | void balanceWhiteGreyWorld(cv::cuda::GpuMat& image);
103 | void balanceWhiteLearned(cv::cuda::GpuMat& image);
104 | void balanceWhitePca(cv::cuda::GpuMat& image);
105 | #endif
106 |
107 | //-----------------------------------------------------------------------------
108 | // Variables
109 | //-----------------------------------------------------------------------------
110 | bool enabled_;
111 | bool use_gpu_;
112 |
113 | std::string method_;
114 | double clipping_percentile_;
115 | double saturation_bright_thr_;
116 | double saturation_dark_thr_;
117 | bool temporal_consistency_;
118 |
119 | // Pointers
120 | std::unique_ptr ccc_;
121 | // raw_image_pipeline_white_balance::ConvolutionalColorConstancyWB ccc_;
122 | };
123 |
124 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/raw_image_pipeline.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 | // Author: Timon Homberger
6 |
7 | #pragma once
8 |
9 | #include
10 | #include
11 |
12 | #ifdef HAS_CUDA
13 | #include
14 | #include
15 | #include
16 | #endif
17 |
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 |
24 | // Modules
25 | #include
26 | #include
27 | #include
28 | #include
29 | #include
30 | #include
31 | #include
32 | #include
33 |
34 | namespace raw_image_pipeline {
35 |
36 | class RawImagePipeline {
37 | public:
38 | // Constructor & destructor
39 | RawImagePipeline(bool use_gpu);
40 | RawImagePipeline(bool use_gpu, const std::string& params_path, const std::string& calibration_path,
41 | const std::string& color_calibration_path);
42 | ~RawImagePipeline();
43 |
44 | //-----------------------------------------------------------------------------
45 | // Main interfaces
46 | //-----------------------------------------------------------------------------
47 | bool apply(cv::Mat& image, std::string& encoding);
48 |
49 | // Alternative pipeline that returns a copy
50 | cv::Mat process(const cv::Mat& image, std::string& encoding);
51 |
52 | // Loaders
53 | void loadParams(const std::string& file_path);
54 | void loadCameraCalibration(const std::string& file_path);
55 | void loadColorCalibration(const std::string& file_path);
56 | void initUndistortion();
57 |
58 | // Other interfaces
59 | void resetWhiteBalanceTemporalConsistency();
60 | void setGpu(bool use_gpu);
61 | void setDebug(bool debug);
62 |
63 | //-----------------------------------------------------------------------------
64 | // Setters
65 | //-----------------------------------------------------------------------------
66 | void setDebayer(bool enabled);
67 | void setDebayerEncoding(const std::string& encoding);
68 |
69 | void setFlip(bool enabled);
70 | void setFlipAngle(int angle);
71 |
72 | void setWhiteBalance(bool enabled);
73 | void setWhiteBalanceMethod(const std::string& method);
74 | void setWhiteBalancePercentile(const double& percentile);
75 | void setWhiteBalanceSaturationThreshold(const double& bright_thr, const double& dark_thr);
76 | void setWhiteBalanceTemporalConsistency(bool enabled);
77 | void setColorCalibration(bool enabled);
78 | void setColorCalibrationMatrix(const std::vector& color_calibration_matrix);
79 | void setColorCalibrationBias(const std::vector& color_calibration_bias);
80 | cv::Mat getColorCalibrationMatrix() const;
81 | cv::Mat getColorCalibrationBias() const;
82 |
83 | void setGammaCorrection(bool enabled);
84 | void setGammaCorrectionMethod(const std::string& method);
85 | void setGammaCorrectionK(const double& k);
86 |
87 | void setVignettingCorrection(bool enabled);
88 | void setVignettingCorrectionParameters(const double& scale, const double& a2, const double& a4);
89 |
90 | void setColorEnhancer(bool enabled);
91 | void setColorEnhancerHueGain(const double& gain);
92 | void setColorEnhancerSaturationGain(const double& gain);
93 | void setColorEnhancerValueGain(const double& gain);
94 |
95 | void setUndistortion(bool enabled);
96 | void setUndistortionImageSize(int width, int height);
97 | void setUndistortionNewImageSize(int width, int height);
98 | void setUndistortionBalance(double balance);
99 | void setUndistortionFovScale(double fov_scale);
100 | void setUndistortionCameraMatrix(const std::vector& camera_matrix);
101 | void setUndistortionDistortionCoefficients(const std::vector& coefficients);
102 | void setUndistortionDistortionModel(const std::string& model);
103 | void setUndistortionRectificationMatrix(const std::vector& rectification_matrix);
104 | void setUndistortionProjectionMatrix(const std::vector& projection_matrix);
105 |
106 | //-----------------------------------------------------------------------------
107 | // Getters
108 | //-----------------------------------------------------------------------------
109 | bool isDebayerEnabled() const;
110 | bool isFlipEnabled() const;
111 | bool isWhiteBalanceEnabled() const;
112 | bool isColorCalibrationEnabled() const;
113 | bool isGammaCorrectionEnabled() const;
114 | bool isVignettingCorrectionEnabled() const;
115 | bool isColorEnhancerEnabled() const;
116 | bool isUndistortionEnabled() const;
117 |
118 | int getDistImageHeight() const;
119 | int getDistImageWidth() const;
120 | std::string getDistDistortionModel() const;
121 | cv::Mat getDistCameraMatrix() const;
122 | cv::Mat getDistDistortionCoefficients() const;
123 | cv::Mat getDistRectificationMatrix() const;
124 | cv::Mat getDistProjectionMatrix() const;
125 |
126 | int getRectImageHeight() const;
127 | int getRectImageWidth() const;
128 | std::string getRectDistortionModel() const;
129 | cv::Mat getRectCameraMatrix() const;
130 | cv::Mat getRectDistortionCoefficients() const;
131 | cv::Mat getRectRectificationMatrix() const;
132 | cv::Mat getRectProjectionMatrix() const;
133 |
134 | cv::Mat getDistDebayeredImage() const;
135 | cv::Mat getDistColorImage() const;
136 | cv::Mat getRectMask() const;
137 | cv::Mat getProcessedImage() const;
138 |
139 | private:
140 | //-----------------------------------------------------------------------------
141 | // Pipeline
142 | //-----------------------------------------------------------------------------
143 | template
144 | void pipeline(T& image, std::string& encoding) {
145 | // Run pipeline
146 | debayer_->apply(image, encoding);
147 | saveDebugImage(image, "/tmp/00_debayer.png");
148 |
149 | flipper_->apply(image);
150 | saveDebugImage(image, "/tmp/01_flip.png");
151 |
152 | white_balancer_->apply(image);
153 | saveDebugImage(image, "/tmp/02_white_balancing.png");
154 |
155 | color_calibrator_->apply(image);
156 | saveDebugImage(image, "/tmp/03_color_calibration.png");
157 |
158 | gamma_corrector_->apply(image);
159 | saveDebugImage(image, "/tmp/04_gamma_correction.png");
160 |
161 | vignetting_corrector_->apply(image);
162 | saveDebugImage(image, "/tmp/05_vignetting_correction.png");
163 |
164 | color_enhancer_->apply(image);
165 | saveDebugImage(image, "/tmp/06_color_enhancer.png");
166 |
167 | undistorter_->apply(image);
168 | saveDebugImage(image, "/tmp/07_undistortion.png");
169 |
170 | // Save processed output
171 | saveOutput(image);
172 | }
173 |
174 | void saveOutput(const cv::Mat& image) {
175 | // copy to internal image
176 | image.copyTo(image_);
177 | }
178 |
179 | void saveDebugImage(const cv::Mat& image, const std::string& filename) const {
180 | if (debug_) {
181 | cv::Mat tmp;
182 | image.copyTo(tmp);
183 | cv::normalize(tmp, tmp, 0, 255.0, cv::NORM_MINMAX);
184 | cv::imwrite(filename, tmp);
185 | }
186 | }
187 |
188 | #ifdef HAS_CUDA
189 | void saveDebugImage(const cv::cuda::GpuMat& image, const std::string& filename) const {
190 | cv::Mat tmp;
191 | image.download(tmp);
192 | saveDebugImage(tmp, filename);
193 | }
194 |
// GPU overload: downloads the final result into the CPU-side image_ member.
void saveOutput(const cv::cuda::GpuMat& image) {
// download to internal image
image.download(image_);
}
199 | #endif
200 |
201 | //-----------------------------------------------------------------------------
202 | // Modules
203 | //-----------------------------------------------------------------------------
204 | std::unique_ptr color_calibrator_;
205 | std::unique_ptr color_enhancer_;
206 | std::unique_ptr debayer_;
207 | std::unique_ptr flipper_;
208 | std::unique_ptr gamma_corrector_;
209 | std::unique_ptr undistorter_;
210 | std::unique_ptr vignetting_corrector_;
211 | std::unique_ptr white_balancer_;
212 |
213 | //-----------------------------------------------------------------------------
214 | // Other variables
215 | //-----------------------------------------------------------------------------
216 | // Pipeline options
217 | bool use_gpu_;
218 | bool debug_;
219 |
220 | // Internal variables
221 | cv::Mat image_;
222 | };
223 |
224 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/include/raw_image_pipeline/utils.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 |
6 | namespace YAML {
7 |
8 | template <>
9 | struct convert {
10 | static bool decode(const YAML::Node& node, cv::Matx14d& rhs) {
11 | if (!node.IsSequence() || node.size() != 4) {
12 | return false;
13 | }
14 | rhs = cv::Matx14d{node[0].as(), node[1].as(), node[2].as(), node[3].as()};
15 | return true;
16 | }
17 | };
18 |
19 | template <>
20 | struct convert {
21 | static bool decode(const YAML::Node& node, cv::Matx31d& rhs) {
22 | if (!node.IsSequence() || node.size() != 3) {
23 | return false;
24 | }
25 | rhs = cv::Matx31d{node[0].as(), node[1].as(), node[2].as()};
26 | return true;
27 | }
28 | };
29 |
30 | template <>
31 | struct convert {
32 | static bool decode(const YAML::Node& node, cv::Matx33d& rhs) {
33 | if (!node.IsSequence() || node.size() != 9) {
34 | return false;
35 | }
36 | rhs = cv::Matx33d{node[0].as(), node[1].as(), node[2].as(), node[3].as(), node[4].as(),
37 | node[5].as(), node[6].as(), node[7].as(), node[8].as()};
38 | return true;
39 | }
40 | };
41 |
42 | template <>
43 | struct convert {
44 | static bool decode(const YAML::Node& node, cv::Matx34d& rhs) {
45 | if (!node.IsSequence() || node.size() != 12) {
46 | return false;
47 | }
48 | rhs = cv::Matx34d{node[0].as(), node[1].as(), node[2].as(), node[3].as(),
49 | node[4].as(), node[5].as(), node[6].as(), node[7].as(),
50 | node[8].as(), node[9].as(), node[10].as(), node[11].as()};
51 | return true;
52 | }
53 | };
54 |
55 | } // namespace YAML
56 |
57 | namespace raw_image_pipeline {
58 | namespace utils {
59 |
60 | // Wrapper for yaml-cpp to provide default values if value is not found
61 | template
62 | T get(const YAML::Node& node, const std::string& param, const T& default_value) {
63 | T out;
64 | try {
65 | if (!node[param]) {
66 | out = default_value;
67 | }
68 | out = node[param].as();
69 | } catch (const YAML::InvalidNode::Exception& e) {
70 | out = default_value;
71 | }
72 | // std::cout << "Reading parameter [" << param << "]: " << std::endl;
73 | return out;
74 | }
75 |
76 | // Convert mat to std vector
77 | // Source: https://stackoverflow.com/a/56600115
78 | template
79 | static std::vector toStdVector(const cv::Mat& m) {
80 | cv::Mat m_aux = m.isContinuous() ? m : m.clone();
81 | std::vector vec = m_aux.reshape(1, m_aux.total() * m_aux.channels());
82 | return vec;
83 | }
84 |
85 | template
86 | static boost::array toBoostArray(const cv::Mat& m) {
87 | // TODO: this could be optimized
88 | boost::array vec;
89 | cv::Mat m_aux = m.isContinuous() ? m : m.clone();
90 | m_aux = m_aux.reshape(1, m_aux.total() * m_aux.channels());
91 |
92 | for (size_t i = 0; i < N; i++) vec[i] = m_aux.at(i);
93 | return vec;
94 | }
95 |
96 | } // namespace utils
97 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | raw_image_pipeline
4 | 0.0.0
5 | ROS-independent image proc cuda pipeline
6 |
7 | Matias Mattamala
8 | Timon Homberger
9 | Matias Mattamala
10 |
11 | Proprietary
12 |
13 | catkin
14 | eigen_conversions
15 | nvidia-cuda
16 | nvidia-cuda-dev
17 | raw_image_pipeline_white_balance
18 | pybind11_catkin
19 |
20 |
21 |
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/color_calibration.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
10 | ColorCalibrationModule::ColorCalibrationModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {
11 | cv::Matx33d matrix = cv::Matx33d::eye();
12 | initCalibrationMatrix(matrix);
13 | }
14 |
// Enables or disables this module.
void ColorCalibrationModule::enable(bool enabled) {
  enabled_ = enabled;
}
18 |
// Returns whether the module is currently enabled.
bool ColorCalibrationModule::enabled() const {
  return enabled_;
}
22 |
23 | //-----------------------------------------------------------------------------
24 | // Setters
25 | //-----------------------------------------------------------------------------
26 | void ColorCalibrationModule::setCalibrationMatrix(const std::vector& color_calibration_matrix) {
27 | cv::Matx33d matrix = cv::Matx33d(color_calibration_matrix.data());
28 | initCalibrationMatrix(matrix);
29 | }
30 |
31 | void ColorCalibrationModule::setCalibrationBias(const std::vector& color_calibration_bias) {
32 | color_calibration_bias_ = cv::Scalar(color_calibration_bias.at(0), // B
33 | color_calibration_bias.at(1), // G
34 | color_calibration_bias.at(2) // R
35 | );
36 | }
37 |
38 | //-----------------------------------------------------------------------------
39 | // Getters
40 | //-----------------------------------------------------------------------------
// Returns the 3x3 color mixing matrix as a cv::Mat (CV_64F copy of the Matx).
cv::Mat ColorCalibrationModule::getCalibrationMatrix() const {
  return cv::Mat(color_calibration_matrix_);
}
44 |
// Returns the additive BGR bias as a cv::Mat (4x1, from the cv::Scalar).
cv::Mat ColorCalibrationModule::getCalibrationBias() const {
  return cv::Mat(color_calibration_bias_);
}
48 |
49 | //-----------------------------------------------------------------------------
50 | // Helper methods
51 | //-----------------------------------------------------------------------------
// Loads the color calibration (3x3 mixing matrix + BGR bias) from a YAML file.
// Sets calibration_available_ accordingly; a missing file leaves the current
// matrix/bias untouched and only clears the flag.
void ColorCalibrationModule::loadCalibration(const std::string& file_path) {
  std::cout << "Loading color calibration from file " << file_path << std::endl;

  // Check if file exists
  if (boost::filesystem::exists(file_path)) {
    // Load calibration
    YAML::Node node = YAML::LoadFile(file_path);
    // Camera matrix
    cv::Matx33d matrix = utils::get(node["matrix"], "data", cv::Matx33d::eye());
    initCalibrationMatrix(matrix);
    // Bias (stored in the file as B, G, R)
    cv::Matx31d bias = utils::get(node["bias"], "data", cv::Matx31d::zeros());
    color_calibration_bias_ = cv::Scalar(bias(0),  // B
                                         bias(1),  // G
                                         bias(2)   // R
    );

    calibration_available_ = true;
  }
  // If not, disable calibration available flag
  else {
    calibration_available_ = false;
    std::cout << "Warning: Color calibration file doesn't exist" << std::endl;
  }
}
77 |
// Stores the mixing matrix and, when the GPU path is active, mirrors it into
// the float coefficient array consumed by the NPP color-twist kernel.
void ColorCalibrationModule::initCalibrationMatrix(const cv::Matx33d& matrix) {
  color_calibration_matrix_ = matrix;

#ifdef HAS_CUDA
  if (use_gpu_) {
    // Copy element-wise; the GPU kernel expects single-precision coefficients
    for (size_t i = 0; i < 3; i++)
      for (size_t j = 0; j < 3; j++) {
        gpu_color_calibration_matrix_[i][j] = static_cast(matrix(i, j));
      }
  }
#endif
}
90 |
91 | void ColorCalibrationModule::colorCorrection(cv::Mat& image) {
92 | // https://stackoverflow.com/a/12678457
93 | cv::Mat flat_image = image.reshape(1, image.rows * image.cols);
94 | cv::Mat flat_image_f;
95 | flat_image.convertTo(flat_image_f, CV_32F);
96 |
97 | // Mix
98 | cv::Mat mixed_image = flat_image_f * color_calibration_matrix_.t();
99 | cv::Mat image_f = mixed_image.reshape(3, image.rows);
100 | // Add bias
101 | image_f += color_calibration_bias_;
102 |
103 | image_f.convertTo(image, CV_8UC3);
104 | }
105 |
106 | #ifdef HAS_CUDA
// GPU color calibration: applies the 3x3 mixing matrix in place with NPP's
// color-twist kernel, then adds the BGR bias.
void ColorCalibrationModule::colorCorrection(cv::cuda::GpuMat& image) {
  // TODO: change implementation to pure OpenCV
  NppiSize image_size;
  image_size.width = image.cols;
  image_size.height = image.rows;

  // Apply calibration (in-place twist using the float coefficients prepared
  // by initCalibrationMatrix)
  // NOTE(review): the NppStatus return value is ignored — consider checking it
  nppiColorTwist32f_8u_C3IR(image.data, static_cast(image.step), image_size, gpu_color_calibration_matrix_);

  // Add bias
  cv::cuda::add(image, color_calibration_bias_, image);
}
119 | #endif
120 |
121 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/color_enhancer.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
// Constructs the module enabled by default; use_gpu is stored for the CUDA code paths.
ColorEnhancerModule::ColorEnhancerModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {}
11 |
// Enables or disables this module.
void ColorEnhancerModule::enable(bool enabled) {
  enabled_ = enabled;
}
15 |
// Returns whether the module is currently enabled.
bool ColorEnhancerModule::enabled() const {
  return enabled_;
}
19 |
20 | //-----------------------------------------------------------------------------
21 | // Setters
22 | //-----------------------------------------------------------------------------
23 | void ColorEnhancerModule::setHueGain(const double& gain) {
24 | value_gain_ = gain;
25 | }
26 |
// Sets the gain applied to the saturation (S) channel during enhancement.
void ColorEnhancerModule::setSaturationGain(const double& gain) {
  saturation_gain_ = gain;
}
30 |
31 | void ColorEnhancerModule::setValueGain(const double& gain) {
32 | hue_gain_ = gain;
33 | }
34 |
35 | //-----------------------------------------------------------------------------
36 | // Helper methods
37 | //-----------------------------------------------------------------------------
38 | void ColorEnhancerModule::enhance(cv::Mat& image) {
39 | cv::Mat color_enhanced_image;
40 | cv::cvtColor(image, color_enhanced_image, cv::COLOR_BGR2HSV);
41 |
42 | cv::Scalar gains(hue_gain_, saturation_gain_, value_gain_);
43 | cv::multiply(color_enhanced_image, gains, color_enhanced_image);
44 |
45 | // Convert the histogram equalized image from HSV to BGR color space again
46 | cv::cvtColor(color_enhanced_image, image, cv::COLOR_HSV2BGR);
47 | }
48 |
49 | #ifdef HAS_CUDA
50 | void ColorEnhancerModule::enhance(cv::cuda::GpuMat& image) {
51 | cv::cuda::GpuMat color_enhanced_image;
52 | cv::cuda::cvtColor(image, color_enhanced_image, cv::COLOR_BGR2HSV);
53 |
54 | cv::Scalar gains(hue_gain_, saturation_gain_, value_gain_);
55 | cv::cuda::multiply(color_enhanced_image, gains, color_enhanced_image);
56 |
57 | // Convert the histogram equalized image from HSV to BGR color space again
58 | cv::cuda::cvtColor(color_enhanced_image, image, cv::COLOR_HSV2BGR);
59 | }
60 | #endif
61 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/debayer.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
// Constructs the module enabled by default; use_gpu is stored for the CUDA code paths.
DebayerModule::DebayerModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {}
11 |
// Enables or disables this module.
void DebayerModule::enable(bool enabled) {
  enabled_ = enabled;
}
15 |
// Returns whether the module is currently enabled.
bool DebayerModule::enabled() const {
  return enabled_;
}
19 |
20 | //-----------------------------------------------------------------------------
21 | // Setters
22 | //-----------------------------------------------------------------------------
// Sets the expected input encoding (e.g. "bayer_bggr8", "rgb8").
void DebayerModule::setEncoding(const std::string& encoding) {
  encoding_ = encoding;
}
26 |
27 | //-----------------------------------------------------------------------------
28 | // Getters
29 | //-----------------------------------------------------------------------------
// Deep copy of the last image stored by saveDebayeredImage().
cv::Mat DebayerModule::getImage() const {
  return image_.clone();
}
33 |
34 | //-----------------------------------------------------------------------------
35 | // Helper methods
36 | //-----------------------------------------------------------------------------
37 | bool DebayerModule::isBayerEncoding(const std::string& encoding) const {
38 | // Find if encoding is in list of Bayer types
39 | return std::find(BAYER_TYPES.begin(), BAYER_TYPES.end(), encoding) != BAYER_TYPES.end();
40 | }
41 |
42 | //-----------------------------------------------------------------------------
43 | // Wrapper methods (CPU)
44 | //-----------------------------------------------------------------------------
45 | void DebayerModule::debayer(cv::Mat& image, std::string& encoding) {
46 | cv::Mat out;
47 | // We only apply demosaicing (debayer) if the format is valid
48 | if (encoding == "bayer_bggr8") {
49 | cv::demosaicing(image, out, cv::COLOR_BayerBG2BGR);
50 | image = out;
51 | encoding = "bgr8";
52 | cv::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
53 |
54 | } else if (encoding == "bayer_gbrg8") {
55 | cv::demosaicing(image, out, cv::COLOR_BayerGB2BGR);
56 | image = out;
57 | encoding = "bgr8";
58 | cv::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
59 |
60 | } else if (encoding == "bayer_grbg8") {
61 | cv::demosaicing(image, out, cv::COLOR_BayerGR2BGR);
62 | image = out;
63 | encoding = "bgr8";
64 | cv::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
65 |
66 | } else if (encoding == "bayer_rggb8") {
67 | cv::demosaicing(image, out, cv::COLOR_BayerRG2BGR);
68 | image = out;
69 | encoding = "bgr8";
70 | cv::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
71 |
72 | } else if (encoding == "rgb8") {
73 | cv::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
74 | }
75 | // We ignore non-bayer encodings
76 | else if (isBayerEncoding(encoding)) {
77 | throw std::invalid_argument("Encoding [" + encoding + "] is a valid pattern but is not supported!");
78 | }
79 | }
80 |
// Stores a copy of the debayered image for later retrieval via getImage().
void DebayerModule::saveDebayeredImage(cv::Mat& image) {
  image.copyTo(image_);
}
84 |
85 | //-----------------------------------------------------------------------------
86 | // Wrapper methods (GPU)
87 | //-----------------------------------------------------------------------------
88 | #ifdef HAS_CUDA
89 | void DebayerModule::debayer(cv::cuda::GpuMat& image, std::string& encoding) {
90 | cv::cuda::GpuMat out;
91 | // We only apply demosaicing (debayer) if the format is valid
92 | if (encoding == "bayer_bggr8") {
93 | cv::cuda::demosaicing(image, out, cv::cuda::COLOR_BayerBG2BGR_MHT);
94 | image = out;
95 | encoding = "bgr8";
96 |
97 | } else if (encoding == "bayer_gbrg8") {
98 | cv::cuda::demosaicing(image, out, cv::cuda::COLOR_BayerGB2BGR_MHT);
99 | image = out;
100 | encoding = "bgr8";
101 |
102 | } else if (encoding == "bayer_grbg8") {
103 | cv::cuda::demosaicing(image, out, cv::cuda::COLOR_BayerGR2BGR_MHT);
104 | image = out;
105 | encoding = "bgr8";
106 |
107 | } else if (encoding == "bayer_rggb8") {
108 | cv::cuda::demosaicing(image, out, cv::cuda::COLOR_BayerRG2BGR_MHT);
109 | image = out;
110 | encoding = "bgr8";
111 |
112 | } else if (encoding == "rgb8") {
113 | cv::cuda::cvtColor(image, image, cv::COLOR_RGB2BGR); // Fix because apparently the CPU demosaicing produces RGB
114 | encoding = "bgr8";
115 | }
116 | // We ignore non-bayer encodings
117 | else if (isBayerEncoding(encoding)) {
118 | throw std::invalid_argument("Encoding [" + encoding + "] is a valid pattern but is not supported!");
119 | }
120 | }
121 |
// Downloads the debayered GPU image into the CPU-side member read by getImage().
void DebayerModule::saveDebayeredImage(cv::cuda::GpuMat& image) {
  image.download(image_);
}
125 |
126 | #endif
127 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/flip.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
// Constructs the module enabled by default; use_gpu is stored for the CUDA code paths.
FlipModule::FlipModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {}
11 |
// Enables or disables this module.
void FlipModule::enable(bool enabled) {
  enabled_ = enabled;
}
15 |
// Returns whether the module is currently enabled.
bool FlipModule::enabled() const {
  return enabled_;
}
19 |
20 | //-----------------------------------------------------------------------------
21 | // Setters
22 | //-----------------------------------------------------------------------------
// Sets the rotation angle in degrees; flip() only acts on 90, 180 or 270.
void FlipModule::setAngle(const int& angle) {
  angle_ = angle;
}
26 |
27 | //-----------------------------------------------------------------------------
28 | // Getters
29 | //-----------------------------------------------------------------------------
// Deep copy of the last image stored by saveFlippedImage().
cv::Mat FlipModule::getImage() const {
  return image_.clone();
}
33 |
34 | //-----------------------------------------------------------------------------
35 | // Wrapper methods (CPU)
36 | //-----------------------------------------------------------------------------
37 | void FlipModule::flip(cv::Mat& image) {
38 | // From https://stackoverflow.com/a/42359233
39 | cv::Mat out;
40 | if (angle_ == 90) { // clockwise rotation
41 | cv::transpose(image, out);
42 | cv::flip(out, out, 1);
43 | image = out;
44 |
45 | } else if (angle_ == 180) {
46 | cv::flip(image, out, -1); // negative numbers flip x and y
47 | image = out;
48 |
49 | } else if (angle_ == 270) { // counter-clockwise rotation
50 | cv::transpose(image, out);
51 | cv::flip(out, out, 0);
52 | image = out;
53 |
54 | } else {
55 | // Do nothing
56 |
57 | }
58 | }
59 |
// Stores a copy of the flipped image for later retrieval via getImage().
void FlipModule::saveFlippedImage(cv::Mat& image) {
  image.copyTo(image_);
}
63 |
64 | //-----------------------------------------------------------------------------
65 | // Wrapper methods (GPU)
66 | //-----------------------------------------------------------------------------
67 | #ifdef HAS_CUDA
68 | void FlipModule::flip(cv::cuda::GpuMat& image) {
69 | cv::cuda::GpuMat out;
70 |
71 | if (angle_ == 90) { // clockwise rotation
72 | cv::cuda::transpose(image, out);
73 | cv::cuda::flip(out, out, 1);
74 | image = out;
75 |
76 | } else if (angle_ == 180) {
77 | cv::cuda::flip(image, out, -1); // negative numbers flip x and y
78 | image = out;
79 |
80 | } else if (angle_ == 270) { // counter-clockwise rotation
81 | cv::cuda::transpose(image, out);
82 | cv::cuda::flip(out, out, 0);
83 | image = out;
84 |
85 | } else {
86 | // Do nothing
87 | }
88 | }
89 |
// Downloads the flipped GPU image into the CPU-side member read by getImage().
void FlipModule::saveFlippedImage(cv::cuda::GpuMat& image) {
  image.download(image_);
}
93 | #endif
94 |
95 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/gamma_correction.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
// Constructs the module enabled by default and precomputes the gamma LUT
// with the current (default) exponent k_.
GammaCorrectionModule::GammaCorrectionModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {
  init();
}
13 |
// Enables or disables this module.
void GammaCorrectionModule::enable(bool enabled) {
  enabled_ = enabled;
}
17 |
// Returns whether the module is currently enabled.
bool GammaCorrectionModule::enabled() const {
  return enabled_;
}
21 |
22 | //-----------------------------------------------------------------------------
23 | // Setters
24 | //-----------------------------------------------------------------------------
// Selects the gamma correction method ("custom" LUT vs default — see the
// gammaCorrect* wrappers).
void GammaCorrectionModule::setMethod(const std::string& method) {
  method_ = method;
}
28 |
29 | void GammaCorrectionModule::setK(const double& k) {
30 | k_ = k;
31 | is_forward_ = (k_ <= 1.0 ? true : false);
32 | init();
33 | }
34 |
// Builds the 256-entry gamma lookup table: lut[i] = 255 * (i/255)^k_,
// saturated to [0, 255]. Mirrors it to the GPU when that path is active.
void GammaCorrectionModule::init() {
  cpu_lut_ = cv::Mat(1, 256, CV_8U);

  for (int i = 0; i < 256; i++) {
    float f = i / 255.0;
    f = pow(f, k_);
    cpu_lut_.at(i) = cv::saturate_cast(f * 255.0);
  }

#ifdef HAS_CUDA
  if (use_gpu_) {
    // GPU-side copy of the same LUT
    gpu_lut_ = cv::cuda::createLookUpTable(cpu_lut_);
  }
#endif
}
50 |
51 | //-----------------------------------------------------------------------------
52 | // White balance wrapper methods (CPU)
53 | //-----------------------------------------------------------------------------
// Applies the precomputed gamma LUT to the image in place.
void GammaCorrectionModule::gammaCorrectCustom(cv::Mat& image) {
  cv::LUT(image, cpu_lut_, image);
}
57 |
// CPU "default" method: no separate implementation, delegates to the LUT path.
void GammaCorrectionModule::gammaCorrectDefault(cv::Mat& image) {
  gammaCorrectCustom(image);
}
61 |
62 | //-----------------------------------------------------------------------------
63 | // White balance wrapper methods (GPU)
64 | //-----------------------------------------------------------------------------
65 | #ifdef HAS_CUDA
// Applies the GPU-resident gamma LUT to the image in place.
void GammaCorrectionModule::gammaCorrectCustom(cv::cuda::GpuMat& image) {
  gpu_lut_->transform(image, image);
}
69 |
// GPU "default" method: uses OpenCV's built-in gamma correction; direction
// (forward/inverse) follows is_forward_, which setK derives from k_.
void GammaCorrectionModule::gammaCorrectDefault(cv::cuda::GpuMat& image) {
  cv::cuda::GpuMat out;
  cv::cuda::gammaCorrection(image, out, is_forward_);
  image = out;
}
75 | #endif
76 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/undistortion.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 |
8 | namespace raw_image_pipeline {
9 |
// Constructs the module enabled by default with balance 0.0 and FOV scale 1.0
// (see init() for how these feed estimateNewCameraMatrixForUndistortRectify).
UndistortionModule::UndistortionModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu), rect_balance_(0.0), rect_fov_scale_(1.0) {}
11 |
// Enables or disables this module.
void UndistortionModule::enable(bool enabled) {
  enabled_ = enabled;
}
15 |
// Returns whether the module is currently enabled.
bool UndistortionModule::enabled() const {
  return enabled_;
}
19 |
20 | //-----------------------------------------------------------------------------
21 | // Setters
22 | //-----------------------------------------------------------------------------
// Sets both the distorted and rectified image sizes and rebuilds the remap tables.
void UndistortionModule::setImageSize(int width, int height) {
  dist_image_size_ = cv::Size(width, height);
  rect_image_size_ = cv::Size(width, height);
  init();
}
28 |
// Sets only the rectified (output) image size and rebuilds the remap tables.
void UndistortionModule::setNewImageSize(int width, int height) {
  rect_image_size_ = cv::Size(width, height);
  init();
}
33 |
// Sets the fisheye balance in [0, 1] (min vs max focal length) and rebuilds the maps.
void UndistortionModule::setBalance(double balance) {
  rect_balance_ = balance;
  init();
}
38 |
// Sets the divisor applied to the new focal length and rebuilds the maps.
void UndistortionModule::setFovScale(double fov_scale) {
  rect_fov_scale_ = fov_scale;
  init();
}
43 |
// Sets the 3x3 intrinsics (row-major, 9 values); the rectified copy is
// subsequently recomputed by init().
void UndistortionModule::setCameraMatrix(const std::vector& camera_matrix) {
  dist_camera_matrix_ = cv::Matx33d(camera_matrix.data());
  rect_camera_matrix_ = cv::Matx33d(camera_matrix.data());
  init();
}
49 |
// Sets the 4 fisheye distortion coefficients; init() later zeroes the rect copy.
void UndistortionModule::setDistortionCoefficients(const std::vector& coefficients) {
  dist_distortion_coeff_ = cv::Matx14d(coefficients.data());
  rect_distortion_coeff_ = cv::Matx14d(coefficients.data());
  init();
}
55 |
// Sets the distortion model name reported by the get*DistortionModel() getters.
void UndistortionModule::setDistortionModel(const std::string& model) {
  dist_distortion_model_ = model;
  rect_distortion_model_ = model;
  init();
}
61 |
// Sets the 3x3 rectification matrix (row-major, 9 values) and rebuilds the maps.
void UndistortionModule::setRectificationMatrix(const std::vector& rectification_matrix) {
  dist_rectification_matrix_ = cv::Matx33d(rectification_matrix.data());
  rect_rectification_matrix_ = cv::Matx33d(rectification_matrix.data());
  init();
}
67 |
// Sets the 3x4 projection matrix (row-major, 12 values) and rebuilds the maps.
void UndistortionModule::setProjectionMatrix(const std::vector& projection_matrix) {
  dist_projection_matrix_ = cv::Matx34d(projection_matrix.data());
  rect_projection_matrix_ = cv::Matx34d(projection_matrix.data());
  init();
}
73 |
74 | //-----------------------------------------------------------------------------
75 | // Getters
76 | //-----------------------------------------------------------------------------
77 |
// Height of the rectified (output) image in pixels.
int UndistortionModule::getRectImageHeight() const {
  return rect_image_size_.height;
}
81 |
// Width of the rectified (output) image in pixels.
int UndistortionModule::getRectImageWidth() const {
  return rect_image_size_.width;
}
85 |
// Height of the distorted (input) image in pixels.
int UndistortionModule::getDistImageHeight() const {
  return dist_image_size_.height;
}
89 |
// Width of the distorted (input) image in pixels.
int UndistortionModule::getDistImageWidth() const {
  return dist_image_size_.width;
}
93 |
94 | std::string UndistortionModule::getRectDistortionModel() const {
95 | if (calibration_available_) {
96 | if (enabled_) {
97 | return "none";
98 | } else {
99 | return rect_distortion_model_;
100 | }
101 | } else {
102 | return "none";
103 | }
104 | }
105 |
106 | std::string UndistortionModule::getDistDistortionModel() const {
107 | if (calibration_available_) {
108 | return dist_distortion_model_;
109 | } else {
110 | return "none";
111 | }
112 | }
113 |
// Deep copy of the rectified 3x3 camera matrix.
cv::Mat UndistortionModule::getRectCameraMatrix() const {
  return cv::Mat(rect_camera_matrix_).clone();
}
117 |
// Deep copy of the distorted (input) 3x3 camera matrix.
cv::Mat UndistortionModule::getDistCameraMatrix() const {
  return cv::Mat(dist_camera_matrix_).clone();
}
121 |
// Deep copy of the rectified distortion coefficients (zeroed by init()).
cv::Mat UndistortionModule::getRectDistortionCoefficients() const {
  return cv::Mat(rect_distortion_coeff_).clone();
}
125 |
// Deep copy of the distorted (input) distortion coefficients.
cv::Mat UndistortionModule::getDistDistortionCoefficients() const {
  return cv::Mat(dist_distortion_coeff_).clone();
}
129 |
// Deep copy of the rectified rectification matrix (identity after init()).
cv::Mat UndistortionModule::getRectRectificationMatrix() const {
  return cv::Mat(rect_rectification_matrix_).clone();
}
133 |
// Deep copy of the distorted (input) rectification matrix.
cv::Mat UndistortionModule::getDistRectificationMatrix() const {
  return cv::Mat(dist_rectification_matrix_).clone();
}
137 |
// Deep copy of the rectified 3x4 projection matrix (upper 3x3 synced by init()).
cv::Mat UndistortionModule::getRectProjectionMatrix() const {
  return cv::Mat(rect_projection_matrix_).clone();
}
141 |
// Deep copy of the distorted (input) 3x4 projection matrix.
cv::Mat UndistortionModule::getDistProjectionMatrix() const {
  return cv::Mat(dist_projection_matrix_).clone();
}
145 |
// Deep copy of the image last stored by saveUndistortedImage().
cv::Mat UndistortionModule::getDistImage() const {
  return dist_image_.clone();
}
149 |
// Deep copy of the rectification mask.
// NOTE(review): rect_mask_ is never written in this translation unit — confirm
// it is filled elsewhere, otherwise this returns an empty Mat.
cv::Mat UndistortionModule::getRectMask() const {
  return rect_mask_.clone();
}
153 |
154 | //-----------------------------------------------------------------------------
155 | // Helper methods
156 | //-----------------------------------------------------------------------------
157 | void UndistortionModule::loadCalibration(const std::string& file_path) {
158 | std::cout << "Loading camera calibration from file " << file_path << std::endl;
159 |
160 | // Check if file exists
161 | if (boost::filesystem::exists(file_path)) {
162 | // Load calibration
163 | YAML::Node node = YAML::LoadFile(file_path);
164 | // Camera matrix
165 | setImageSize(utils::get(node, "image_width", 320), utils::get(node, "image_height", 240));
166 | setCameraMatrix(utils::get>(node["camera_matrix"], "data", std::vector()));
167 | setDistortionCoefficients(utils::get>(node["distortion_coefficients"], "data", std::vector()));
168 | setDistortionModel(utils::get(node, "distortion_model", "none"));
169 | setRectificationMatrix(utils::get>(node["rectification_matrix"], "data", std::vector()));
170 | setProjectionMatrix(utils::get>(node["projection_matrix"], "data", std::vector()));
171 |
172 | // Init rectify map
173 | init();
174 | calibration_available_ = true;
175 | }
176 | // If not, disable calibration available flag
177 | else {
178 | std::cout << "Warning: Calibration file doesn't exist" << std::endl;
179 | calibration_available_ = false;
180 |
181 | setImageSize(320, 240);
182 | setCameraMatrix({1.0, 0.0, 0.0, 0.0, //
183 | 0.0, 1.0, 0.0, 0.0, //
184 | 0.0, 0.0, 1.0, 0.0, //
185 | 0.0, 0.0, 0.0, 1.0});
186 | setDistortionCoefficients({0.0, 0.0, 0.0, 0.0});
187 | setDistortionModel("none");
188 | setRectificationMatrix({1.0, 0.0, 0.0, //
189 | 0.0, 1.0, 0.0, //
190 | 0.0, 0.0, 1.0});
191 | setProjectionMatrix({1.0, 0.0, 0.0, 0.0, //
192 | 0.0, 1.0, 0.0, 0.0, //
193 | 0.0, 0.0, 1.0, 0.0});
194 | }
195 | }
196 |
// Recomputes the rectified camera model and the undistortion remap tables
// from the current distorted-calibration members. Called by every setter and
// by loadCalibration(), so the maps always match the latest parameters.
void UndistortionModule::init() {
  cv::Mat new_camera_matrix;
  cv::fisheye::estimateNewCameraMatrixForUndistortRectify(dist_camera_matrix_,         // Intrinsics (distorted image)
                                                          dist_distortion_coeff_,      // Distortion (distorted image)
                                                          dist_image_size_,            // Image size (distorted image)
                                                          dist_rectification_matrix_,  // Rectification (distorted image)
                                                          new_camera_matrix,           // Intrinsics (new, undistorted image)
                                                          rect_balance_,  // Sets the new focal length in range between the min focal length
                                                                          // and the max focal length. Balance is in range of [0, 1]
                                                          rect_image_size_,  // Image size (new, undistorted image)
                                                          rect_fov_scale_    // Divisor for new focal length
  );
  rect_camera_matrix_ = cv::Matx33d((double*)new_camera_matrix.ptr());

  // Initialize undistortion maps
  cv::fisheye::initUndistortRectifyMap(dist_camera_matrix_,         // Intrinsics (distorted image)
                                       dist_distortion_coeff_,      // Distortion (distorted image)
                                       dist_rectification_matrix_,  // Rectification (distorted image)
                                       rect_camera_matrix_,         // New camera matrix (undistorted image)
                                       dist_image_size_,            // Image resolution (distorted image)
                                       CV_32F,                      // Map type
                                       undistortion_map_x_,         // Undistortion map for X axis
                                       undistortion_map_y_          // Undistortion map for Y axis
  );

  // Update other undistorted (rect) values: the rectified image has no
  // residual distortion and its rectification is the identity
  rect_distortion_coeff_ = cv::Matx14d::zeros();
  rect_rectification_matrix_ = cv::Matx33d::eye();
  // Keep the upper-left 3x3 of the projection matrix in sync with the new intrinsics
  for (size_t i = 0; i < 3; i++) {
    for (size_t j = 0; j < 3; j++) {
      rect_projection_matrix_(i, j) = rect_camera_matrix_(i, j);
    }
  }

#ifdef HAS_CUDA
  if (use_gpu_) {
    // Upload everything to GPU
    gpu_undistortion_map_x_.upload(undistortion_map_x_);
    gpu_undistortion_map_y_.upload(undistortion_map_y_);
  }
#endif
}
239 |
240 | void UndistortionModule::undistort(cv::Mat& image) {
241 | cv::Mat out;
242 | cv::remap(image, out, undistortion_map_x_, undistortion_map_y_, cv::InterpolationFlags::INTER_LINEAR, cv::BorderTypes::BORDER_CONSTANT,
243 | 0);
244 | image = out;
245 | }
246 |
// Stores a copy of the image into dist_image_ (returned by getDistImage()).
// NOTE(review): despite the name, the member suggests the pre-undistortion
// image is expected here — confirm against the caller.
void UndistortionModule::saveUndistortedImage(cv::Mat& image) {
  image.copyTo(dist_image_);
}
250 |
251 | #ifdef HAS_CUDA
252 | void UndistortionModule::undistort(cv::cuda::GpuMat& image) {
253 | cv::cuda::GpuMat out;
254 | cv::cuda::remap(image, out, gpu_undistortion_map_x_, gpu_undistortion_map_y_, cv::InterpolationFlags::INTER_LINEAR, cv::BORDER_CONSTANT,
255 | 0);
256 | image = out;
257 | }
258 |
// Downloads the GPU image into the CPU-side dist_image_ member.
// NOTE(review): as in the CPU overload, the member is named dist_image_
// although the method says "Undistorted" — confirm the intended semantics.
void UndistortionModule::saveUndistortedImage(cv::cuda::GpuMat& image) {
  image.download(dist_image_);
}
262 | #endif
263 |
264 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/vignetting_correction.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
#include <raw_image_pipeline/modules/vignetting_correction.hpp>
7 |
8 | namespace raw_image_pipeline {
9 |
10 | VignettingCorrectionModule::VignettingCorrectionModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu) {}
11 |
// Enables or disables this module at runtime.
void VignettingCorrectionModule::enable(bool enabled) {
  enabled_ = enabled;
}
15 |
// Returns whether the module is currently enabled.
bool VignettingCorrectionModule::enabled() const {
  return enabled_;
}
19 |
20 | //-----------------------------------------------------------------------------
21 | // Setters
22 | //-----------------------------------------------------------------------------
23 | void VignettingCorrectionModule::setParameters(const double& scale, const double& a2, const double& a4) {
24 | vignetting_correction_scale_ = scale;
25 | vignetting_correction_a2_ = a2;
26 | vignetting_correction_a4_ = a4;
27 | }
28 |
29 | //-----------------------------------------------------------------------------
30 | // Helpers
31 | //-----------------------------------------------------------------------------
32 | void VignettingCorrectionModule::precomputeVignettingMask(int height, int width) {
33 | if (height == vignetting_mask_f_.rows && width == vignetting_mask_f_.cols) return;
34 |
35 | // Initialize mask
36 | vignetting_mask_f_.create(width, height, CV_32F);
37 |
38 | double cx = width / 2.0;
39 | double cy = height / 2.0;
40 |
41 | for (int y = 0; y < height; y++) {
42 | for (int x = 0; x < width; x++) {
43 | double r = std::sqrt(std::pow(y - cy, 2) + std::pow(x - cx, 2));
44 | double k = pow(r, 2) * vignetting_correction_a2_ + pow(r, 4) * vignetting_correction_a4_;
45 | vignetting_mask_f_.at(x, y) = k;
46 | // mask.at(x, y) = k;
47 | }
48 | }
49 | double max;
50 | cv::minMaxLoc(vignetting_mask_f_, NULL, &max, NULL, NULL);
51 | vignetting_mask_f_ = max > 0 ? vignetting_mask_f_ / max : vignetting_mask_f_;
52 | // Scale correction
53 | vignetting_mask_f_ = vignetting_mask_f_ * vignetting_correction_scale_;
54 | // Add 1
55 | vignetting_mask_f_ += 1.0;
56 |
57 | // Upload to gpu
58 | #ifdef HAS_CUDA
59 | if (use_gpu_) {
60 | gpu_vignetting_mask_f_.upload(vignetting_mask_f_);
61 | }
62 | #endif
63 | }
64 |
65 | //-----------------------------------------------------------------------------
66 | // Wrapper methods (CPU)
67 | //-----------------------------------------------------------------------------
68 | void VignettingCorrectionModule::correct(cv::Mat& image) {
69 | precomputeVignettingMask(image.cols, image.rows);
70 |
71 | // COnvert to Lab to apply correction to L channel
72 | cv::Mat corrected_image;
73 | cv::cvtColor(image, corrected_image, cv::COLOR_BGR2Lab);
74 |
75 | // Split the image into 3 channels
76 | std::vector vec_channels(3);
77 | cv::split(corrected_image, vec_channels);
78 |
79 | // Floating point version
80 | // Convert image to float
81 | cv::Mat image_f_;
82 | vec_channels[0].convertTo(image_f_, CV_32FC1);
83 | // Multiply by vignetting mask
84 | cv::multiply(image_f_, vignetting_mask_f_, image_f_, 1.0, CV_32FC1);
85 | vec_channels[0].release();
86 | image_f_.convertTo(vec_channels[0], CV_8UC1);
87 |
88 | // Merge 3 channels in the vector to form the color image in LAB color space.
89 | cv::merge(vec_channels, corrected_image);
90 |
91 | // Convert the histogram equalized image from LAB to BGR color space again
92 | cv::cvtColor(corrected_image, image, cv::COLOR_Lab2BGR);
93 | }
94 |
95 | //-----------------------------------------------------------------------------
96 | // Wrapper methods (GPU)
97 | //-----------------------------------------------------------------------------
98 | #ifdef HAS_CUDA
99 | void VignettingCorrectionModule::correct(cv::cuda::GpuMat& image) {
100 | precomputeVignettingMask(image.cols, image.rows);
101 |
102 | // COnvert to Lab to apply correction to L channel
103 | cv::cuda::GpuMat corrected_image;
104 | cv::cuda::cvtColor(image, corrected_image, cv::COLOR_BGR2Lab);
105 |
106 | // Split the image into 3 channels
107 | std::vector vec_channels(3);
108 | cv::cuda::split(corrected_image, vec_channels);
109 |
110 | // Floating point version
111 | // Convert image to float
112 | cv::cuda::GpuMat image_f_;
113 | vec_channels[0].convertTo(image_f_, CV_32FC1);
114 | // Multiply by vignetting mask
115 | cv::cuda::multiply(image_f_, gpu_vignetting_mask_f_, image_f_, 1.0, CV_32FC1);
116 | vec_channels[0].release();
117 | image_f_.convertTo(vec_channels[0], CV_8UC1);
118 |
119 | // Merge 3 channels in the vector to form the color image in LAB color space.
120 | cv::cuda::merge(vec_channels, corrected_image);
121 |
122 | // Convert the histogram equalized image from LAB to BGR color space again
123 | cv::cuda::cvtColor(corrected_image, image, cv::COLOR_Lab2BGR);
124 | }
125 | #endif
126 |
127 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline/src/raw_image_pipeline/modules/white_balance.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
#include <raw_image_pipeline/modules/white_balance.hpp>
7 |
8 | namespace raw_image_pipeline {
9 |
// White balance defaults to the "ccc" (convolutional color constancy) method.
// NOTE(review): the template argument of make_unique is missing here (likely
// lost in an export); it should name the CCC white-balance type stored in
// ccc_ — confirm against the class header.
WhiteBalanceModule::WhiteBalanceModule(bool use_gpu) : enabled_(true), use_gpu_(use_gpu), method_("ccc") {
  ccc_ = std::make_unique(use_gpu);
}
13 |
// Enables or disables this module at runtime.
void WhiteBalanceModule::enable(bool enabled) {
  enabled_ = enabled;
}
17 |
// Returns whether the module is currently enabled.
bool WhiteBalanceModule::enabled() const {
  return enabled_;
}
21 |
22 | //-----------------------------------------------------------------------------
23 | // Setters
24 | //-----------------------------------------------------------------------------
// Selects the white-balance algorithm by name.
// NOTE(review): valid values presumably match the balanceWhite* wrappers in
// this file ("ccc" plus simple/grey-world/learned/pca variants) — confirm
// the exact strings against the dispatch site.
void WhiteBalanceModule::setMethod(const std::string& method) {
  method_ = method;
}
28 |
// Percentile of pixels clipped by the "simple" white balance (SimpleWB::setP).
void WhiteBalanceModule::setSaturationPercentile(const double& percentile) {
  clipping_percentile_ = percentile;
}
32 |
// Saturation thresholds. bright_thr is forwarded to the grey-world and
// learning-based OpenCV implementations; dark_thr is stored but its use is
// not visible in this file.
void WhiteBalanceModule::setSaturationThreshold(const double& bright_thr, const double& dark_thr) {
  saturation_bright_thr_ = bright_thr;
  saturation_dark_thr_ = dark_thr;
}
37 |
// Enables temporal smoothing of the white-balance estimate (used by "ccc").
void WhiteBalanceModule::setTemporalConsistency(bool enabled) {
  temporal_consistency_ = enabled;
}
41 |
// Resets the temporal filter of the CCC estimator; no-op for other methods.
void WhiteBalanceModule::resetTemporalConsistency() {
  if (method_ == "ccc") {
    // cccWBPtr_->resetTemporalConsistency();
    ccc_->resetTemporalConsistency();
  }
}
48 |
49 | //-----------------------------------------------------------------------------
50 | // White balance wrapper methods (CPU)
51 | //-----------------------------------------------------------------------------
52 | void WhiteBalanceModule::balanceWhiteSimple(cv::Mat& image) {
53 | cv::Ptr wb;
54 | wb = cv::xphoto::createSimpleWB();
55 | wb->setP(clipping_percentile_);
56 | wb->balanceWhite(image, image);
57 | }
58 |
59 | void WhiteBalanceModule::balanceWhiteGreyWorld(cv::Mat& image) {
60 | cv::Ptr wb;
61 | wb = cv::xphoto::createGrayworldWB();
62 | wb->setSaturationThreshold(saturation_bright_thr_);
63 | wb->balanceWhite(image, image);
64 | }
65 |
66 | void WhiteBalanceModule::balanceWhiteLearned(cv::Mat& image) {
67 | cv::Ptr wb;
68 | wb = cv::xphoto::createLearningBasedWB();
69 | wb->setSaturationThreshold(saturation_bright_thr_);
70 | wb->balanceWhite(image, image);
71 | }
72 |
73 | void WhiteBalanceModule::balanceWhitePca(cv::Mat& image) {
74 | // Note: BGR input
75 |
76 | // Split channels
77 | std::vector split_img;
78 | cv::split(image, split_img);
79 | split_img[0].convertTo(split_img[0], CV_32FC1);
80 | split_img[2].convertTo(split_img[2], CV_32FC1);
81 |
82 | // Get elementwise squared values
83 | cv::Mat I_r_2;
84 | cv::Mat I_b_2;
85 | cv::multiply(split_img[0], split_img[0], I_b_2);
86 | cv::multiply(split_img[2], split_img[2], I_r_2);
87 |
88 | // Get summed up channels
89 | const double sum_I_r_2 = cv::sum(I_r_2)[0];
90 | const double sum_I_b_2 = cv::sum(I_b_2)[0];
91 | const double sum_I_g = cv::sum(split_img[1])[0];
92 | const double sum_I_r = cv::sum(split_img[2])[0];
93 | const double sum_I_b = cv::sum(split_img[0])[0];
94 |
95 | // Get max values of channels
96 | double max_I_r, max_I_g, max_I_b, max_I_r_2, max_I_b_2;
97 | double min_I_r, min_I_g, min_I_b, min_I_r_2, min_I_b_2;
98 | cv::minMaxLoc(split_img[2], &min_I_r, &max_I_r); // R
99 | cv::minMaxLoc(split_img[1], &min_I_g, &max_I_g); // G
100 | cv::minMaxLoc(split_img[0], &min_I_b, &max_I_b); // B
101 | cv::minMaxLoc(I_r_2, &min_I_r_2, &max_I_r_2);
102 | cv::minMaxLoc(I_b_2, &min_I_b_2, &max_I_b_2);
103 |
104 | // Prepare Matrices for PCA method
105 | Eigen::Matrix2f mat_temp_b;
106 | mat_temp_b << sum_I_b_2, sum_I_b, max_I_b_2, max_I_b;
107 | Eigen::Matrix2f mat_temp_r;
108 | mat_temp_r << sum_I_r_2, sum_I_r, max_I_r_2, max_I_r;
109 | Eigen::Vector2f vec_temp_g;
110 | vec_temp_g << sum_I_g, max_I_g;
111 |
112 | // PCA method calculation
113 | Eigen::Vector2f vec_out_b, vec_out_r;
114 | vec_out_b = mat_temp_b.inverse() * vec_temp_g;
115 | vec_out_r = mat_temp_r.inverse() * vec_temp_g;
116 | cv::Mat b_point = vec_out_b[0] * I_b_2 + vec_out_b[1] * split_img[0];
117 | cv::Mat r_point = vec_out_r[0] * I_r_2 + vec_out_r[1] * split_img[2];
118 |
119 | // Saturate values above 255
120 | cv::threshold(b_point, b_point, 255, 255, cv::THRESH_TRUNC);
121 | cv::threshold(r_point, r_point, 255, 255, cv::THRESH_TRUNC);
122 |
123 | // Convert back to UINT8
124 | b_point.convertTo(b_point, CV_8UC1);
125 | r_point.convertTo(r_point, CV_8UC1);
126 |
127 | // Merge channels
128 | std::vector channels;
129 | channels.push_back(b_point);
130 | channels.push_back(split_img[1]);
131 | channels.push_back(r_point);
132 | cv::Mat merged_image;
133 | cv::merge(channels, merged_image);
134 |
135 | image = merged_image;
136 | }
137 |
138 | //-----------------------------------------------------------------------------
139 | // White balance wrapper methods (GPU)
140 | //-----------------------------------------------------------------------------
141 | #ifdef HAS_CUDA
// GPU wrapper: round-trips through the CPU implementation (download,
// balance, upload) — no CUDA path exists for this method here.
void WhiteBalanceModule::balanceWhiteSimple(cv::cuda::GpuMat& image) {
  cv::Mat cpu_image;
  image.download(cpu_image);
  balanceWhiteSimple(cpu_image);
  image.upload(cpu_image);
}
148 |
// GPU wrapper: round-trips through the CPU grey-world implementation.
void WhiteBalanceModule::balanceWhiteGreyWorld(cv::cuda::GpuMat& image) {
  cv::Mat cpu_image;
  image.download(cpu_image);
  balanceWhiteGreyWorld(cpu_image);
  image.upload(cpu_image);
}
155 |
// GPU wrapper: round-trips through the CPU learning-based implementation.
void WhiteBalanceModule::balanceWhiteLearned(cv::cuda::GpuMat& image) {
  cv::Mat cpu_image;
  image.download(cpu_image);
  balanceWhiteLearned(cpu_image);
  image.upload(cpu_image);
}
162 |
// GPU wrapper: round-trips through the CPU PCA implementation.
void WhiteBalanceModule::balanceWhitePca(cv::cuda::GpuMat& image) {
  cv::Mat cpu_image;
  image.download(cpu_image);
  balanceWhitePca(cpu_image);
  image.upload(cpu_image);
}
169 | #endif
170 |
171 | } // namespace raw_image_pipeline
--------------------------------------------------------------------------------
/raw_image_pipeline_python/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | # Licensed under the MIT license. See LICENSE file in the project root for details.
4 | #
5 |
cmake_minimum_required(VERSION 3.1)
project(raw_image_pipeline_python)

set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "-std=c++14 -Wall -Wextra -O3 -DNDEBUG")

set(PACKAGE_DEPENDENCIES
  raw_image_pipeline
  pybind11_catkin
)

find_package(catkin REQUIRED
  COMPONENTS
    ${PACKAGE_DEPENDENCIES}
)

catkin_package(
  LIBRARIES
  CATKIN_DEPENDS
    ${PACKAGE_DEPENDENCIES}
)

# Add cvnp to add support for opencv-numpy conversions
add_subdirectory(thirdparty/cvnp)

# Based on https://github.com/ipab-slmc/exotica/blob/master/exotica_python/CMakeLists.txt
pybind11_add_module(_py_raw_image_pipeline MODULE
  src/raw_image_pipeline_python.cpp
)
target_link_libraries(_py_raw_image_pipeline PRIVATE cvnp ${PYTHON_LIBRARIES} ${catkin_LIBRARIES} ${STATIC_NVENC} ${SYSTEM_LIBS})
target_compile_options(_py_raw_image_pipeline PRIVATE "-Wno-deprecated-declarations") # because we are refactoring and triggering this ourselves.
target_include_directories(_py_raw_image_pipeline PUBLIC ${catkin_INCLUDE_DIRS} ${CUDA_INCLUDE_DIRS})
add_dependencies(_py_raw_image_pipeline ${catkin_EXPORTED_TARGETS})

set_target_properties(_py_raw_image_pipeline PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CATKIN_GLOBAL_PYTHON_DESTINATION}/py_raw_image_pipeline)
set(PYTHON_LIB_DIR ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_PYTHON_DESTINATION}/py_raw_image_pipeline)
# Copy the built extension next to the pure-python package.
# Fix: restored the $<TARGET_FILE:...> generator expression — a bare "$" is
# not a valid source for "cmake -E copy".
add_custom_command(TARGET _py_raw_image_pipeline
  POST_BUILD
  COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_LIB_DIR}
  COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:_py_raw_image_pipeline> ${PYTHON_LIB_DIR}/_py_raw_image_pipeline.so
  WORKING_DIRECTORY ${CATKIN_DEVEL_PREFIX}
  COMMENT "Copying library files to python directory")

catkin_python_setup()
catkin_install_python(PROGRAMS scripts/apply_pipeline.py
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
install(TARGETS _py_raw_image_pipeline LIBRARY DESTINATION ${CATKIN_GLOBAL_PYTHON_DESTINATION}/py_raw_image_pipeline)
--------------------------------------------------------------------------------
/raw_image_pipeline_python/config/board_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leggedrobotics/raw_image_pipeline/556c315a256061ef11bf52a0a74a87736cffe905/raw_image_pipeline_python/config/board_gt.png
--------------------------------------------------------------------------------
/raw_image_pipeline_python/config/color_calibration_checker.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leggedrobotics/raw_image_pipeline/556c315a256061ef11bf52a0a74a87736cffe905/raw_image_pipeline_python/config/color_calibration_checker.pdf
--------------------------------------------------------------------------------
/raw_image_pipeline_python/config/color_ref.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leggedrobotics/raw_image_pipeline/556c315a256061ef11bf52a0a74a87736cffe905/raw_image_pipeline_python/config/color_ref.jpg
--------------------------------------------------------------------------------
/raw_image_pipeline_python/package.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<package format="2">
  <name>raw_image_pipeline_python</name>
  <version>0.0.0</version>
  <description>Python bindings of raw_image_pipeline</description>

  <!-- TODO(review): maintainer email was lost in the export; restore it -->
  <author>Matias Mattamala</author>
  <maintainer email="todo@todo.todo">Matias Mattamala</maintainer>

  <license>Proprietary</license>

  <buildtool_depend>catkin</buildtool_depend>

  <depend>raw_image_pipeline</depend>
  <depend>pybind11_catkin</depend>

</package>
--------------------------------------------------------------------------------
/raw_image_pipeline_python/scripts/apply_pipeline.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | #
4 | # Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
5 | # Licensed under the MIT license. See LICENSE file in the project root for details.
6 | #
7 |
8 | from py_raw_image_pipeline import RawImagePipeline
9 | import cv2
10 | import pathlib
11 | import rospkg
12 |
13 |
def main():
    """Demo: run the raw_image_pipeline on a sample image, print the camera
    parameters before/after rectification, and save the processed outputs."""
    # Load image (sample shipped with the white-balance package)
    rospack = rospkg.RosPack()
    image_path = rospack.get_path("raw_image_pipeline_white_balance") + "/data/alphasense.png"
    img = cv2.imread(image_path, cv2.IMREAD_ANYCOLOR)

    # Set config files
    calib_file = rospack.get_path("raw_image_pipeline") + "/config/alphasense_calib_example.yaml"
    color_calib_file = rospack.get_path("raw_image_pipeline") + "/config/alphasense_color_calib_example.yaml"
    param_file = rospack.get_path("raw_image_pipeline") + "/config/pipeline_params_example.yaml"

    # Create image Proc (first argument: use_gpu=False -> CPU pipeline)
    proc = RawImagePipeline(False, param_file, calib_file, color_calib_file)

    # Uncomment below to show calibration data
    # Original (distorted) camera parameters
    print("Original parameters:")
    print(" dist_image_height:", proc.get_dist_image_height())
    print(" dist_image_width:", proc.get_dist_image_width())
    print(" dist_distortion_model:", proc.get_dist_distortion_model())
    print(" dist_camera_matrix:", proc.get_dist_camera_matrix())
    print(" dist_distortion_coefficients:", proc.get_dist_distortion_coefficients())
    print(" dist_rectification_matrix:", proc.get_dist_rectification_matrix())
    print(" dist_projection_matrix:", proc.get_dist_projection_matrix())

    # Rectified (undistorted) camera parameters
    print("\nNew parameters:")
    print(" rect_image_height:", proc.get_rect_image_height())
    print(" rect_image_width:", proc.get_rect_image_width())
    print(" rect_distortion_model:", proc.get_rect_distortion_model())
    print(" rect_camera_matrix:", proc.get_rect_camera_matrix())
    print(" rect_distortion_coefficients:", proc.get_rect_distortion_coefficients())
    print(" rect_rectification_matrix:", proc.get_rect_rectification_matrix())
    print(" rect_projection_matrix:", proc.get_rect_projection_matrix())

    # Apply pipeline without modifying input
    img2 = proc.process(img, "bgr8")

    # Apply pipeline changing the input (in place)
    proc.apply(img, "bgr8")

    # Save both outputs for comparison
    cv2.imwrite("output_apply.png", img)
    cv2.imwrite("output_process.png", img2)
58 |
59 |
60 | if __name__ == "__main__":
61 | main()
62 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/scripts/color_calibration.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | #
4 | # Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
5 | # Licensed under the MIT license. See LICENSE file in the project root for details.
6 | #
7 |
8 | # ArUco markers generated using https://chev.me/arucogen/
9 | # 4x4, markers 0, 1, 2, 3
10 | # marker size: 30 mm
11 |
12 | import cv2
13 | import cv2.aruco as aruco
14 | import argparse
15 | import matplotlib.pyplot as plt
16 | import numpy as np
17 | from pathlib import Path
18 | import os.path
19 | import os
20 | from os.path import join
21 | from scipy.optimize import least_squares, minimize, LinearConstraint, NonlinearConstraint
22 | from tqdm import tqdm
23 | from ruamel.yaml import YAML
24 |
# Geometry of the color-checker target; sizes are in pixels of the rectified crop.
COLOR_CHECKER_DIM = 24
SCALE_FACTOR = 4
TARGET_IMAGE_WIDTH = int(224 * SCALE_FACTOR)
TARGET_IMAGE_HEIGHT = int(160 * SCALE_FACTOR)
SQUARE_SIZE = int(30 * SCALE_FACTOR)
HALF_SQUARE_SIZE = int(SQUARE_SIZE / 2)
OFFSET = HALF_SQUARE_SIZE
MARGIN = int(2.5 * SCALE_FACTOR)
TARGET_IMAGE_SIZE = (
    (TARGET_IMAGE_WIDTH),
    (TARGET_IMAGE_HEIGHT),
)

# ArUco stuff
# NOTE(review): Dictionary_get / DetectorParameters_create belong to the
# legacy cv2.aruco API (removed in newer OpenCV releases) — confirm the
# OpenCV version this script is pinned to.
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
aruco_params = aruco.DetectorParameters_create()
# Canonical corner positions the four marker centers (ids 0..3) map to
aruco_target_pts = np.array(
    [
        [0, 0],
        [TARGET_IMAGE_WIDTH, 0],
        [0, TARGET_IMAGE_HEIGHT],
        [TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT],
    ]
)
49 |
50 |
def show_image(image_bgr: np.array) -> None:
    """Display a BGR image in a new matplotlib figure (blocking)."""
    as_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    plt.figure()
    plt.imshow(as_rgb)
    plt.show()
56 |
57 |
def show_calibration_result(ref_bgr: np.array, input_list: list, corr_list: np.array, save_path: str) -> None:
    """Save a comparison grid: one row per input as [input | corrected | reference].

    The figure is written to <save_path>/calibrated_images.png.
    """
    ref_rgb = cv2.cvtColor(ref_bgr, cv2.COLOR_BGR2RGB)

    N = len(input_list)
    scale = 2
    fig, axs = plt.subplots(N, 3, figsize=(scale * 3, scale * N))
    for i in range(N):
        input_rgb = cv2.cvtColor(input_list[i], cv2.COLOR_BGR2RGB)
        corr_rgb = cv2.cvtColor(corr_list[i], cv2.COLOR_BGR2RGB)
        try:
            axs[i][0].imshow(input_rgb)
            axs[i][1].imshow(corr_rgb)
            axs[i][2].imshow(ref_rgb)

            for ax in axs[i]:
                ax.set_xticks([])
                ax.set_yticks([])
        except Exception:
            # With N == 1, plt.subplots returns a 1-D axes array, so the
            # axs[i][j] indexing above raises; index the single row directly.
            axs[0].imshow(input_rgb)
            axs[1].imshow(corr_rgb)
            axs[2].imshow(ref_rgb)

    plt.axis("off")
    plt.tight_layout()
    plt.savefig(join(save_path, "calibrated_images.png"))
83 |
84 |
def apply_color_correction(color_correction: np.matrix, img: np.array) -> np.array:
    """Apply a linear color correction (3x3 matrix + bias) to an HWC image.

    Args:
        color_correction: dict with "matrix" (3x3) and "bias" (3x1) entries,
            as produced by find_color_calibration.
        img: 3-channel image in OpenCV HWC layout.

    Returns:
        Corrected image with the same shape as img (values are not clipped).
    """
    C = color_correction["matrix"]
    b = color_correction["bias"]

    # change from HWC to CHW (channels first)
    img_chw = np.einsum("ijk->kij", img)
    # reshape as a vector of channels
    img_channels = img_chw.reshape(3, -1)
    # apply transformation
    img_channels = C @ img_channels + 255 * b
    # Bug fix: the data is channels-first here, so reshape back to the CHW
    # shape; the previous reshape(img.shape) used the HWC shape, which
    # scrambled the pixel layout before the transpose below.
    img_chw = img_channels.reshape(img_chw.shape)
    # Return to opencv's HWC
    return np.einsum("kij->ijk", img_chw)
99 |
100 |
def get_color_centroids(img: np.array, dim=COLOR_CHECKER_DIM) -> np.array:
    """Locate the color checker via its 4 ArUco markers and sample each patch.

    Returns:
        (centroids, cropped_image, valid): centroids is an array with the
        per-patch median color (4 rows x 6 columns of patches), cropped_image
        is the rectified crop of the checker, and valid is False when the
        markers could not be detected.
    """
    # Compute ARUCO coordinates
    aruco_corners, ids, _ = aruco.detectMarkers(img, aruco_dict, parameters=aruco_params)
    frame_markers = aruco.drawDetectedMarkers(img.copy(), aruco_corners, ids)

    # If not all 4 markers were found, retry once on a sharpened image
    if len(aruco_corners) != 4:
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        sharp_img = cv2.filter2D(img, -1, kernel)
        aruco_corners, ids, _ = aruco.detectMarkers(sharp_img, aruco_dict, parameters=aruco_params)
        frame_markers = aruco.drawDetectedMarkers(sharp_img.copy(), aruco_corners, ids)

    if len(aruco_corners) != 4:
        # print(f"Couldn't detect all the markers")
        return [], [], False

    # Create list of aruco center points, ordered by marker id 0..3
    try:
        aruco_center_pts = []
        for i in range(4):
            # find marker
            idx = list(ids.squeeze()).index(i)
            aruco_center_pts.append(aruco_corners[idx][-1].mean(axis=0))
        aruco_center_pts = np.array(aruco_center_pts)
    except:
        # One of the ids 0..3 was missing (e.g. a spurious extra detection)
        return [], [], False

    # Compute homography mapping the marker centers to the canonical target
    aruco_corner_pts = np.concatenate([x[0] for x in aruco_corners])
    H, mask = cv2.findHomography(aruco_center_pts, aruco_target_pts, cv2.RANSAC, 5.0)
    img_cropped = cv2.warpPerspective(img, H, TARGET_IMAGE_SIZE)
    img_cropped = img_cropped[OFFSET : TARGET_IMAGE_HEIGHT - OFFSET, OFFSET : TARGET_IMAGE_WIDTH - OFFSET]
    # show_image(img_cropped)

    # Compute the median for all pixels within each blob
    rgb_centroids = []
    x = int(MARGIN + HALF_SQUARE_SIZE)
    y = int(MARGIN + HALF_SQUARE_SIZE)
    s = int(HALF_SQUARE_SIZE * 0.5)
    d = int(SQUARE_SIZE + MARGIN)
    for i in range(4):  # marker has 24 squares
        x = int(MARGIN + HALF_SQUARE_SIZE)
        for j in range(6):
            # print(f"i: {i}, j: {j}, x={x}, y={y}")
            # centroid = img_cropped[y-s:y+s, x-s:x+s].copy().reshape(-1, 3) #.mean(axis=(0,1))
            centroid = np.median(img_cropped[y - s : y + s, x - s : x + s], axis=(0, 1))
            rgb_centroids.append(centroid)
            # img_cropped[y-s:y+s, x-s:x+s] = img_cropped[y-s:y+s, x-s:x+s] * 0
            x = x + d
        y = y + d
    # show_image(img_cropped)

    # Return the centroids
    return np.array(rgb_centroids), img_cropped, True
    # return np.concatenate(rgb_centroids)
155 |
156 |
def find_color_calibration(input: np.array, reference: np.array, loss="linear", compute_bias=False) -> np.array:
    """Fit a 3x3 color-correction matrix C (and optional bias b) such that
    C @ input + 255*b approximates reference.

    Args:
        input: (N, 3) color samples to be corrected.
        reference: (N, 3) target color samples; same shape as input.
        loss: loss passed to scipy.optimize.least_squares
            ('linear', 'soft_l1', 'huber', 'cauchy', 'arctan').
        compute_bias: if True, also fit the additive bias term.

    Returns:
        dict with "matrix" (3x3 float32), "bias" (3x1 float32) and the raw
        scipy result under "sol".
    """
    # check input size
    assert input.shape == reference.shape

    # Work on (3, N) columns of colors
    input = input.transpose()
    reference = reference.transpose()

    # Residual for the optimizer; x packs [C.flatten(), b.flatten()]
    def fun(x: np.array):
        # sum || C * input (+ 255*b) - reference ||
        C = np.copy(x[:9].reshape((3, 3)))
        bias = np.copy(x[9:].reshape((3, 1)))
        color_corrected = np.matmul(C, input)

        if compute_bias:
            color_corrected = color_corrected + 255 * bias

        # Return residual (when compute_bias is False the bias entries have
        # zero gradient and stay at their initial value of 0)
        return np.linalg.norm(color_corrected - reference)

    # Minimize, starting from a small multiple of the identity.
    # (Removed the unused regularization factor and the dead constrained
    # formulation left over from an earlier `minimize` experiment.)
    x0 = np.zeros((12,))
    x0[:9] = np.eye(3).flatten() * 0.1
    sol = least_squares(fun, x0, loss=loss)

    # Recover calibration matrix and bias
    C = sol.x[:9].reshape((3, 3)).astype(np.float32)
    b = sol.x[9:].reshape((3, 1)).astype(np.float32)

    # Return color calibration matrix C
    return {"matrix": C, "bias": b, "sol": sol}
199 |
200 |
def main(*arg, **args):
    """CLI entry point: estimate a color-correction matrix from checker images.

    Detects the four ArUco markers on every input image and on the reference,
    extracts the color-patch centroids, fits C (and optionally bias b) so that
    C @ input ~= reference, saves a comparison figure and writes
    color_calibration.yaml to the output path.
    """
    parser = argparse.ArgumentParser(description="Performs color calibration between 2 images, using ArUco 4X4")
    parser.add_argument(
        "-i", "--input", type=str, required=True, help="Input image (to be calibrated), or folder with reference images"
    )
    parser.add_argument("-r", "--ref", type=str, required=True, help="Reference image to perform the calibration")
    parser.add_argument(
        "-o",
        "--output_path",
        type=str,
        help="Output path to store the file. Default: current path",
        default=os.path.abspath(os.getcwd()),
    )
    parser.add_argument("-p", "--prefix", type=str, help="Prefix for the calibration file. Default: none", default="")
    parser.add_argument(
        "--loss",
        type=str,
        help="Loss used in the optimization. Options: 'linear', 'soft_l1', 'huber', 'cauchy', 'arctan'",
        default="linear",
    )
    parser.add_argument(
        "--compute_bias",
        help="If bias should be computed",
        action="store_true",
    )

    args = parser.parse_args()

    # Open input images: a single file, or every readable image in a folder
    in_images = []
    if os.path.exists(args.input):
        if os.path.isfile(args.input):
            in_images.append(cv2.imread(args.input))
        elif os.path.isdir(args.input):
            # Check all files in dir; unreadable files are skipped (best effort)
            images = [str(x) for x in Path(args.input).glob("*")]
            print("Loading images...")
            for im in tqdm(images):
                try:
                    img = cv2.imread(im)
                    if img is not None:
                        in_images.append(img)
                except Exception:
                    continue
    else:
        # Bug fix: `raise f"..."` raises a str, which is a TypeError in
        # Python 3; raise a proper exception instead.
        raise FileNotFoundError(f"{args.input} does not exist")

    # Open reference image
    if os.path.exists(args.ref):
        ref_img = cv2.imread(args.ref)
    else:
        raise FileNotFoundError(f"{args.ref} does not exist")

    # Extract color marker centroids
    # Input images
    print("Extracting color markers from input images...")
    centroids_in_list = []
    cropped_in_list = []
    for im in tqdm(in_images):
        centroid, crop_in, valid = get_color_centroids(im, dim=COLOR_CHECKER_DIM)
        if valid:
            centroids_in_list.append(centroid)
            cropped_in_list.append(crop_in)

    # Fail early with a clear message instead of np.concatenate's ValueError
    if not centroids_in_list:
        raise RuntimeError("Could not extract markers from any input image")

    # Reference
    print("Extracting color markers from reference image...")
    centroid_ref, cropped_ref, valid = get_color_centroids(ref_img)
    # Check if the reference samples are valid
    if not valid:
        raise RuntimeError("Failed to extract markers from the reference. Please check the reference image")

    # Pair each input sample set with the same reference samples
    centroids_ref_list = [centroid_ref] * len(centroids_in_list)

    # Create training data
    centroids_in = np.concatenate(centroids_in_list, axis=0)
    centroids_ref = np.concatenate(centroids_ref_list, axis=0)

    # Find color calibration matrix
    print("Optimizing color correction matrix...", end="")
    sol = find_color_calibration(centroids_in, centroids_ref, loss=args.loss, compute_bias=args.compute_bias)
    print(f"done. Cost: {sol['sol']['cost']}")

    # Apply calibration to the cropped inputs for visual inspection
    cropped_corr_list = [apply_color_correction(sol, img).astype(np.uint8) for img in cropped_in_list]

    # Show result
    print("Generating visualization of calibrated images...")
    show_calibration_result(cropped_ref, cropped_in_list, cropped_corr_list, save_path=args.output_path)

    # Save as YAML file
    calib_str = f"""matrix:\n rows: 3\n cols: 3\n data: {str([x for x in sol["matrix"].flatten()])}"""
    calib_str += f"""\nbias:\n rows: 3\n cols: 1\n data: {str([x for x in sol["bias"].flatten()])}"""
    # Print calibration output
    print(calib_str)

    yaml = YAML()
    yaml.width = 200
    output_file = os.path.join(args.output_path, "color_calibration.yaml")
    print(f"Saving calibration to {output_file}")
    with open(output_file, "w") as out_file:
        yaml.dump(yaml.load(calib_str), out_file)
305 |
306 |
307 | if __name__ == "__main__":
308 | main()
309 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/setup.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | # Licensed under the MIT license. See LICENSE file in the project root for details.
4 | #
5 |
6 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
7 |
# NOTE(review): distutils is deprecated (removed in Python 3.12); when the
# toolchain allows, switch to setuptools' setup().
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# fetch values from package.xml
setup_args = generate_distutils_setup(packages=["py_raw_image_pipeline"], package_dir={"": "src"})

setup(**setup_args)
15 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/src/py_raw_image_pipeline/__init__.py:
--------------------------------------------------------------------------------
1 | from ._py_raw_image_pipeline import *
2 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/src/raw_image_pipeline_python.cpp:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright (c) 2021-2023, ETH Zurich, Robotic Systems Lab, Matias Mattamala. All rights reserved.
3 | // Licensed under the MIT license. See LICENSE file in the project root for details.
4 | //
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | namespace py = pybind11;
12 | using namespace raw_image_pipeline;
13 |
14 | PYBIND11_MODULE(_py_raw_image_pipeline, m) {
15 |   m.doc() = "Image Proc Cuda bindings";  // module docstring
16 |   py::class_<RawImagePipeline>(m, "RawImagePipeline")  // NOTE(review): template argument restored; it was stripped by the dump sanitizer
17 |       .def(py::init<bool>(),  //
18 |            py::arg("use_gpu"))
19 |       .def(py::init<bool, const std::string&, const std::string&, const std::string&>(),  //
20 |            py::arg("use_gpu") = false, py::arg("params_path") = "",
21 |            py::arg("calibration_path") = "",  //
22 |            py::arg("color_calibration_path") = "")
23 |       .def("apply", &RawImagePipeline::apply)
24 |       .def("process", &RawImagePipeline::process)
25 |       .def("load_params", &RawImagePipeline::loadParams)
26 |       .def("set_gpu", &RawImagePipeline::setGpu)
27 |       .def("set_debug", &RawImagePipeline::setDebug)
28 |       .def("set_debayer", &RawImagePipeline::setDebayer)
29 |       .def("set_debayer_encoding", &RawImagePipeline::setDebayerEncoding)
30 |       .def("set_flip", &RawImagePipeline::setFlip)
31 |       .def("set_flip_angle", &RawImagePipeline::setFlipAngle)
32 |       .def("set_white_balance", &RawImagePipeline::setWhiteBalance)
33 |       .def("set_white_balance_method", &RawImagePipeline::setWhiteBalanceMethod)
34 |       .def("set_white_balance_percentile", &RawImagePipeline::setWhiteBalancePercentile)
35 |       .def("set_white_balance_saturation_threshold", &RawImagePipeline::setWhiteBalanceSaturationThreshold)
36 |       .def("set_white_balance_temporal_consistency", &RawImagePipeline::setWhiteBalanceTemporalConsistency)
37 |       .def("set_gamma_correction", &RawImagePipeline::setGammaCorrection)
38 |       .def("set_gamma_correction_method", &RawImagePipeline::setGammaCorrectionMethod)
39 |       .def("set_gamma_correction_k", &RawImagePipeline::setGammaCorrectionK)
40 |       .def("set_vignetting_correction", &RawImagePipeline::setVignettingCorrection)
41 |       .def("set_vignetting_correction_parameters", &RawImagePipeline::setVignettingCorrectionParameters)
42 |       .def("set_color_enhancer", &RawImagePipeline::setColorEnhancer)
43 |       .def("set_color_enhancer_hue_gain", &RawImagePipeline::setColorEnhancerHueGain)
44 |       .def("set_color_enhancer_saturation_gain", &RawImagePipeline::setColorEnhancerSaturationGain)
45 |       .def("set_color_enhancer_value_gain", &RawImagePipeline::setColorEnhancerValueGain)
46 |       .def("set_color_calibration", &RawImagePipeline::setColorCalibration)
47 |       .def("set_color_calibration_matrix", &RawImagePipeline::setColorCalibrationMatrix)
48 |       .def("set_color_calibration_bias", &RawImagePipeline::setColorCalibrationBias)
49 |       .def("set_undistortion", &RawImagePipeline::setUndistortion)
50 |       .def("set_undistortion_image_size", &RawImagePipeline::setUndistortionImageSize)
51 |       .def("set_undistortion_new_image_size", &RawImagePipeline::setUndistortionNewImageSize)
52 |       .def("set_undistortion_balance", &RawImagePipeline::setUndistortionBalance)
53 |       .def("set_undistortion_fov_scale", &RawImagePipeline::setUndistortionFovScale)
54 |       .def("set_undistortion_camera_matrix", &RawImagePipeline::setUndistortionCameraMatrix)
55 |       .def("set_undistortion_distortion_coeffs", &RawImagePipeline::setUndistortionDistortionCoefficients)
56 |       .def("set_undistortion_distortion_model", &RawImagePipeline::setUndistortionDistortionModel)
57 |       .def("set_undistortion_rectification_matrix", &RawImagePipeline::setUndistortionRectificationMatrix)
58 |       .def("set_undistortion_projection_matrix", &RawImagePipeline::setUndistortionProjectionMatrix)
59 |       .def("get_dist_image_height", &RawImagePipeline::getDistImageHeight)
60 |       .def("get_dist_image_width", &RawImagePipeline::getDistImageWidth)
61 |       .def("get_dist_distortion_model", &RawImagePipeline::getDistDistortionModel)
62 |       .def("get_dist_camera_matrix", &RawImagePipeline::getDistCameraMatrix)
63 |       .def("get_dist_distortion_coefficients", &RawImagePipeline::getDistDistortionCoefficients)
64 |       .def("get_dist_rectification_matrix", &RawImagePipeline::getDistRectificationMatrix)
65 |       .def("get_dist_projection_matrix", &RawImagePipeline::getDistProjectionMatrix)
66 |       .def("get_rect_image_height", &RawImagePipeline::getRectImageHeight)
67 |       .def("get_rect_image_width", &RawImagePipeline::getRectImageWidth)
68 |       .def("get_rect_distortion_model", &RawImagePipeline::getRectDistortionModel)
69 |       .def("get_rect_camera_matrix", &RawImagePipeline::getRectCameraMatrix)
70 |       .def("get_rect_distortion_coefficients", &RawImagePipeline::getRectDistortionCoefficients)
71 |       .def("get_rect_rectification_matrix", &RawImagePipeline::getRectRectificationMatrix)
72 |       .def("get_rect_projection_matrix", &RawImagePipeline::getRectProjectionMatrix)
73 |       .def("reset_white_balance_temporal_consistency", &RawImagePipeline::resetWhiteBalanceTemporalConsistency);
74 | }
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/.github/workflows/ubuntu.yml:
--------------------------------------------------------------------------------
1 | name: Ubuntu
2 |
3 | on:
4 | push:
5 | pull_request:
6 |
7 | jobs:
8 | build:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@master
12 | - name: Checkout submodules
13 | run: git submodule update --init --recursive
14 |
15 | - name: Install OpenCV and SDL via apt
16 | run: sudo apt-get install -y libopencv-dev
17 |
18 | - name: run_build_all
19 | run: |
20 | python -m venv venv
21 | source venv/bin/activate
22 | pip install -r requirements.txt
23 | mkdir build
24 | cd build
25 | conan install ../conanfile_pybind_only.txt
26 | cmake ..
27 | make -j
28 |
29 | - name: tests
30 | run: |
31 | source venv/bin/activate
32 | cd build/
33 | cmake --build . --target test
34 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | venv/
3 | *.so
4 | *.pyd
5 | __pycache__
6 | *.pyc
7 | *.pyo
8 | *.egg-info
9 | *.egg
10 | *.egg-link
11 | .vscode/
12 | cmake-build-*/
13 | .idea/
14 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.4)
2 | project(cvnp LANGUAGES CXX)
3 |
4 | set(CMAKE_CXX_STANDARD 14)
5 |
6 | if (${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
7 |     # For conan, add binary dir to module search path
8 |     list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_BINARY_DIR})
9 |     find_package(pybind11 REQUIRED)
10 |
11 |     find_package(OpenCV REQUIRED)
12 |
13 |     file(GLOB sources cvnp/*.h cvnp/*.cpp)
14 |     pybind11_add_module(cvnp ${sources} main/cvnp_main.cpp)
15 |     target_link_libraries(cvnp PUBLIC opencv_core)
16 |     target_include_directories(cvnp PUBLIC ${CMAKE_CURRENT_LIST_DIR})
17 |     # Copy the .so or .pyd module into this dir, to facilitate the tests
18 |     add_custom_command(
19 |         TARGET cvnp POST_BUILD
20 |         COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:cvnp> ${CMAKE_CURRENT_LIST_DIR}
21 |     )
22 |
23 |     #
24 |     # c++ test target
25 |     #
26 |     add_executable(test_cvnp_cpp tests/test_cvnp_cpp.cpp ${sources})
27 |     target_include_directories(test_cvnp_cpp PRIVATE ${CMAKE_CURRENT_LIST_DIR})
28 |     target_link_libraries(test_cvnp_cpp PRIVATE opencv_core)
29 |     find_package(Python3 REQUIRED COMPONENTS Development)
30 |     target_link_libraries(test_cvnp_cpp PRIVATE pybind11::pybind11)
31 |     enable_testing()
32 |     add_test(NAME test_cvnp_cpp COMMAND test_cvnp_cpp)
33 |     add_test(NAME test_cvnp_python COMMAND pytest ${CMAKE_SOURCE_DIR}/tests)
34 |
35 | else()
36 |     # in order to use cvnp from another project, do this:
37 |     #     add_subdirectory(path/to/cvnp)
38 |     #     target_link_libraries(your_target PRIVATE cvnp)
39 |     file(GLOB sources cvnp/*.h cvnp/*.cpp)
40 |     add_library(cvnp INTERFACE)
41 |     target_sources(cvnp INTERFACE ${sources})
42 |     target_include_directories(cvnp INTERFACE ${CMAKE_CURRENT_LIST_DIR})
43 | endif()
44 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright © 2022
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/Readme.md:
--------------------------------------------------------------------------------
1 | ## cvnp: pybind11 casts and transformers between numpy and OpenCV, possibly with shared memory
2 |
3 |
4 | ### Explicit transformers between cv::Mat / cv::Matx and numpy.ndarray, with or without shared memory
5 |
6 | Notes:
7 | - When going from Python to C++ (nparray_to_mat), the memory is *always* shared
8 | - When going from C++ to Python (mat_to_nparray) , you have to _specify_ whether you want to share memory via
9 | the boolean parameter `share_memory`
10 |
11 | ````cpp
12 | pybind11::array mat_to_nparray(const cv::Mat& m, bool share_memory);
13 | cv::Mat nparray_to_mat(pybind11::array& a);
14 |
15 | template<typename _Tp, int _rows, int _cols>
16 | pybind11::array matx_to_nparray(const cv::Matx<_Tp, _rows, _cols>& m, bool share_memory);
17 | template<typename _Tp, int _rows, int _cols>
18 | void nparray_to_matx(pybind11::array &a, cv::Matx<_Tp, _rows, _cols>& out_matrix);
19 | ````
20 |
21 |
22 | > __Warning__: be extremely cautious of the lifetime of your Matrixes when using shared memory!
23 | For example, the code below is guaranteed to be undefined behavior, and may cause a crash much later.
24 |
25 | ````cpp
26 |
27 | pybind11::array make_array()
28 | {
29 | cv::Mat m(cv::Size(10, 10), CV_8UC1); // create a matrix on the stack
30 | pybind11::array a = cvnp::mat_to_nparray(m, true); // create a pybind array from it, using
31 | // shared memory, which is on the stack!
32 | return a;
33 | } // Here be dragons, when closing the scope!
34 | // m is now out of scope, it is thus freed,
35 | // and the returned array directly points to the old address on the stack!
36 | ````
37 |
38 |
39 | ### Automatic casts:
40 |
41 | #### Without shared memory
42 |
43 | * Casts *without* shared memory between `cv::Mat`, `cv::Matx`, `cv::Vec` and `numpy.ndarray`
44 | * Casts *without* shared memory for simple types, between `cv::Size`, `cv::Point`, `cv::Point3` and python `tuple`
45 |
46 | #### With shared memory
47 |
48 | * Casts *with* shared memory between `cvnp::Mat_shared`, `cvnp::Matx_shared`, `cvnp::Vec_shared` and `numpy.ndarray`
49 |
50 | When you want to cast with shared memory, use these wrappers, which can easily be constructed from their OpenCV counterparts.
51 | They are defined in [cvnp/cvnp_shared_mat.h](cvnp/cvnp_shared_mat.h).
52 |
53 | Be sure that your matrices' lifetime is sufficient (_do not ever share the memory of a temporary matrix!_)
54 |
55 | ### Supported matrix types
56 |
57 | Since OpenCV supports a subset of numpy types, here is the table of supported types:
58 |
59 | ````
60 | ➜ python
61 | >>> import cvnp
62 | >>> cvnp.print_types_synonyms()
63 | cv_depth cv_depth_name np_format np_format_long
64 | 0 CV_8U B np.uint8
65 | 1 CV_8S b np.int8
66 | 2 CV_16U H np.uint16
67 | 3 CV_16S h np.int16
68 | 4 CV_32S i np.int32
69 | 5 CV_32F f float
70 | 6 CV_64F d np.float64
71 | ````
72 |
73 |
74 | ## How to use it in your project
75 |
76 | 1. Add cvnp to your project. For example:
77 |
78 | ````bash
79 | cd external
80 | git submodule add https://github.com/pthom/cvnp.git
81 | ````
82 |
83 | 2. Link it to your python module:
84 |
85 | In your python module CMakeLists, add:
86 |
87 | ````cmake
88 | add_subdirectory(path/to/cvnp)
89 | target_link_libraries(your_target PRIVATE cvnp)
90 | ````
91 |
92 | 3. (Optional) If you want to import the declared functions in your module:
93 |
94 | Write this in your main module code:
95 | ````cpp
96 | void pydef_cvnp(pybind11::module& m);
97 |
98 | PYBIND11_MODULE(your_module, m)
99 | {
100 | ....
101 | ....
102 | ....
103 | pydef_cvnp(m);
104 | }
105 | ````
106 |
107 | You will get two simple functions:
108 | * cvnp.list_types_synonyms()
109 | * cvnp.print_types_synonyms()
110 |
111 | ````python
112 | >>> import cvnp
113 | >>> import pprint
114 | >>> pprint.pprint(cvnp.list_types_synonyms(), indent=2, width=120)
115 | [ {'cv_depth': 0, 'cv_depth_name': 'CV_8U', 'np_format': 'B', 'np_format_long': 'np.uint8'},
116 | {'cv_depth': 1, 'cv_depth_name': 'CV_8S', 'np_format': 'b', 'np_format_long': 'np.int8'},
117 | {'cv_depth': 2, 'cv_depth_name': 'CV_16U', 'np_format': 'H', 'np_format_long': 'np.uint16'},
118 | {'cv_depth': 3, 'cv_depth_name': 'CV_16S', 'np_format': 'h', 'np_format_long': 'np.int16'},
119 | {'cv_depth': 4, 'cv_depth_name': 'CV_32S', 'np_format': 'i', 'np_format_long': 'np.int32'},
120 | {'cv_depth': 5, 'cv_depth_name': 'CV_32F', 'np_format': 'f', 'np_format_long': 'float'},
121 | {'cv_depth': 6, 'cv_depth_name': 'CV_64F', 'np_format': 'd', 'np_format_long': 'np.float64'}]
122 | ````
123 |
124 |
125 | ### Shared and non shared matrices - Demo
126 |
127 | Demo based on extracts from the tests:
128 |
129 | We are using this struct:
130 |
131 | ````cpp
132 | // CvNp_TestHelper is a test helper struct
133 | struct CvNp_TestHelper
134 | {
135 | // m is a *shared* matrix (i.e `cvnp::Mat_shared`)
136 | cvnp::Mat_shared m = cvnp::Mat_shared(cv::Mat::eye(cv::Size(4, 3), CV_8UC1));
137 |     void SetM(int row, int col, uchar v) { m.Value.at<uchar>(row, col) = v; }
138 |
139 | // m_ns is a standard OpenCV matrix
140 | cv::Mat m_ns = cv::Mat::eye(cv::Size(4, 3), CV_8UC1);
141 |     void SetM_ns(int row, int col, uchar v) { m_ns.at<uchar>(row, col) = v; }
142 |
143 | // ...
144 | };
145 | ````
146 |
147 | #### Shared matrices
148 |
149 | Changes propagate from Python to C++ and from C++ to Python
150 |
151 | ````python
152 | def test_mat_shared():
153 | # CvNp_TestHelper is a test helper object
154 | o = CvNp_TestHelper()
155 | # o.m is a *shared* matrix i.e `cvnp::Mat_shared` in the object
156 | assert o.m.shape == (3, 4)
157 |
158 | # From python, change value in the C++ Mat (o.m) and assert that the changes are visible from python and C++
159 | o.m[0, 0] = 2
160 | assert o.m[0, 0] == 2
161 |
162 | # Make a python linked copy of the C++ Mat, named m_linked.
163 |     # Values of m_linked and the C++ mat should change together
164 | m_linked = o.m
165 | m_linked[1, 1] = 3
166 | assert o.m[1, 1] == 3
167 |
168 | # Ask C++ to change a value in the matrix, at (0,0)
169 | # and verify that m_linked as well as o.m are impacted
170 | o.SetM(0, 0, 10)
171 | o.SetM(2, 3, 15)
172 | assert m_linked[0, 0] == 10
173 | assert m_linked[2, 3] == 15
174 | assert o.m[0, 0] == 10
175 | assert o.m[2, 3] == 15
176 | ````
177 |
178 | #### Non shared matrices
179 |
180 | Changes propagate from C++ to Python, but not the other way.
181 |
182 | ````python
183 | def test_mat_not_shared():
184 | # CvNp_TestHelper is a test helper object
185 | o = CvNp_TestHelper()
186 | # o.m_ns is a bare `cv::Mat`. Its memory is *not* shared
187 | assert o.m_ns.shape == (3, 4)
188 |
189 | # From python, change value in the C++ Mat (o.m) and assert that the changes are *not* applied
190 | o.m_ns[0, 0] = 2
191 | assert o.m_ns[0, 0] != 2 # No shared memory!
192 |
193 | # Ask C++ to change a value in the matrix, at (0,0) and verify that the change is visible from python
194 | o.SetM_ns(2, 3, 15)
195 | assert o.m_ns[2, 3] == 15
196 | ````
197 |
198 | ### Non continuous matrices
199 |
200 | #### From C++
201 | The conversion of non continuous matrices from C++ to python will fail. You need to clone them to make them continuous beforehand.
202 |
203 | Example:
204 |
205 | ````cpp
206 | cv::Mat m(cv::Size(10, 10), CV_8UC1);
207 | cv::Mat sub_matrix = m(cv::Rect(3, 0, 3, m.cols));
208 |
209 | TEST_NAME("Try to convert a non continuous Mat to py::array, ensure it throws");
210 | TEST_ASSERT_THROW(
211 | cvnp::mat_to_nparray(sub_matrix, share_memory)
212 | );
213 |
214 | TEST_NAME("Clone the mat, ensure the clone can now be converted to py::array");
215 | cv::Mat sub_matrix_clone = sub_matrix.clone();
216 | py::array a = cvnp::mat_to_nparray(sub_matrix_clone, share_memory);
217 | TEST_ASSERT(a.shape()[0] == 10);
218 | ````
219 |
220 | #### From python
221 |
222 | The conversion of non continuous matrices from python to C++ will work, with or without shared memory.
223 |
224 | ````python
225 | # import test utilities
226 | >>> from cvnp import CvNp_TestHelper, cvnp_roundtrip, cvnp_roundtrip_shared, short_lived_matx, short_lived_mat
227 | >>> o=CvNp_TestHelper()
228 | # o.m is of type `cvnp::Mat_shared`
229 | >>> o.m
230 | array([[1, 0, 0, 0],
231 | [0, 1, 0, 0],
232 | [0, 0, 1, 0]], dtype=uint8)
233 |
234 | # Create a non continuous array
235 | >>> m = np.zeros((10,10))
236 | >>> sub_matrix = m[4:6, :]
237 | >>> sub_matrix.flags['F_CONTIGUOUS']
238 | False
239 |
240 | # Assign it to a `cvnp::Mat_shared`
241 | >>> o.m = m
242 | # Check that memory sharing works
243 | >>> m[0,0]=42
244 | >>> o.m[0,0]
245 | 42.0
246 | ````
247 |
248 |
249 | ## Build and test
250 |
251 | _These steps are only for development and testing of this package, they are not required in order to use it in a different project._
252 |
253 | ### Build
254 |
255 | ````bash
256 | python3 -m venv venv
257 | source venv/bin/activate
258 | pip install -r requirements.txt
259 |
260 | mkdir build
261 | cd build
262 |
263 | # if you do not have a global install of OpenCV and pybind11
264 | conan install .. --build=missing
265 | # if you do have a global install of OpenCV, but not pybind11
266 | conan install ../conanfile_pybind_only.txt --build=missing
267 |
268 | cmake ..
269 | make
270 | ````
271 |
272 | ### Test
273 |
274 | In the build dir, run:
275 |
276 | ````
277 | cmake --build . --target test
278 | ````
279 |
280 | ### Deep clean
281 |
282 | ````
283 | rm -rf build
284 | rm -rf venv
285 | rm -rf .pytest_cache
286 | rm *.so
287 | rm *.pyd
288 | ````
289 |
290 |
291 | ## Notes
292 |
293 | Thanks to Dan Mašek who gave me some inspiration here:
294 | https://stackoverflow.com/questions/60949451/how-to-send-a-cvmat-to-python-over-shared-memory
295 |
296 | This code is intended to be integrated into your own pip package. As such, no pip tooling is provided.
297 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/conanfile.txt:
--------------------------------------------------------------------------------
1 | [requires]
2 | pybind11/2.9.1
3 | opencv/4.5.5
4 |
5 | [generators]
6 | cmake_find_package
7 |
8 | [options]
9 | opencv:shared=False
10 | opencv:with_ffmpeg=False
11 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/conanfile_pybind_only.txt:
--------------------------------------------------------------------------------
1 | [requires]
2 | pybind11/2.9.1
3 |
4 | [generators]
5 | cmake_find_package
6 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/cvnp/cvnp.cpp:
--------------------------------------------------------------------------------
1 | #include "cvnp/cvnp.h"
2 |
3 | // Thanks to Dan Mašek who gave me some inspiration here:
4 | // https://stackoverflow.com/questions/60949451/how-to-send-a-cvmat-to-python-over-shared-memory
5 |
6 | namespace cvnp
7 | {
8 |
9 |     namespace detail
10 |     {
11 |         namespace py = pybind11;
12 |
13 |         py::dtype determine_np_dtype(int cv_depth)  // maps an OpenCV depth (CV_8U..CV_64F) to the matching numpy dtype
14 |         {
15 |             for (auto format_synonym : cvnp::sTypeSynonyms)
16 |                 if (format_synonym.cv_depth == cv_depth)
17 |                     return format_synonym.dtype();
18 |
19 |             std::string msg = "numpy does not support this OpenCV depth: " + std::to_string(cv_depth) + " (in determine_np_dtype)";
20 |             throw std::invalid_argument(msg.c_str());
21 |         }
22 |
23 |         int determine_cv_depth(pybind11::dtype dt)  // maps a numpy dtype char to the matching OpenCV depth
24 |         {
25 |             for (auto format_synonym : cvnp::sTypeSynonyms)
26 |                 if (format_synonym.np_format[0] == dt.char_())
27 |                     return format_synonym.cv_depth;
28 |
29 |             std::string msg = std::string("OpenCV does not support this numpy format: ") + dt.char_() + " (in determine_cv_depth)";  // fixed: message previously named the wrong function
30 |             throw std::invalid_argument(msg.c_str());
31 |         }
32 |
33 |         int determine_cv_type(const py::array& a, int depth)  // 2-dim array -> 1 channel; 3-dim -> shape[2] channels
34 |         {
35 |             if (a.ndim() < 2)
36 |                 throw std::invalid_argument("determine_cv_type needs at least two dimensions");
37 |             if (a.ndim() > 3)
38 |                 throw std::invalid_argument("determine_cv_type needs at most three dimensions");
39 |             if (a.ndim() == 2)
40 |                 return CV_MAKETYPE(depth, 1);
41 |             //We now know that shape.size() == 3
42 |             return CV_MAKETYPE(depth, a.shape()[2]);
43 |         }
44 |
45 |         cv::Size determine_cv_size(const py::array& a)
46 |         {
47 |             if (a.ndim() < 2)
48 |                 throw std::invalid_argument("determine_cv_size needs at least two dimensions");
49 |             return cv::Size(static_cast<int>(a.shape()[1]), static_cast<int>(a.shape()[0]));  // numpy is (rows, cols); cv::Size is (width, height)
50 |         }
51 |
52 |         std::vector<std::size_t> determine_shape(const cv::Mat& m)
53 |         {
54 |             if (m.channels() == 1) {
55 |                 return {
56 |                     static_cast<std::size_t>(m.rows)
57 |                     , static_cast<std::size_t>(m.cols)
58 |                 };
59 |             }
60 |             return {
61 |                 static_cast<std::size_t>(m.rows)
62 |                 , static_cast<std::size_t>(m.cols)
63 |                 , static_cast<std::size_t>(m.channels())
64 |             };
65 |         }
66 |
67 |         py::capsule make_capsule_mat(const cv::Mat& m)  // keeps a cv::Mat refcount alive for as long as the numpy array exists
68 |         {
69 |             return py::capsule(new cv::Mat(m)
70 |                 , [](void *v) { delete reinterpret_cast<cv::Mat*>(v); }
71 |             );
72 |         }
73 |
74 |
75 |     } // namespace detail
76 |
77 |     pybind11::array mat_to_nparray(const cv::Mat& m, bool share_memory)
78 |     {
79 |         if (!m.isContinuous())
80 |             throw std::invalid_argument("Only continuous Mats supported.");
81 |         if (share_memory)
82 |             return pybind11::array(detail::determine_np_dtype(m.depth())
83 |                 , detail::determine_shape(m)
84 |                 , m.data
85 |                 , detail::make_capsule_mat(m)  // capsule owns a shallow copy, so the buffer outlives m
86 |             );
87 |         else
88 |             return pybind11::array(detail::determine_np_dtype(m.depth())
89 |                 , detail::determine_shape(m)
90 |                 , m.data  // no base object: pybind11 copies the buffer
91 |             );
92 |     }
93 |
94 |     cv::Mat nparray_to_mat(pybind11::array& a)
95 |     {
96 |         int depth = detail::determine_cv_depth(a.dtype());
97 |         int type = detail::determine_cv_type(a, depth);
98 |         cv::Size size = detail::determine_cv_size(a);
99 |         cv::Mat m(size, type, a.mutable_data(0));  // wraps the numpy buffer: memory is always shared in this direction
100 |         return m;
101 |     }
102 |
103 | } // namespace cvnp
104 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/cvnp/cvnp.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "cvnp/cvnp_synonyms.h"
3 | #include "cvnp/cvnp_shared_mat.h"
4 |
5 | #include <pybind11/pybind11.h>
6 | #include <pybind11/numpy.h>
7 |
8 | #include <opencv2/core/core.hpp>
9 | #include <opencv2/core/matx.hpp>
10 | #include <string>
11 | #include <vector>
12 | //
13 | // Explicit transformers between cv::Mat / cv::Matx and numpy.ndarray, with *possibly* shared memory
14 | // Also see automatic casters in the namespace pybind11:detail below
15 | //
16 | namespace cvnp
17 | {
18 |     //
19 |     // Public interface
20 |     //
21 |     pybind11::array mat_to_nparray(const cv::Mat& m, bool share_memory);
22 |     cv::Mat nparray_to_mat(pybind11::array& a);
23 |
24 |     template<typename _Tp, int _rows, int _cols>
25 |     pybind11::array matx_to_nparray(const cv::Matx<_Tp, _rows, _cols>& m, bool share_memory);
26 |     template<typename _Tp, int _rows, int _cols>
27 |     void nparray_to_matx(pybind11::array &a, cv::Matx<_Tp, _rows, _cols>& out_matrix);
28 |
29 |
30 |     //
31 |     // Private details and implementations below
32 |     //
33 |     namespace detail
34 |     {
35 |         // Capsule holding a heap copy of the Matx, so the numpy array can own its buffer.
36 |         template<typename _Tp, int _rows, int _cols>
37 |         pybind11::capsule make_capsule_matx(const cv::Matx<_Tp, _rows, _cols>& m)
38 |         {
39 |             return pybind11::capsule(new cv::Matx<_Tp, _rows, _cols>(m)
40 |                 , [](void *v) { delete reinterpret_cast<cv::Matx<_Tp, _rows, _cols>*>(v); }
41 |             );
42 |         }
43 |     } // namespace detail
44 |
45 |     template<typename _Tp, int _rows, int _cols>
46 |     pybind11::array matx_to_nparray(const cv::Matx<_Tp, _rows, _cols>& m, bool share_memory)
47 |     {
48 |         if (share_memory)
49 |             return pybind11::array(
50 |                 pybind11::dtype::of<_Tp>()
51 |                 , std::vector<std::size_t> {_rows, _cols}
52 |                 , m.val
53 |                 , detail::make_capsule_matx<_Tp, _rows, _cols>(m)
54 |             );
55 |         else
56 |             return pybind11::array(
57 |                 pybind11::dtype::of<_Tp>()
58 |                 , std::vector<std::size_t> {_rows, _cols}
59 |                 , m.val
60 |             );
61 |     }
62 |
63 |     template<typename _Tp, int _rows, int _cols>
64 |     void nparray_to_matx(pybind11::array &a, cv::Matx<_Tp, _rows, _cols>& out_matrix)
65 |     {
66 |         size_t mat_size = (size_t)(_rows * _cols);
67 |         if (a.size() != mat_size)
68 |             throw std::runtime_error("Bad size");
69 |
70 |         _Tp* arrayValues = (_Tp*) a.data(0);
71 |         for (size_t i = 0; i < mat_size; ++i)
72 |             out_matrix.val[i] = arrayValues[i];
73 |     }
74 | } // namespace cvnp
74 |
75 |
76 |
77 | //
78 | // 1. Casts with shared memory between {cv::Mat, cv::Matx, cv::Vec} and numpy.ndarray
79 | //
80 | // 2. Casts without shared memory between {cv::Size, cv::Point, cv::Point3} and python tuples
81 | //
82 | namespace pybind11
83 | {
84 | namespace detail
85 | {
86 | //
87 | // Cast between cvnp::Mat_shared and numpy.ndarray
88 | // The cast between cv::Mat and numpy.ndarray works
89 | // - *with* shared memory when going from C++ to Python
90 | // - *with* shared memory when going from Python to C++
91 | // any modification to the Matrix size, type, and values is immediately
92 | // impacted on both sides.
93 | //
94 |         template<>
95 |         struct type_caster<cvnp::Mat_shared>
96 |         {
97 |         public:
98 |             PYBIND11_TYPE_CASTER(cvnp::Mat_shared, _("numpy.ndarray"));
99 |
100 |             /**
101 |              * Conversion part 1 (Python->C++):
102 |              * Return false upon failure.
103 |              * The second argument indicates whether implicit conversions should be applied.
104 |              */
105 |             bool load(handle src, bool)
106 |             {
107 |                 auto a = reinterpret_borrow<pybind11::array>(src);
108 |                 auto new_mat = cv::Mat(cvnp::nparray_to_mat(a));
109 |                 value.Value = new_mat;
110 |                 return true;
111 |             }
112 |
113 |             /**
114 |              * Conversion part 2 (C++ -> Python):
115 |              * The second and third arguments are used to indicate the return value policy and parent object
116 |              * (for ``return_value_policy::reference_internal``) and are generally
117 |              * ignored by implicit casters.
118 |              */
119 |             static handle cast(const cvnp::Mat_shared &m, return_value_policy, handle defval)
120 |             {
121 |                 auto a = cvnp::mat_to_nparray(m.Value, true);  // true: share memory with the returned ndarray
122 |                 return a.release();
123 |             }
124 |         };
125 |
126 |
127 | //
128 | // Cast between cv::Mat and numpy.ndarray
129 | // The cast between cv::Mat and numpy.ndarray works *without* shared memory.
130 | // - *without* shared memory when going from C++ to Python
131 | // - *with* shared memory when going from Python to C++
132 | //
133 |         template<>
134 |         struct type_caster<cv::Mat>
135 |         {
136 |         public:
137 |             PYBIND11_TYPE_CASTER(cv::Mat, _("numpy.ndarray"));
138 |
139 |             /**
140 |              * Conversion part 1 (Python->C++):
141 |              * Return false upon failure.
142 |              * The second argument indicates whether implicit conversions should be applied.
143 |              */
144 |             bool load(handle src, bool)
145 |             {
146 |                 auto a = reinterpret_borrow<pybind11::array>(src);
147 |                 auto new_mat = cvnp::nparray_to_mat(a);
148 |                 value = new_mat;
149 |                 return true;
150 |             }
151 |
152 |             /**
153 |              * Conversion part 2 (C++ -> Python):
154 |              * The second and third arguments are used to indicate the return value policy and parent object
155 |              * (for ``return_value_policy::reference_internal``) and are generally
156 |              * ignored by implicit casters.
157 |              */
158 |             static handle cast(const cv::Mat &m, return_value_policy, handle defval)
159 |             {
160 |                 auto a = cvnp::mat_to_nparray(m, false);  // false: the ndarray gets its own copy of the data
161 |                 return a.release();
162 |             }
163 |         };
164 |
165 |
166 | //
167 | // Cast between cvnp::Matx_shared<_rows,_cols> (aka Matx33d, Matx21d, etc) + Vec<_rows> (aka Vec1d, Vec2f, etc) and numpy.ndarray
168 | // The cast between cvnp::Matx_shared, cvnp::Vec_shared and numpy.ndarray works *with* shared memory:
169 | // any modification to the Matrix size, type, and values is immediately
170 | // impacted on both sides.
171 | // - *with* shared memory when going from C++ to Python
172 | // - *with* shared memory when going from Python to C++
173 | //
174 |         template<typename _Tp, int _rows, int _cols>
175 |         struct type_caster<cvnp::Matx_shared<_Tp, _rows, _cols>>
176 |         {
177 |             using Matshared_xxx = cvnp::Matx_shared<_Tp, _rows, _cols>;
178 |
179 |         public:
180 |             PYBIND11_TYPE_CASTER(Matshared_xxx, _("numpy.ndarray"));
181 |
182 |             // Conversion part 1 (Python->C++)
183 |             bool load(handle src, bool)
184 |             {
185 |                 auto a = reinterpret_borrow<pybind11::array>(src);
186 |                 cvnp::nparray_to_matx<_Tp, _rows, _cols>(a, value.Value);
187 |                 return true;
188 |             }
189 |
190 |             // Conversion part 2 (C++ -> Python)
191 |             static handle cast(const Matshared_xxx &m, return_value_policy, handle defval)
192 |             {
193 |                 auto a = cvnp::matx_to_nparray<_Tp, _rows, _cols>(m.Value, true);  // true: shared memory
194 |                 return a.release();
195 |             }
196 |         };
197 |
198 |
199 | //
200 | // Cast between cv::Matx<_rows,_cols> (aka Matx33d, Matx21d, etc) + Vec<_rows> (aka Vec1d, Vec2f, etc) and numpy.ndarray
201 | // The cast between cv::Matx, cv::Vec and numpy.ndarray works *without* shared memory.
202 | // - *without* shared memory when going from C++ to Python
203 | // - *with* shared memory when going from Python to C++
204 | //
205 |         template<typename _Tp, int _rows, int _cols>
206 |         struct type_caster<cv::Matx<_Tp, _rows, _cols>>
207 |         {
208 |             using Matxxx = cv::Matx<_Tp, _rows, _cols>;
209 |
210 |         public:
211 |             PYBIND11_TYPE_CASTER(Matxxx, _("numpy.ndarray"));
212 |
213 |             // Conversion part 1 (Python->C++)
214 |             bool load(handle src, bool)
215 |             {
216 |                 auto a = reinterpret_borrow<pybind11::array>(src);
217 |                 cvnp::nparray_to_matx<_Tp, _rows, _cols>(a, value);
218 |                 return true;
219 |             }
220 |
221 |             // Conversion part 2 (C++ -> Python)
222 |             static handle cast(const Matxxx &m, return_value_policy, handle defval)
223 |             {
224 |                 auto a = cvnp::matx_to_nparray<_Tp, _rows, _cols>(m, false);  // false: copy, no shared memory
225 |                 return a.release();
226 |             }
227 |         };
228 |
229 |
230 |
231 | //
232 | // Cast between cv::Size and a simple python tuple.
233 | // No shared memory, you cannot modify the width or the height without
234 | // transferring the whole Size from C++, or the tuple from python
235 | //
236 |         template<typename _Tp>
237 |         struct type_caster<cv::Size_<_Tp>>
238 |         {
239 |             using SizeTp = cv::Size_<_Tp>;
240 |
241 |         public:
242 |             PYBIND11_TYPE_CASTER(SizeTp, _("tuple"));
243 |
244 |             // Conversion part 1 (Python->C++, i.e tuple -> Size)
245 |             bool load(handle src, bool)
246 |             {
247 |                 auto tuple = pybind11::reinterpret_borrow<pybind11::tuple>(src);
248 |                 if (tuple.size() != 2)
249 |                     throw std::invalid_argument("Size should be in a tuple of size 2");
250 |
251 |                 SizeTp r;
252 |                 r.width = tuple[0].cast<_Tp>();
253 |                 r.height = tuple[1].cast<_Tp>();
254 |
255 |                 value = r;
256 |                 return true;
257 |             }
258 |
259 |             // Conversion part 2 (C++ -> Python, i.e Size -> tuple)
260 |             static handle cast(const SizeTp &value, return_value_policy, handle defval)
261 |             {
262 |                 auto result = pybind11::make_tuple(value.width, value.height);
263 |                 return result.release();
264 |             }
265 |         };
266 |
267 |
268 | //
269 | // Cast between cv::Point and a simple python tuple
270 | // No shared memory, you cannot modify x or y without
271 | // transferring the whole Point from C++, or the tuple from python
272 | //
273 |         template<typename _Tp>
274 |         struct type_caster<cv::Point_<_Tp>>
275 |         {
276 |             using PointTp = cv::Point_<_Tp>;
277 |
278 |         public:
279 |             PYBIND11_TYPE_CASTER(PointTp , _("tuple"));
280 |
281 |             // Conversion part 1 (Python->C++)
282 |             bool load(handle src, bool)
283 |             {
284 |                 auto tuple = pybind11::reinterpret_borrow<pybind11::tuple>(src);
285 |                 if (tuple.size() != 2)
286 |                     throw std::invalid_argument("Point should be in a tuple of size 2");
287 |
288 |                 PointTp r;
289 |                 r.x = tuple[0].cast<_Tp>();
290 |                 r.y = tuple[1].cast<_Tp>();
291 |
292 |                 value = r;
293 |                 return true;
294 |             }
295 |
296 |             // Conversion part 2 (C++ -> Python)
297 |             static handle cast(const PointTp &value, return_value_policy, handle defval)
298 |             {
299 |                 auto result = pybind11::make_tuple(value.x, value.y);
300 |                 return result.release();
301 |             }
302 |         };
303 |
304 |
305 |
306 | //
307 | // Point3
308 | // No shared memory
309 | //
310 |         template<typename _Tp>
311 |         struct type_caster<cv::Point3_<_Tp>>
312 |         {
313 |             using PointTp = cv::Point3_<_Tp>;
314 |
315 |         public:
316 |             PYBIND11_TYPE_CASTER(PointTp , _("tuple"));
317 |
318 |             // Conversion part 1 (Python->C++)
319 |             bool load(handle src, bool)
320 |             {
321 |                 auto tuple = pybind11::reinterpret_borrow<pybind11::tuple>(src);
322 |                 if (tuple.size() != 3)
323 |                     throw std::invalid_argument("Point3 should be in a tuple of size 3");
324 |
325 |                 PointTp r;
326 |                 r.x = tuple[0].cast<_Tp>();
327 |                 r.y = tuple[1].cast<_Tp>();
328 |                 r.z = tuple[2].cast<_Tp>();
329 |
330 |                 value = r;
331 |                 return true;
332 |             }
333 |
334 |             // Conversion part 2 (C++ -> Python)
335 |             static handle cast(const PointTp &value, return_value_policy, handle defval)
336 |             {
337 |                 auto result = pybind11::make_tuple(value.x, value.y, value.z);
338 |                 return result.release();
339 |             }
340 |         };
341 |
342 |
343 | } // namespace detail
344 | } // namespace pybind11
345 |
346 |
--------------------------------------------------------------------------------
/raw_image_pipeline_python/thirdparty/cvnp/cvnp/cvnp_shared_mat.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <opencv2/core/core.hpp>
4 |
5 |
6 | namespace cvnp
7 | {
8 | //
9 | // Wrappers for cv::Matx, cv::Mat and cv::Vec when you explicitly intend to share memory
10 | //
11 |
12 | struct Mat_shared
13 | {
14 |     Mat_shared(const cv::Mat value = cv::Mat()): Value(value) {};  // implicit by design: lets a bare cv::Mat be passed where a shared wrapper is expected
15 |     cv::Mat Value;  // the wrapped matrix; cv::Mat is refcounted, so copying the wrapper still aliases the same buffer
16 | };
17 |
18 |
19 |     template<typename _Tp, int _rows, int _cols>
20 |     struct Matx_shared
21 |     {
22 |         Matx_shared(const cv::Matx<_Tp, _rows, _cols>& value = cv::Matx<_Tp, _rows, _cols>()): Value(value) {};  // implicit by design
23 |         cv::Matx<_Tp, _rows, _cols> Value;  // the wrapped fixed-size matrix
24 |     };
25 |
26 |
27 |     template<typename _Tp, int cn>
28 |     struct Vec_shared : public cv::Vec<_Tp, cn>
29 |     {
30 |         Vec_shared(const cv::Vec<_Tp, cn>& value): Value(value) {};  // NOTE(review): both inherits from cv::Vec and stores Value — mirrors upstream cvnp; confirm which storage the casters use
31 |         cv::Vec<_Tp, cn> Value;  // the wrapped vector
32 |     };
33 |
34 |
35 |     typedef Matx_shared<float, 1, 2> Matx_shared12f;  // convenience aliases mirroring cv::Matx12f etc.
36 |     typedef Matx_shared<double, 1, 2> Matx_shared12d;
37 |     typedef Matx_shared<float, 1, 3> Matx_shared13f;
38 |     typedef Matx_shared<double, 1, 3> Matx_shared13d;
39 |     typedef Matx_shared<float, 1, 4> Matx_shared14f;
40 |     typedef Matx_shared<double, 1, 4> Matx_shared14d;
41 |     typedef Matx_shared<float, 1, 6> Matx_shared16f;
42 |     typedef Matx_shared<double, 1, 6> Matx_shared16d;
43 |
44 |     typedef Matx_shared<float, 2, 1> Matx_shared21f;
45 |     typedef Matx_shared<double, 2, 1> Matx_shared21d;
46 |     typedef Matx_shared<float, 3, 1> Matx_shared31f;
47 |     typedef Matx_shared<double, 3, 1> Matx_shared31d;
48 |     typedef Matx_shared<float, 4, 1> Matx_shared41f;
49 |     typedef Matx_shared<double, 4, 1> Matx_shared41d;
50 | typedef Matx_shared