├── .gitignore
├── LICENSE
├── README.rst
├── calibration
├── board.png
├── calibration.json
├── charuco_calibration.ipynb
├── openpose
│ ├── calibration_boards
│ │ └── intrinsics
│ │ │ ├── 18284509
│ │ │ ├── capture_0.jpg
│ │ │ ├── capture_1.jpg
│ │ │ ├── capture_10.jpg
│ │ │ ├── capture_11.jpg
│ │ │ ├── capture_12.jpg
│ │ │ ├── capture_13.jpg
│ │ │ ├── capture_14.jpg
│ │ │ ├── capture_2.jpg
│ │ │ ├── capture_24.jpg
│ │ │ ├── capture_25.jpg
│ │ │ ├── capture_26.jpg
│ │ │ ├── capture_27.jpg
│ │ │ ├── capture_28.jpg
│ │ │ ├── capture_29.jpg
│ │ │ ├── capture_3.jpg
│ │ │ ├── capture_30.jpg
│ │ │ ├── capture_31.jpg
│ │ │ ├── capture_32.jpg
│ │ │ ├── capture_4.jpg
│ │ │ ├── capture_5.jpg
│ │ │ ├── capture_6.jpg
│ │ │ ├── capture_7.jpg
│ │ │ └── capture_9.jpg
│ │ │ ├── 18284511
│ │ │ ├── capture_0.jpg
│ │ │ ├── capture_1.jpg
│ │ │ ├── capture_10.jpg
│ │ │ ├── capture_11.jpg
│ │ │ ├── capture_12.jpg
│ │ │ ├── capture_13.jpg
│ │ │ ├── capture_14.jpg
│ │ │ ├── capture_15.jpg
│ │ │ ├── capture_16.jpg
│ │ │ ├── capture_17.jpg
│ │ │ ├── capture_18.jpg
│ │ │ ├── capture_19.jpg
│ │ │ ├── capture_2.jpg
│ │ │ ├── capture_20.jpg
│ │ │ ├── capture_21.jpg
│ │ │ ├── capture_22.jpg
│ │ │ ├── capture_3.jpg
│ │ │ ├── capture_4.jpg
│ │ │ ├── capture_5.jpg
│ │ │ ├── capture_6.jpg
│ │ │ ├── capture_7.jpg
│ │ │ ├── capture_8.jpg
│ │ │ └── capture_9.jpg
│ │ │ └── 18284512
│ │ │ ├── capture_11.jpg
│ │ │ ├── capture_12.jpg
│ │ │ ├── capture_14.jpg
│ │ │ ├── capture_15.jpg
│ │ │ ├── capture_16.jpg
│ │ │ ├── capture_17.jpg
│ │ │ ├── capture_18.jpg
│ │ │ ├── capture_20.jpg
│ │ │ ├── capture_21.jpg
│ │ │ ├── capture_22.jpg
│ │ │ ├── capture_23.jpg
│ │ │ ├── capture_24.jpg
│ │ │ ├── capture_25.jpg
│ │ │ ├── capture_5.jpg
│ │ │ ├── capture_6.jpg
│ │ │ └── tmp
│ │ │ ├── capture_0.jpg
│ │ │ ├── capture_1.jpg
│ │ │ ├── capture_10.jpg
│ │ │ ├── capture_13.jpg
│ │ │ ├── capture_19.jpg
│ │ │ ├── capture_2.jpg
│ │ │ ├── capture_3.jpg
│ │ │ ├── capture_4.jpg
│ │ │ ├── capture_7.jpg
│ │ │ ├── capture_8.jpg
│ │ │ └── capture_9.jpg
│ ├── captured-data
│ │ ├── frame_00.jpg
│ │ ├── frame_00.json
│ │ ├── frame_00_annotated.jpg
│ │ ├── frame_01.jpg
│ │ ├── frame_01.json
│ │ ├── frame_01_annotated.jpg
│ │ ├── frame_02.jpg
│ │ ├── frame_02.json
│ │ ├── frame_02_annotated.jpg
│ │ ├── frame_03.jpg
│ │ ├── frame_03.json
│ │ └── frame_03_annotated.jpg
│ ├── charuco_origin.ipynb
│ ├── charuco_pose_estimation.ipynb
│ ├── chessboard_alternative_calibration.ipynb
│ ├── compare_calibration.ipynb
│ ├── flir_aruco.py
│ ├── models
│ │ ├── 18284509.xml
│ │ ├── 18284511.xml
│ │ └── 18284512.xml
│ └── sample-data
│ │ ├── 18284509
│ │ ├── capture_0.jpg
│ │ ├── capture_1.jpg
│ │ ├── capture_2.jpg
│ │ └── capture_3.jpg
│ │ ├── 18284511
│ │ ├── capture_0.jpg
│ │ ├── capture_1.jpg
│ │ ├── capture_2.jpg
│ │ └── capture_3.jpg
│ │ └── 18284512
│ │ ├── capture_0.jpg
│ │ ├── capture_1.jpg
│ │ └── capture_2.jpg
├── points_imageframe_1.jpg.json
├── points_imageframe_10.jpg.json
├── points_imageframe_11.jpg.json
├── points_imageframe_2.jpg.json
├── points_imageframe_3.jpg.json
├── points_imageframe_4.jpg.json
├── points_imageframe_5.jpg.json
├── points_imageframe_6.jpg.json
├── points_imageframe_7.jpg.json
├── points_imageframe_8.jpg.json
├── points_imageframe_9.jpg.json
└── sample-data
│ ├── calibration.json
│ ├── frame_1.jpg
│ ├── frame_10.jpg
│ ├── frame_11.jpg
│ ├── frame_2.jpg
│ ├── frame_3.jpg
│ ├── frame_4.jpg
│ ├── frame_5.jpg
│ ├── frame_6.jpg
│ ├── frame_7.jpg
│ ├── frame_8.jpg
│ ├── frame_9.jpg
│ ├── test_frame_1.jpg
│ ├── test_frame_2.jpg
│ ├── test_frame_3.jpg
│ ├── test_frame_4.jpg
│ └── test_frame_5.jpg
├── data
├── ChAruCo 11x8, 300dpi, 50mm spacing, 30mm markers.pdf
└── board.pdf
├── flir
├── README.rst
├── common.py
├── flir_capture.py
├── flir_capture_all_continuous.py
├── flir_capture_multiple.py
├── flir_capture_multiple_video.py
├── flir_capture_single.py
├── flir_capture_single_continuous.py
├── flir_detect_aruco.py
└── fps_counter.py
├── img
└── kinect_detection.jpg
├── kinect-v2
├── README.rst
├── body.py
├── cap
│ ├── frame_0.jpg
│ ├── frame_0.json
│ └── frame_0_annotated.jpg
├── capture_board.py
├── color.py
├── color_3d.py
├── depth.py
├── detect_aruco.py
├── detect_charuco.py
├── frame_0.jpg
├── frame_0.json
├── infrared.py
└── sample-data
│ ├── frame_0.jpg
│ ├── frame_0.json
│ ├── frame_1.jpg
│ ├── frame_1.json
│ ├── frame_2.jpg
│ ├── frame_2.json
│ ├── frame_3.jpg
│ └── frame_3.json
└── webcam
├── capture_webcam.py
├── detect_aruco.py
├── estimate_pose.py
├── sample-data
└── calibration.json
└── shared.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *.swp
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 |
62 | # Flask stuff:
63 | instance/
64 | .webassets-cache
65 |
66 | # Scrapy stuff:
67 | .scrapy
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # Jupyter Notebook
76 | .ipynb_checkpoints
77 |
78 | # pyenv
79 | .python-version
80 |
81 | # celery beat schedule file
82 | celerybeat-schedule
83 |
84 | # SageMath parsed files
85 | *.sage.py
86 |
87 | # Environments
88 | .env
89 | .venv
90 | env/
91 | venv/
92 | ENV/
93 | env.bak/
94 | venv.bak/
95 |
96 | # Spyder project settings
97 | .spyderproject
98 | .spyproject
99 |
100 | # Rope project settings
101 | .ropeproject
102 |
103 | # mkdocs documentation
104 | /site
105 |
106 | # mypy
107 | .mypy_cache/
108 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2018, Daniel Saakes
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ------------------------------
2 | calibrating-with-python-opencv
3 | ------------------------------
4 | Examples of intrinsic and extrinsic camera calibration and pose estimation with the Kinect v2, FLIR cameras, and others — all in Python.
5 |
6 |
7 | .. image:: img/kinect_detection.jpg
8 | :width: 400
9 | :alt: detecting markers with the Kinect
--------------------------------------------------------------------------------
/calibration/board.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/board.png
--------------------------------------------------------------------------------
/calibration/calibration.json:
--------------------------------------------------------------------------------
1 | {
2 | "calibrationDate": "2018-10-10 10:41:26.877267",
3 | "cameraResolution": {
4 | "type_id": "opencv-matrix",
5 | "rows": 2,
6 | "cols": 1,
7 | "dt": "d",
8 | "data": [ 720.0, 1280.0 ]
9 | },
10 | "cameraMatrix": {
11 | "type_id": "opencv-matrix",
12 | "rows": 3,
13 | "cols": 3,
14 | "dt": "d",
15 | "data": [ 1.0036890509044972e+03, 0.0, 6.3670897816398144e+02,
16 | 0.0, 1.0036890509044972e+03, 4.1429682375533667e+02, 0.0,
17 | 0.0, 1.0 ]
18 | },
19 | "dist_coeffs": {
20 | "type_id": "opencv-matrix",
21 | "rows": 14,
22 | "cols": 1,
23 | "dt": "d",
24 | "data": [ -8.5188812484719122e-03, -4.4933351860255627e-01,
25 | 5.7140445300074899e-03, -1.2610711736297426e-03,
26 | 2.6073694779700285e-01, -1.3493827802828759e-01,
27 | 1.0285916634349823e-01, -3.4395347152192784e-01, 0.0, 0.0,
28 | 0.0, 0.0, 0.0, 0.0 ]
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_10.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_11.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_12.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_13.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_14.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_24.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_24.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_25.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_25.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_26.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_26.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_27.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_27.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_28.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_28.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_29.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_29.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_3.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_30.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_30.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_31.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_31.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_32.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_32.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_4.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_5.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_6.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_7.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284509/capture_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284509/capture_9.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_10.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_11.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_12.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_13.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_14.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_15.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_16.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_17.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_18.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_19.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_20.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_21.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_21.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_22.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_3.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_4.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_5.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_6.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_7.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_8.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284511/capture_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284511/capture_9.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_11.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_12.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_14.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_15.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_16.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_17.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_18.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_20.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_21.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_21.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_22.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_23.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_23.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_24.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_24.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_25.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_25.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_5.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/capture_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/capture_6.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_10.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_13.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_19.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_3.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_4.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_7.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_8.jpg
--------------------------------------------------------------------------------
/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/calibration_boards/intrinsics/18284512/tmp/capture_9.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_00.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_00.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_00.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.11454005761979534, "y": 0.18734488867143612, "z": 3.5081874797828756}, "rot3x3": [[0.9812365161790775, 0.016142457851824846, 0.19213099794475566], [0.11976112496299292, -0.8319745513603454, -0.5417339003942344], [0.15110318416348492, 0.5545789095730688, -0.8182970492384791]]}}
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_00_annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_00_annotated.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_01.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_01.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_01.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.17509263733713282, "y": 0.18506135463113796, "z": 3.5213673163459887}, "rot3x3": [[0.9586101378459623, -0.051292589791719904, 0.2800636960610992], [0.1141706474557033, -0.8318463116997036, -0.5431360575133501], [0.2608288076085364, 0.5526307844701179, -0.7915601993389075]]}}
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_01_annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_01_annotated.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_02.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_02.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_02.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.023754125693590766, "y": 0.19267393318314952, "z": 3.0500337208287243}, "rot3x3": [[0.9877126126715591, 0.04967622158565849, 0.1481758002458388], [0.12245955186088728, -0.8350757838328465, -0.5363320738255242], [0.09709507159250064, 0.547887495992498, -0.830898212061813]]}}
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_02_annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_02_annotated.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_03.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_03.jpg
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_03.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.07323559961231949, "y": 0.16624498314927447, "z": 3.4882481061755506}, "rot3x3": [[0.6978746463894456, 0.34074761884781096, 0.6299698708480851], [0.5387543970444706, -0.829315606025541, -0.14825425885194218], [0.4719265595323651, 0.44286192639697153, -0.7623376132370148]]}}
--------------------------------------------------------------------------------
/calibration/openpose/captured-data/frame_03_annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/captured-data/frame_03_annotated.jpg
--------------------------------------------------------------------------------
/calibration/openpose/flir_aruco.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 | import os
5 | import json
6 |
7 | import numpy as np
8 | import cv2
9 |
10 | import argparse
11 |
12 |
# ChArUco board geometry used by every detection/pose call in this script.
charuco_square_length = 138.0 / 1000 # chessboard square side length in meters (138 mm)
charuco_marker_length = 87.0 / 1000 # marker side length in meters (same unit as square length)
squaresX = 5 # number of chessboard squares along X
squaresY = 7 # number of chessboard squares along Y
# 4x4-bit dictionary with 250 ids -- must match the dictionary the physical board was printed with
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)
board = cv2.aruco.CharucoBoard_create(squaresX,squaresY,charuco_square_length,charuco_marker_length,dictionary)
19 |
20 |
21 |
22 |
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("folder", help="folder to save images")
ap.add_argument("-c", "--camera", type=str, default="0", help="use camera by id")
ap.add_argument("-f", "--force", action="store_true", help="force overwrite in folder")
ap.add_argument("--fps", type=int, default=20, help="set framerate")

args = vars(ap.parse_args())

# Prepare the output folder. Refuse to reuse an existing folder unless
# --force is given, so earlier captures are not silently overwritten.
target_folder = args['folder']
if os.path.isdir(target_folder):
    if not args['force']:
        # error goes to stderr and the process exits with a nonzero status
        print("{}: error: folder {} exists. Use --force to overwrite files.".format(os.path.basename(sys.argv[0]), target_folder), file=sys.stderr)
        sys.exit(1)
else:
    os.makedirs(target_folder)
40 |
41 |
def set_trigger_mode_software(cam):
    """Put the camera into software-trigger mode.

    After this call a frame is exposed only when cam.TriggerSoftware()
    is invoked (see the main capture loop below).
    NOTE(review): the trigger is switched off before changing the source;
    Spinnaker appears to require TriggerMode off while TriggerSource is
    modified -- confirm against the SDK documentation.
    """
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
    print("set trigger mode software")
47 |
48 |
def reset_trigger_mode_software(cam):
    """Disable triggering so the camera returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
52 |
53 |
def find_aruco_corners(img, dictionary):
    """Detect ArUco markers in a BGR image and refine their corners.

    Returns the (corners, ids, rejected) triple from
    cv2.aruco.detectMarkers; each detected corner array is refined in
    place to sub-pixel accuracy before being returned.
    """
    refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    detected, marker_ids, rejected = cv2.aruco.detectMarkers(grayscale, dictionary)
    # the loop is simply a no-op when nothing was detected
    for marker in detected:
        cv2.cornerSubPix(grayscale, marker, winSize=(3,3), zeroZone=(-1,-1), criteria=refine_criteria)
    return detected, marker_ids, rejected
63 |
64 |
def find_charuco_board(img, board, dictionary):
    """Locate the ChArUco board in a BGR image.

    Returns (charuco_corners, charuco_ids) when more than three board
    corners could be interpolated from the detected markers, otherwise
    ([], []).
    """
    refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    markers, marker_ids, _ = cv2.aruco.detectMarkers(grayscale, dictionary)
    if len(markers) == 0:
        return [], []
    for marker in markers:
        cv2.cornerSubPix(grayscale, marker, winSize=(3,3), zeroZone=(-1,-1), criteria=refine_criteria)
    _, board_corners, board_ids = cv2.aruco.interpolateCornersCharuco(markers, marker_ids, grayscale, board)
    if board_corners is None or board_ids is None or len(board_corners) <= 3:
        return [], []
    return board_corners, board_ids
77 |
78 |
79 |
80 |
81 |
82 |
#
# setup
#

# Acquire the Spinnaker SDK singleton and enumerate attached FLIR cameras.
system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    # release the SDK instance before exiting so the driver is left clean
    system.ReleaseInstance()
    del system
    sys.exit()
95 |
# List every attached camera so the user can pick a serial with -c.
# (The original kept an unused `cameras` accumulator; removed.)
for i in range(cam_list.GetSize()):
    print("camera {} serial: {}".format(i, cam_list.GetByIndex(i).GetUniqueID()))
100 |
101 |
# Resolve which camera to use: "-c <serial>", or default to the first one found.
camera_serial = args["camera"]
if camera_serial == "0":
    camera_serial = cam_list.GetByIndex(0).GetUniqueID()
    print("no camera specified (use -c), using the first one in the list {}".format(camera_serial))

# Load the per-camera intrinsic calibration written by the calibration step.
filename = "models/{}.xml".format(camera_serial)

fs = cv2.FileStorage(filename, cv2.FILE_STORAGE_READ)

if not fs.isOpened():
    # missing calibration is fatal: report on stderr and exit nonzero
    print("couldn't open camera calibration {} aborting".format(filename), file=sys.stderr)
    sys.exit(1)

intrinsics = fs.getNode("Intrinsics").mat()   # 3x3 camera matrix
dist_coeffs = fs.getNode("Distortion").mat()  # lens distortion coefficients

fs.release()
120 |
121 |
122 |
123 |
124 |
cam = cam_list.GetBySerial(camera_serial)

try:
    cam.Init()
    cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
    set_trigger_mode_software(cam)
    cam.BeginAcquisition()

except Exception as e:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # catch Exception, include the reason, and exit with a failure status.
    print("error initializing camera {}: {}".format(camera_serial, e), file=sys.stderr)
    sys.exit(1)
136 |
137 |
138 | #os.mkdir(camera_serial)
139 |
def makeRT(rvec, tvec):
    """Convert an OpenCV rvec/tvec pose into a JSON-serializable dict.

    `tvec` is indexed as a 3x1 column vector (as returned by
    cv2.aruco.estimatePoseCharucoBoard); `rvec` is a Rodrigues rotation
    vector, expanded here to a 3x3 matrix.
    """
    rotation_matrix, _jacobian = cv2.Rodrigues(rvec)
    translation = {"x": tvec[0][0], "y": tvec[1][0], "z": tvec[2][0]}
    return {"t": translation, "rot3x3": rotation_matrix.tolist()}
143 |
def makeMarkersJson(rvecs, tvecs, ids):
    """Build a JSON-serializable list of per-marker poses.

    The parameters mirror the cv2.aruco.estimatePoseSingleMarkers /
    detectMarkers outputs: rvecs and tvecs are indexed as (N,1,3) arrays
    and ids as an (N,1) array of marker ids -- TODO confirm at call site.
    """
    markers = []
    # zip keeps the three parallel arrays in lockstep (idiomatic
    # replacement for indexing with range(len(...))).
    for rvec, tvec, marker_id in zip(rvecs, tvecs, ids):
        rot3x3, _ = cv2.Rodrigues(rvec)
        markers.append({
            "id": int(marker_id[0]),
            "t": {"x": tvec[0][0], "y": tvec[0][1], "z": tvec[0][2]},
            "rot3x3": rot3x3.tolist(),
        })
    return markers
#
# loop
#
# Main capture loop: software-trigger one frame per iteration, detect the
# ChArUco board and the individual ArUco markers, annotate the frame, and
# on SPACE save the image plus a JSON file with the board/marker poses.

count = 0  # index used for the saved frame_XX filenames

while 1:
    key = cv2.waitKey(1)



    if key == 27: # ESC
        cv2.destroyAllWindows()
        break

    # request exactly one frame (camera is in software-trigger mode)
    cam.TriggerSoftware()
    i = cam.GetNextImage()
    #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())
    frame = None

    font = cv2.FONT_HERSHEY_SIMPLEX

    if i.IsIncomplete():
        pass  # partial transfer: silently skip this frame
    else:
        # see documentation: enum ColorProcessingAlgorithm
        image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
        image_data = image_converted.GetData()
        # wrap the raw buffer as an HxWx3 BGR image
        frame = np.frombuffer(image_data, dtype=np.uint8)
        frame = frame.reshape((i.GetHeight(),i.GetWidth(),3))
        frame_annotated = frame.copy()

        corners, ids = find_charuco_board(frame, board, dictionary)

        valid = False
        if len(corners) > 0 and len(ids) > 0:
            # board pose from the interpolated chessboard corners
            valid, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(corners, ids, board, intrinsics, dist_coeffs)
            cv2.aruco.drawDetectedCornersCharuco(frame_annotated, corners, ids)

            # additionally estimate every individual marker's pose
            corners2, ids2, _ = find_aruco_corners(frame, dictionary)
            cv2.aruco.drawDetectedMarkers(frame_annotated, corners2, ids2)
            rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners2, charuco_marker_length, intrinsics, dist_coeffs)
            for n in range(len(tvecs)):
                cv2.aruco.drawAxis(frame_annotated, intrinsics, dist_coeffs, rvecs[n], tvecs[n], 0.1)

        if valid == True:
            # draw the board origin axes and overlay its translation
            cv2.aruco.drawAxis(frame_annotated, intrinsics, dist_coeffs, rvec, tvec, 0.2)

            p3d = [tvec[0][0], tvec[1][0], tvec[2][0]]

            # draw the coordinates
            cv2.putText(frame_annotated,"{:.3f}".format(p3d[0]), (100,200), font, 4,(0,0,255), 6, cv2.LINE_AA)
            cv2.putText(frame_annotated,"{:.3f}".format(p3d[1]), (100,400), font, 4,(0,255,0), 6, cv2.LINE_AA)
            cv2.putText(frame_annotated,"{:.3f}".format(p3d[2]), (100,600), font, 4,(255,0,0), 6, cv2.LINE_AA)

            if key == 32:  # SPACE: only reachable when a valid board pose exists
                print("save frame to disc")
                cv2.imwrite("{}/frame_{:02}.jpg".format(target_folder, count), frame)
                cv2.imwrite("{}/frame_{:02}_annotated.jpg".format(target_folder, count), frame_annotated)
                # save the json data
                markers = makeMarkersJson(rvecs, tvecs, ids2)
                # board
                pose = makeRT(rvec, tvec)
                filename = "{}/frame_{:02}.json".format(target_folder, count)
                with open(filename, "w") as write_file:
                    json.dump( { "pose": pose, "markers" : markers }, write_file)
                count += 1
        cv2.imshow("cam1",frame_annotated)
    i.Release()
    del i
220 |
#
# cleanup
#

# Stop streaming, restore free-running trigger mode, and tear down the
# Spinnaker objects. NOTE(review): the explicit `del` calls appear to be
# needed so no camera references remain alive when ReleaseInstance() is
# called -- confirm against the PySpin documentation.
cam.EndAcquisition()
reset_trigger_mode_software(cam)
cam.DeInit()
del cam
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/calibration/openpose/models/18284509.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 3
5 | 4
6 | d
7 |
8 | 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 0.
9 |
10 | 3
11 | 3
12 | d
13 |
14 | 1.6062015380859375e+03 0. 6.3332989501953125e+02 0.
15 | 1.6014158935546875e+03 5.1715637207031250e+02 0. 0. 1.
16 |
17 | 4
18 | 1
19 | d
20 |
21 | -3.8691338896751404e-01 1.8264715373516083e-01
22 | 5.0779170123860240e-04 7.0454436354339123e-04
23 |
24 |
--------------------------------------------------------------------------------
/calibration/openpose/models/18284511.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 3
5 | 4
6 | d
7 |
8 | 8.6182423177752765e-01 9.2365614963480780e-02
9 | -4.9872596352455129e-01 1.4962586538184854e+00
10 | -1.3722739884753807e-01 9.8906887675602162e-01
11 | -5.3957372416754107e-02 -2.1298977473255817e-01
12 | 4.8829052266720557e-01 1.1494063774400942e-01 8.6507861796971230e-01
13 | 1.8544227437140977e+00
14 |
15 | 3
16 | 3
17 | d
18 |
19 | 1.4512408447265625e+03 0. 6.2678826904296875e+02 0.
20 | 1.4474798583984375e+03 5.0415841674804688e+02 0. 0. 1.
21 |
22 | 4
23 | 1
24 | d
25 |
26 | -4.0553498268127441e-01 2.4542711675167084e-01
27 | -5.2636169129982591e-04 -2.1352977491915226e-03
28 |
29 |
--------------------------------------------------------------------------------
/calibration/openpose/models/18284512.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 3
5 | 4
6 | d
7 |
8 | 9.0208230725757932e-01 -2.9102426381812242e-02
9 | 4.3058165278090221e-01 -1.3609990403909840e+00
10 | 9.3947013505841281e-02 9.8703875578159572e-01
11 | -1.3010938950918449e-01 4.1587256522273949e-02
12 | -4.2121427989347127e-01 1.5782123863349512e-01
13 | 8.9312428421245715e-01 7.3808612629077830e-01
14 |
15 | 3
16 | 3
17 | d
18 |
19 | 1.3515637207031250e+03 0. 6.6185534667968750e+02 0.
20 | 1.3574169921875000e+03 5.3975592041015625e+02 0. 0. 1.
21 |
22 | 4
23 | 1
24 | d
25 |
26 | -4.1849634051322937e-01 2.5624269247055054e-01
27 | -6.4104846387635916e-05 -1.6572237946093082e-03
28 |
29 |
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284509/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284509/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284509/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284509/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284509/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284509/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284509/capture_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284509/capture_3.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284511/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284511/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284511/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284511/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284511/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284511/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284511/capture_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284511/capture_3.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284512/capture_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284512/capture_0.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284512/capture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284512/capture_1.jpg
--------------------------------------------------------------------------------
/calibration/openpose/sample-data/18284512/capture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/openpose/sample-data/18284512/capture_2.jpg
--------------------------------------------------------------------------------
/calibration/points_imageframe_1.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.14111383091411678, "y": 0.08696145281611364, "z": 0.6169617417124994}, "rot3x3": [[0.9874292824544761, 0.05163008059457698, -0.1493912545272847], [0.08914970910423921, -0.9623844668182016, 0.2566485289139123], [-0.13052103860323822, -0.2667404596322861, -0.9548894101816653]]}, "markers": {"16": {"x": 0.2662993192680311, "y": 0.08232370717711356, "z": 0.6035126385882983}, "15": {"x": 0.19520673544225906, "y": 0.07568906852575992, "z": 0.6104298938873698}, "14": {"x": 0.30520460125367155, "y": 0.05129286233547108, "z": 0.5939837494875753}, "13": {"x": 0.23243760968354635, "y": 0.044697872560224644, "z": 0.5967217936937634}, "12": {"x": 0.16137741702089362, "y": 0.03796729780448893, "z": 0.603435453272404}, "11": {"x": 0.2722748009891388, "y": 0.014054895116271037, "z": 0.5893444690296425}, "10": {"x": 0.19922834828948377, "y": 0.007453596681293177, "z": 0.5924071958954249}, "9": {"x": 0.30691599693756616, "y": -0.01726956884502798, "z": 0.5710660710268894}, "8": {"x": 0.2376476239588099, "y": -0.023525037644978035, "z": 0.5817444010992959}, "7": {"x": 0.16757804389224573, "y": -0.030299302993355703, "z": 0.5933007824116059}, "6": {"x": 0.2734056373183421, "y": -0.05435986689460126, "z": 0.5655382309915983}, "5": {"x": 0.2031224889747174, "y": -0.06067910054512993, "z": 0.5744378771571128}, "4": {"x": 0.313434047403385, "y": -0.08633677718176046, "z": 0.5576105103036489}, "3": {"x": 0.24046841256428972, "y": -0.09161707240076819, "z": 0.5612655099463543}, "2": {"x": 0.17057121074357848, "y": -0.09805381148598147, "z": 0.570912332758138}, "1": {"x": 0.2778673220483072, "y": -0.12292056297649975, "z": 0.5486122141412827}, "0": {"x": 0.2089748548305246, "y": -0.12969243014599927, "z": 0.5609233222336083}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_10.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.4196132082854573, "y": 0.06168611363898768, "z": 0.9631270511902206}, "rot3x3": [[0.9879218629140165, -0.0978570057580473, -0.12014324450658742], [-0.07288739950588734, -0.9776924050090915, 0.1969898174546283], [-0.136739971368175, -0.18585361877458317, -0.973016244786603]]}, "markers": {"13": {"x": -0.3486768291590764, "y": 0.004121734727533278, "z": 0.9710155786173983}, "15": {"x": -0.37853952627254595, "y": 0.04200824642456905, "z": 0.9762431928470532}, "16": {"x": -0.31038256971581135, "y": 0.03717771416680888, "z": 0.9752052815927312}, "12": {"x": -0.41962216714142675, "y": 0.00927332451471324, "z": 0.9783421778428912}, "14": {"x": -0.27841752574458234, "y": -0.0012969903572213359, "z": 0.9640248600775305}, "10": {"x": -0.38615432624788715, "y": -0.028309126341924774, "z": 0.9638025841918239}, "11": {"x": -0.31441129243052307, "y": -0.03335368241760175, "z": 0.9523412519423432}, "7": {"x": -0.4285779593415513, "y": -0.06139069003030773, "z": 0.9695217391134945}, "8": {"x": -0.35790981855146037, "y": -0.06688269416707383, "z": 0.9630440965031172}, "9": {"x": -0.2869995294006277, "y": -0.0723440248894824, "z": 0.954450511306641}, "5": {"x": -0.38936913827550196, "y": -0.09775443257753096, "z": 0.940968467116712}, "6": {"x": -0.3219944678616273, "y": -0.1038397309789976, "z": 0.9401040840614925}, "2": {"x": -0.44316076124694, "y": -0.1346814800185918, "z": 0.9734634548865767}, "3": {"x": -0.3627046230145065, "y": -0.13717299722573428, "z": 0.9441769183057064}, "4": {"x": -0.2919446500905128, "y": -0.14233175214302113, "z": 0.9340170010101346}, "0": {"x": -0.40420913770763656, "y": -0.17088436809874905, "z": 0.9481826782537687}, "1": {"x": -0.33077987734761655, "y": -0.17487835790679102, "z": 0.9318189581333495}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_11.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.2930028830744008, "y": -0.0675316570850869, "z": 1.3423208855327662}, "rot3x3": [[0.9439875900383488, -0.05361199555982961, -0.32559665813039707], [0.020203575981688912, -0.9754720295069933, 0.2191942863467468], [-0.32936187601351496, -0.21349490294056275, -0.9197503362583009]]}, "markers": {"13": {"x": 0.38358698043558226, "y": -0.11999522720299106, "z": 1.3391045402413155}, "15": {"x": 0.35074682626474607, "y": -0.0855357304502193, "z": 1.3557115203544878}, "12": {"x": 0.3190074863405315, "y": -0.12369955264065151, "z": 1.3796760248816788}, "10": {"x": 0.3556583650471197, "y": -0.16022977702906516, "z": 1.3750684804985318}, "11": {"x": 0.4196514999054749, "y": -0.1561458894616063, "z": 1.33365853666698}, "7": {"x": 0.313751338668867, "y": -0.1937129889241968, "z": 1.3570603477633547}, "8": {"x": 0.37951534723736413, "y": -0.19054756581225124, "z": 1.322933638714441}, "9": {"x": 0.4486732129464989, "y": -0.1895099298172294, "z": 1.3021049573959083}, "5": {"x": 0.3501601408624087, "y": -0.23070934854616532, "z": 1.3527490465197853}, "6": {"x": 0.4079819273276562, "y": -0.2229636201563621, "z": 1.292358410852265}, "3": {"x": 0.38104839581439987, "y": -0.2640945089842524, "z": 1.3273708592232918}, "4": {"x": 0.433390621837926, "y": -0.2535612316235509, "z": 1.2543664316080543}, "0": {"x": 0.3508624535416991, "y": -0.3055947173505704, "z": 1.3555663078490645}, "1": {"x": 0.4131069111058913, "y": -0.29869075458381483, "z": 1.30647728684416}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_2.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.015811450386386875, "y": 0.06784295441273552, "z": 0.6407734593454211}, "rot3x3": [[0.9964313221144463, 0.05356852775802137, 0.06523061507524516], [0.03475020648333953, -0.964621868136201, 0.26133709010163647], [0.07692232093886647, -0.2581376848646158, -0.9630409608080615]]}, "markers": {"10": {"x": 0.04132399708394823, "y": -0.014546617362295495, "z": 0.6235802582348612}, "6": {"x": 0.11725948068997344, "y": -0.08147829777611888, "z": 0.6229392706679948}, "16": {"x": 0.10888360504290048, "y": 0.05594340279626359, "z": 0.6564927329941548}, "13": {"x": 0.07513135720348399, "y": 0.020416690950770377, "z": 0.641330181254665}, "11": {"x": 0.11274562747489983, "y": -0.012236331166083437, "z": 0.6363988198736994}, "8": {"x": 0.07903022149986712, "y": -0.04773559216281906, "z": 0.6227099353165357}, "15": {"x": 0.03771250746112886, "y": 0.05338812923809404, "z": 0.6480595960888124}, "14": {"x": 0.1464460962033375, "y": 0.022836071613376342, "z": 0.6504893536034706}, "12": {"x": 0.004022054355393763, "y": 0.017888504265749002, "z": 0.6313592597659214}, "9": {"x": 0.15104851396459604, "y": -0.045816519395198964, "z": 0.6355415974993516}, "7": {"x": 0.00773773627233367, "y": -0.04997834150066301, "z": 0.6118274690531293}, "5": {"x": 0.045481919219977816, "y": -0.08337382468499795, "z": 0.6113127291503687}, "4": {"x": 0.15362866694633626, "y": -0.11387820523659033, "z": 0.611919636661969}, "3": {"x": 0.0835614955681151, "y": -0.11770831729489956, "z": 0.6127102706455909}, "2": {"x": 0.011630704983744904, "y": -0.11783681350439584, "z": 0.5940396469822028}, "1": {"x": 0.12116334353433786, "y": -0.15086173870804997, "z": 0.6062949125209529}, "0": {"x": 0.04986511433904769, "y": -0.1543652859085692, "z": 0.6047093741123614}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_3.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.3130799895318682, "y": 0.1181432315430535, "z": 0.6431779860030923}, "rot3x3": [[0.9906028385230711, -0.04007136018199098, 0.13076812456864104], [-0.07380783305722638, -0.9615682286110536, 0.26445972378643423], [0.1151452130547817, -0.2716262649652379, -0.9554897969583994]]}, "markers": {"16": {"x": -0.19688729211288805, "y": 0.09474234164947198, "z": 0.6686263816550877}, "8": {"x": -0.23493016476361717, "y": -0.00617944142086753, "z": 0.6299857567813995}, "6": {"x": -0.20252687355098675, "y": -0.04316035305266859, "z": 0.6281748546039018}, "15": {"x": -0.26397714539536565, "y": 0.0983964237387854, "z": 0.6501944413351538}, "12": {"x": -0.3003026591756071, "y": 0.06677041347722182, "z": 0.635881303836493}, "13": {"x": -0.22988338057162197, "y": 0.06152894909441612, "z": 0.6436435182554252}, "14": {"x": -0.16301794132410533, "y": 0.05726982334459079, "z": 0.6627046040481998}, "10": {"x": -0.265682545585231, "y": 0.030393296062404606, "z": 0.6276995129721934}, "11": {"x": -0.19888550549494238, "y": 0.025553878734582716, "z": 0.6455044628342667}, "7": {"x": -0.30423609040793437, "y": -0.0011299866749642183, "z": 0.6189634948866295}, "9": {"x": -0.1648480343585584, "y": -0.011403543231616574, "z": 0.6379103124995852}, "5": {"x": -0.26992230881386997, "y": -0.03756388466482604, "z": 0.6117294310778376}, "2": {"x": -0.30741952672631423, "y": -0.06938714147798554, "z": 0.6008960055912678}, "3": {"x": -0.23681076348520816, "y": -0.07437710925662953, "z": 0.6085109302769723}, "4": {"x": -0.16862759909534325, "y": -0.08022971248176729, "z": 0.6217234190087988}, "0": {"x": -0.27404195393028596, "y": -0.10622429627377558, "z": 0.5962595472285114}, "1": {"x": -0.2052896721511173, "y": -0.11196724007097782, "z": 0.6082923572062011}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_4.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.33893688977932945, "y": -0.05677761715484562, "z": 0.8726737234326375}, "rot3x3": [[0.9976006473665132, -0.06802667573143921, 0.01285767330598377], [-0.0691908940047055, -0.9733449083212982, 0.21866026075131556], [-0.0023597798082437135, -0.21902525158970443, -0.9757163371622529]]}, "markers": {"15": {"x": 0.3936036379158709, "y": -0.0779192622019095, "z": 0.8765651823826142}, "16": {"x": 0.46619773403713716, "y": -0.08319196473409123, "z": 0.8802566868122713}, "12": {"x": 0.3592699237415142, "y": -0.11127995485691929, "z": 0.8777873225865606}, "13": {"x": 0.4287713524741772, "y": -0.11551380084849143, "z": 0.8731233531151575}, "14": {"x": 0.502082229974235, "y": -0.12122370269564595, "z": 0.8778953673517336}, "10": {"x": 0.3923963463424466, "y": -0.14817575222201795, "z": 0.8688044748190671}, "11": {"x": 0.4662530582987811, "y": -0.15407017342553891, "z": 0.8740797317274671}, "7": {"x": 0.3538684441942518, "y": -0.18027867469693637, "z": 0.8607724125405584}, "8": {"x": 0.4283915137496778, "y": -0.18656941224033363, "z": 0.8667761105930779}, "9": {"x": 0.49933597874389885, "y": -0.19145167721734563, "z": 0.8652191137980243}, "5": {"x": 0.3925203891770982, "y": -0.2203978717628206, "z": 0.8644409201580096}, "6": {"x": 0.4637942622755144, "y": -0.22528122045841006, "z": 0.8629119286723491}, "2": {"x": 0.3538192717254543, "y": -0.2531448785106348, "z": 0.8564957660321879}, "3": {"x": 0.42636566011578036, "y": -0.25844930591563603, "z": 0.8577297576680986}, "4": {"x": 0.5097604396867387, "y": -0.26949271288169163, "z": 0.8756773265079477}, "0": {"x": 0.39369845798064546, "y": -0.2953467186213743, "z": 0.8628849282154174}, "1": {"x": 0.4603493701835912, "y": -0.296328141510274, "z": 0.8496804158686312}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_5.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": 0.44696563476560186, "y": 0.3012825446853833, "z": 1.1883367384140366}, "rot3x3": [[0.9838207917718813, -0.1332503392255755, -0.11975390087014515], [-0.1320747357901254, -0.9910810663683448, 0.01773651747534258], [-0.12104922074642224, -0.0016330898486060647, -0.9926451627717885]]}, "markers": {"8": {"x": 0.5316761096224276, "y": 0.17315348639456818, "z": 1.2112825023705807}, "6": {"x": 0.5752333668230181, "y": 0.13562810436983427, "z": 1.2350185148891446}, "15": {"x": 0.5130874800537764, "y": 0.2868110941936094, "z": 1.222680768478851}, "16": {"x": 0.5832409927513718, "y": 0.2762346010927558, "z": 1.211933946572836}, "12": {"x": 0.47693705665521385, "y": 0.25773842395421, "z": 1.2381088779961162}, "13": {"x": 0.5421375559950089, "y": 0.24498834023143487, "z": 1.2138562365186614}, "14": {"x": 0.625433417571791, "y": 0.24005757808105177, "z": 1.2293977559034404}, "10": {"x": 0.5093641912101218, "y": 0.2173392165816609, "z": 1.236567700820064}, "11": {"x": 0.5672235183029568, "y": 0.20266289904031481, "z": 1.1980215746009435}, "7": {"x": 0.4717810453228023, "y": 0.18716883852549032, "z": 1.2511661745418565}, "5": {"x": 0.5007300601203555, "y": 0.144915257368698, "z": 1.2389424323702813}, "2": {"x": 0.45270642831454, "y": 0.11134712558947457, "z": 1.2262632943709664}, "3": {"x": 0.5195702812800864, "y": 0.10144684879760886, "z": 1.2065346888149802}, "4": {"x": 0.5998697540879945, "y": 0.09331921483647208, "z": 1.2170858517367258}, "0": {"x": 0.4953386457611125, "y": 0.072538617069714, "z": 1.2511246106611618}, "1": {"x": 0.567591636245833, "y": 0.06305645837871222, "z": 1.2394218480462518}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_6.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.011874006875833861, "y": 0.24507184892159883, "z": 0.8827828977531537}, "rot3x3": [[0.9926132431609562, -0.11491044170835225, 0.03891708992048573], [-0.10614162539909659, -0.9778897598843214, -0.18018205479742483], [0.05876142322243094, 0.17472037059191386, -0.982863107070918]]}, "markers": {"11": {"x": 0.10170229495267813, "y": 0.14964531142009685, "z": 0.9239280007647052}, "15": {"x": 0.03858992892496525, "y": 0.2251148940863499, "z": 0.8982136019108506}, "16": {"x": 0.10809135480860128, "y": 0.21619299831760463, "z": 0.8964047990557067}, "12": {"x": -0.0008798506908223263, "y": 0.19416293347790295, "z": 0.904656094555189}, "13": {"x": 0.07017055928301918, "y": 0.18789120902112302, "z": 0.9133157666441902}, "14": {"x": 0.1414225707061736, "y": 0.18099390801233506, "z": 0.9215710105448952}, "10": {"x": 0.030390721068115727, "y": 0.15577178929467225, "z": 0.9121124603166001}, "7": {"x": -0.008796409835311476, "y": 0.12401277709622488, "z": 0.9111324564083816}, "8": {"x": 0.06174619232143113, "y": 0.11780689641283097, "z": 0.9237595619858275}, "9": {"x": 0.13411634043749862, "y": 0.11160714030531241, "z": 0.9405607802599038}, "5": {"x": 0.02260342409619605, "y": 0.08730938183006914, "z": 0.9328934192221495}, "6": {"x": 0.09302288125082907, "y": 0.07919429355008807, "z": 0.9328279168590738}, "2": {"x": -0.017135437578022304, "y": 0.05585506260666799, "z": 0.9336215341367627}, "3": {"x": 0.05427524610823342, "y": 0.048902021698882824, "z": 0.9456490714008463}, "4": {"x": 0.12572831686015598, "y": 0.041075912528959604, "z": 0.9519593339860816}, "0": {"x": 0.01428056490364695, "y": 0.01747169992912383, "z": 0.9415267618498805}, "1": {"x": 0.08543615094211592, "y": 0.009651599360816818, "z": 0.9498323689791867}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_7.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.6199647526739974, "y": 0.2592148200225189, "z": 1.064635485662359}, "rot3x3": [[0.9975129343407589, -0.03493021958051755, -0.06121948695427997], [-0.037577194556218566, -0.9983838857589514, -0.042632981436218825], [-0.059631369866644225, 0.04482740698405062, -0.9972134191390094]]}, "markers": {"0": {"x": -0.5892235968333865, "y": 0.030820802780411403, "z": 1.0982455699112406}, "15": {"x": -0.5750936861291531, "y": 0.24289082423603695, "z": 1.074747781850987}, "12": {"x": -0.6259445720547337, "y": 0.21321986517509695, "z": 1.1029970721458047}, "13": {"x": -0.5568040713966759, "y": 0.21178624558202075, "z": 1.1064314184786688}, "14": {"x": -0.48569188953687203, "y": 0.21017051930625588, "z": 1.106283643462181}, "10": {"x": -0.5974615256077397, "y": 0.17766300203517277, "z": 1.1151509916242803}, "11": {"x": -0.5128710050659441, "y": 0.17124849982163906, "z": 1.086508877113776}, "7": {"x": -0.6282852456183828, "y": 0.1408756754411984, "z": 1.1063342666056668}, "8": {"x": -0.5534098659206879, "y": 0.13778265199858655, "z": 1.0981131414442455}, "9": {"x": -0.48387160613169933, "y": 0.13564696862872672, "z": 1.098648911471611}, "5": {"x": -0.5889465796074843, "y": 0.1027490983375025, "z": 1.0978806642331689}, "6": {"x": -0.5165778067466105, "y": 0.09985726381756344, "z": 1.092094664319003}, "2": {"x": -0.6372669318005247, "y": 0.06929701864904833, "z": 1.1218916578760452}, "3": {"x": -0.5509740186879436, "y": 0.06514886173785318, "z": 1.0913746103257276}, "4": {"x": -0.4947923731715569, "y": 0.0643821647278635, "z": 1.1208304846850652}, "1": {"x": -0.516838084055836, "y": 0.028050430131903087, "z": 1.090772936872525}, "16": {"x": -0.5021467020465185, "y": 0.23930166754109228, "z": 1.0665851255601986}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_8.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.5415202394265792, "y": -0.10170765142267182, "z": 0.9532383437527274}, "rot3x3": [[0.8577390350828881, -0.006220730101807917, 0.5140477119996519], [-0.0885432719198345, -0.9867714894039141, 0.13580175513339723], [0.5064028403285342, -0.1619979327540367, -0.8469432289655616]]}, "markers": {"15": {"x": -0.5064949021116149, "y": -0.1260134147554437, "z": 0.9958932098841365}, "16": {"x": -0.4448751934871353, "y": -0.1325044071647506, "z": 1.0315721171174383}, "12": {"x": -0.5340305199578758, "y": -0.15701980672081042, "z": 0.9659455191155929}, "13": {"x": -0.47688582895406056, "y": -0.16462984844271716, "z": 1.0101607740045995}, "14": {"x": -0.4217415320192751, "y": -0.17383438642295918, "z": 1.062753679542915}, "11": {"x": -0.44191546690887634, "y": -0.20111214400003039, "z": 1.0126944666806712}, "10": {"x": -0.5036877440525405, "y": -0.19509909131645145, "z": 0.9784389036393752}, "9": {"x": -0.416844247936006, "y": -0.2421716760918697, "z": 1.0375917657778648}, "8": {"x": -0.48025408966582717, "y": -0.23696450625154286, "z": 1.0049885694907377}, "7": {"x": -0.5333244554154744, "y": -0.22671922200574596, "z": 0.9526098311623243}, "6": {"x": -0.44587737211619694, "y": -0.2731862505627888, "z": 1.0087924208083232}, "5": {"x": -0.5025686538788388, "y": -0.26443679919238516, "z": 0.9642466179560243}, "4": {"x": -0.4155473336382952, "y": -0.31152204183111176, "z": 1.021199748419674}, "3": {"x": -0.475092409294841, "y": -0.3041157520962082, "z": 0.9820752360331804}, "2": {"x": -0.5443562629803359, "y": -0.3026603226587773, "z": 0.9601976120577528}, "1": {"x": -0.4510674921386644, "y": -0.3474712103721061, "z": 1.0081049903781207}, "0": {"x": -0.5018467025334551, "y": -0.33345545483555156, "z": 0.950131433598232}}}
--------------------------------------------------------------------------------
/calibration/points_imageframe_9.jpg.json:
--------------------------------------------------------------------------------
1 | {"pose": {"t": {"x": -0.0583756875663739, "y": 0.06549586815531452, "z": 0.9131840367878291}, "rot3x3": [[0.9902735194430029, -0.08474675981862029, 0.1103464697677897], [-0.10536727853530639, -0.974726277708294, 0.19699345207137453], [0.09086304696647476, -0.20670430630535702, -0.9741751569665433]]}, "markers": {"15": {"x": -0.008102140901934564, "y": 0.044043622038859476, "z": 0.9377204335358903}, "16": {"x": 0.0628994787175849, "y": 0.0363837331204565, "z": 0.9438664739788201}, "12": {"x": -0.04649277379087028, "y": 0.012519052440135554, "z": 0.9154369088005645}, "13": {"x": 0.024409515833989683, "y": 0.0053492902968891205, "z": 0.9283681434770472}, "14": {"x": 0.09793965811831432, "y": -0.0024018930428377997, "z": 0.9650739224606338}, "10": {"x": -0.014150902952884915, "y": -0.025397054428196585, "z": 0.9132373999387925}, "11": {"x": 0.05685909419941556, "y": -0.033242229600810684, "z": 0.92498117373849}, "7": {"x": -0.052425437857368674, "y": -0.05649741151053073, "z": 0.9008880595102716}, "8": {"x": 0.018222568445860662, "y": -0.06411618249632932, "z": 0.9096921945984064}, "9": {"x": 0.08997862116818436, "y": -0.07260159522763175, "z": 0.9291051887309937}, "5": {"x": -0.019930338716828144, "y": -0.09468035455431914, "z": 0.894575699071871}, "6": {"x": 0.05106808908968633, "y": -0.10414238953023526, "z": 0.9196712324464158}, "2": {"x": -0.05829907437685814, "y": -0.12599967095737324, "z": 0.8888582470918315}, "3": {"x": 0.01248829772128682, "y": -0.1363238672915786, "z": 0.9135855999674832}, "4": {"x": 0.08446632830164619, "y": -0.1446474931937977, "z": 0.9254645393950787}, "0": {"x": -0.026626021475055068, "y": -0.16762599362579916, "z": 0.90367490806362}, "1": {"x": 0.04537322080024873, "y": -0.17652906482691072, "z": 0.916336334648947}}}
--------------------------------------------------------------------------------
/calibration/sample-data/calibration.json:
--------------------------------------------------------------------------------
1 | {
2 | "calibrationDate": "2018-10-08 15:32:40.781436",
3 | "cameraResolution": {
4 | "type_id": "opencv-matrix",
5 | "rows": 2,
6 | "cols": 1,
7 | "dt": "d",
8 | "data": [ 720.0, 1280.0 ]
9 | },
10 | "cameraMatrix": {
11 | "type_id": "opencv-matrix",
12 | "rows": 3,
13 | "cols": 3,
14 | "dt": "d",
15 | "data": [ 1.0036890509044972e+03, 0.0, 6.3670897816398144e+02,
16 | 0.0, 1.0036890509044972e+03, 4.1429682375533667e+02, 0.0,
17 | 0.0, 1.0 ]
18 | },
19 | "dist_coeffs": {
20 | "type_id": "opencv-matrix",
21 | "rows": 14,
22 | "cols": 1,
23 | "dt": "d",
24 | "data": [ -8.5188812484719122e-03, -4.4933351860255627e-01,
25 | 5.7140445300074899e-03, -1.2610711736297426e-03,
26 | 2.6073694779700285e-01, -1.3493827802828759e-01,
27 | 1.0285916634349823e-01, -3.4395347152192784e-01, 0.0, 0.0,
28 | 0.0, 0.0, 0.0, 0.0 ]
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/calibration/sample-data/frame_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_1.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_10.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_11.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_2.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_3.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_4.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_5.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_6.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_7.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_8.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/frame_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/frame_9.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/test_frame_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/test_frame_1.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/test_frame_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/test_frame_2.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/test_frame_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/test_frame_3.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/test_frame_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/test_frame_4.jpg
--------------------------------------------------------------------------------
/calibration/sample-data/test_frame_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/calibration/sample-data/test_frame_5.jpg
--------------------------------------------------------------------------------
/data/ChAruCo 11x8, 300dpi, 50mm spacing, 30mm markers.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/data/ChAruCo 11x8, 300dpi, 50mm spacing, 30mm markers.pdf
--------------------------------------------------------------------------------
/data/board.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/data/board.pdf
--------------------------------------------------------------------------------
/flir/README.rst:
--------------------------------------------------------------------------------
1 | Find Chessboards with Flir cameras
2 | ==================================
3 |
4 | Installation
5 | ------------
Install OpenCV for Python (including the opencv-contrib modules) and the PySpin library from FLIR's (formerly Point Grey) Spinnaker SDK.
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/flir/common.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 | import os
5 |
6 | import numpy as np
7 | import cv2
8 |
9 |
10 | import argparse
11 |
# Command-line interface: -c selects a camera by serial number
# ("0" means: fall back to the first camera found).
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--camera", type=str, default="0", help="camera by id")
args = vars(parser.parse_args())
17 |
def set_trigger_mode_software(cam):
    """Configure *cam* so a frame is produced only on a software trigger.

    The trigger must be disabled while its source is switched, then
    re-armed with the software source selected.
    """
    trigger_mode = cam.TriggerMode
    trigger_mode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    trigger_mode.SetValue(PySpin.TriggerMode_On)
    print("set trigger mode software")
23 |
24 |
def reset_trigger_mode_software(cam):
    """Turn triggering off so the camera returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
28 |
29 |
#
# setup
#
# Find the requested camera (by serial number), initialize it for
# continuous, software-triggered acquisition, and create the folder
# the captures will be written to.

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

# List every attached camera so the user knows what -c values are valid.
for i in range(cam_list.GetSize()):
    cam = cam_list.GetByIndex(i)
    print("camera {} serial: {}".format(i, cam.GetUniqueID()))


camera_serial = args["camera"]
if camera_serial == "0":
    camera_serial = cam_list.GetByIndex(0).GetUniqueID()
    print("no camera specified (use -c), using the first one in the list {}".format(camera_serial))


cam = cam_list.GetBySerial(camera_serial)

try:
    cam.Init()
    cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
    set_trigger_mode_software(cam)
    cam.BeginAcquisition()
except PySpin.SpinnakerException as ex:
    # Narrow except: a bare `except:` would also swallow KeyboardInterrupt
    # and hide the actual failure reason.
    print("error initializing camera {}".format(camera_serial))
    print("  {}".format(ex))
    del cam
    cam_list.Clear()
    system.ReleaseInstance()
    sys.exit()

# Captures are stored in a folder named after the camera serial;
# exist_ok avoids crashing when the folder is left over from a previous run.
os.makedirs(camera_serial, exist_ok=True)
69 |
70 |
71 |
#
# loop
#
# ESC quits, SPACE saves the most recent complete frame to disk.

count = 0

while 1:
    key = cv2.waitKey(1)

    if key == 27:  # ESC
        cv2.destroyAllWindows()
        break

    # Fire the software trigger and fetch the resulting frame.
    cam.TriggerSoftware()
    img = cam.GetNextImage()
    #print(img.GetWidth(), img.GetHeight(), img.GetBitsPerPixel())
    cvi = None

    if img.IsIncomplete():
        pass
    else:
        # see documentation: enum ColorProcessingAlgorithm
        image_converted = img.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
        image_data = image_converted.GetData()
        cvi = np.frombuffer(image_data, dtype=np.uint8)
        cvi = cvi.reshape((img.GetHeight(), img.GetWidth(), 3))
        cv2.imshow("cam1", cvi)
    img.Release()
    del img

    # Only save when a complete frame was decoded this iteration;
    # otherwise cvi is None and cv2.imwrite would raise.
    if key == 32 and cvi is not None:
        cv2.imwrite("{}/capture_{}.jpg".format(camera_serial, count), cvi)
        count += 1
        print("saved image {}".format(count))
#
# cleanup
#
# Teardown order matters: stop acquisition, restore the free-running
# trigger, de-init the camera, then drop every PySpin object reference
# before releasing the system instance (PySpin expects all camera
# references to be gone at that point — see Spinnaker docs).

cam.EndAcquisition()
reset_trigger_mode_software(cam)
cam.DeInit()
del cam
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/flir_capture.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 |
5 | import numpy as np
6 | import cv2
7 |
8 |
9 |
def set_trigger_mode_software(cam):
    """Put *cam* into software-triggered mode.

    The trigger is switched off while the source is changed, then armed
    again with the software source active.
    """
    mode = cam.TriggerMode
    mode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    mode.SetValue(PySpin.TriggerMode_On)
    print("set trigger mode software")
15 |
16 |
def reset_trigger_mode_software(cam):
    """Turn triggering off so the camera returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
20 |
21 |
#
# setup
#
# Discover every attached camera, switch each one to continuous
# software-triggered acquisition, and collect them in `cameras`.

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

num_cams = cam_list.GetSize()
print("number of cameras {}".format(num_cams))

if num_cams == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for idx in range(num_cams):
    cam = cam_list.GetByIndex(idx)
    print("camera {} serial: {}".format(idx, cam.GetUniqueID()))
    cam.Init()
    cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
    set_trigger_mode_software(cam)
    cam.BeginAcquisition()
    cameras.append(cam)
46 |
47 |
#
# loop
#
# Trigger every camera each iteration and display its frame; ESC quits.

while 1:

    if cv2.waitKey(1) == 27:  # ESC
        cv2.destroyAllWindows()
        break

    for cam in cameras:
        cam.TriggerSoftware()
        img = cam.GetNextImage()
        #print(img.GetWidth(), img.GetHeight(), img.GetBitsPerPixel())

        if img.IsIncomplete():
            pass
        else:
            # see documentation: enum ColorProcessingAlgorithm
            image_converted = img.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
            image_data = image_converted.GetData()
            cvi = np.frombuffer(image_data, dtype=np.uint8)
            cvi = cvi.reshape((img.GetHeight(), img.GetWidth(), 3))
            print(cvi.shape)
            # One window per camera: the previous fixed name ("cam1") made
            # every camera overwrite the same window.
            cv2.imshow("cam {}".format(cam.GetUniqueID()), cvi)
        img.Release()
        del img
77 |
#
# cleanup
#
# Stop and de-init every camera, then drop all PySpin object references
# before releasing the system instance (PySpin expects no live camera
# references at ReleaseInstance time — see Spinnaker docs).

for cam in cameras:
    cam.EndAcquisition()
    reset_trigger_mode_software(cam)
    cam.DeInit()
    del cam
del cameras
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/flir_capture_all_continuous.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 | import os
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | from queue import Queue
10 | import threading
11 |
12 | import argparse
13 |
# Command-line interface: a required output folder plus capture options.
ap = argparse.ArgumentParser()
ap.add_argument("folder", help="folder to save images")
ap.add_argument("-m", "--master", type=str, default="18284509", help="master camera by id")
ap.add_argument("-f", "--force", action="store_true", help="force overwrite in folder")
ap.add_argument("--fps", type=int, default=20, help="set framerate")
ap.add_argument("--openpose", action="store_true", help="name images for openpose (e.g. 000000000000_rendered, ..._1, ..._2, ...)")

args = vars(ap.parse_args())

save_for_openpose = args['openpose']
fps = args["fps"]

# Create the output folder; refuse an existing folder unless --force is given.
target_folder = args['folder']
if not os.path.isdir(target_folder):
    os.makedirs(target_folder)
elif not args['force']:
    print("{}: error: folder {} exists. Use --force to overwrite files.".format(os.path.basename(sys.argv[0]), target_folder))
    sys.exit()
37 |
38 |
39 |
def set_fps(cam, fps):
    """Enable manual frame-rate control on *cam* and set it to *fps*.

    Prints the rate before and after the change so a mismatch (e.g. the
    camera clamping the requested rate) is visible in the log.
    """
    rate = cam.AcquisitionFrameRate
    print("current acquisitionFrameRate", rate.GetValue())
    cam.AcquisitionFrameRateEnable.SetValue(True)
    print("acquisitionFrameRate Enable", cam.AcquisitionFrameRateEnable.GetValue())
    rate.SetValue(fps)
    print("acquisitionFrameRate set to", rate.GetValue())
46 |
def set_trigger_mode(cam, triggerSource):
    """Arm *cam* to acquire on *triggerSource* (e.g. software or hardware Line3)."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(triggerSource)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
51 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")

# NOTE: a byte-identical duplicate of this function used to follow here;
# the second definition silently shadowed the first and has been removed.
60 |
61 |
#
# setup
#
# which camera acts as timing master; all others follow on hardware Line3
master_id = args["master"]
master = None


system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

# bail out early (and release the SDK singleton) when no camera is attached
if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

for i in range(cam_list.GetSize()):
    cam = cam_list.GetByIndex(i)
    print("camera {} serial: {}".format(i, cam.GetUniqueID()))



# Initialise every camera: the master free-runs at the requested fps,
# followers are armed to trigger on hardware Line3.
cameras = []
for i in range(cam_list.GetSize()):
    try:
        cam = cam_list.GetByIndex(i)
        cam_id = cam.GetUniqueID()
        cam.Init()

        if cam_id == master_id:
            print("master: {}".format(cam_id))
            master = cam
            #set_trigger_mode(cam, PySpin.TriggerSource_Software)
            set_fps(cam, fps)
        else:
            print("follower: {}".format(cam_id))
            set_trigger_mode(cam, PySpin.TriggerSource_Line3)

        cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
        cam.BeginAcquisition()
        cameras.append(cam)
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))
        sys.exit()
106 |
107 |
class ImageWorker(threading.Thread):
    """Background thread that converts queued PySpin images to BGR and saves them.

    Items queued via addImage() are (filename, image) tuples; the worker drains
    the queue until stop() has been called and the queue is empty.
    """

    def __init__(self):
        super(ImageWorker, self).__init__()
        self.images = Queue()                 # pending (filename, image) tuples
        self._stop_event = threading.Event()  # set by stop() to request shutdown

    def addImage(self, img):
        """Queue one (filename, image) tuple for saving."""
        self.images.put(img)

    def stop(self):
        """Ask the worker to exit once the queue has been drained."""
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        print("run")
        while True:
            if self.images.empty():
                if self.stopped():
                    break  # drained and shutdown requested -> exit thread
                time.sleep(0.10)  # idle poll; keeps shutdown latency low
            else:
                item = self.images.get()

                # was `item != None`: compare against the None sentinel by
                # identity, not equality (PEP 8)
                if item is not None:
                    filename, i = item

                    # see PySpin docs: enum ColorProcessingAlgorithm
                    image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)

                    # write with Spinnaker's own encoder
                    image_converted.Save(filename)
148 |
149 |
150 |
151 |
#
# loop
#

count = 0


# background thread that converts and writes queued images to disk
worker = ImageWorker()
worker.start()


# one (filename, image) slot per camera; a frame is only handed to the
# worker when all three slots are filled in the same capture round
images = [None, None, None]

fps_report_frequency = fps*2
start_time = time.time()

try:
    while 1:
        if cv2.waitKey(1) != -1:
            break

        # NOTE(review): this rebinds `fps` to the measured rate, shadowing
        # the configured frame rate from the command line
        if count % fps_report_frequency == 0:
            fps = fps_report_frequency / (time.time() - start_time) # / fps_report_frequency
            print("fps {:.3f} image count {}, in buffer {}".format(fps, count,worker.images.qsize() ))
            start_time = time.time()



        for n in range(len(cameras)):
            cam = cameras[n]
            try:
                # milliseconds.
                try:
                    # camera 0 (the master) blocks indefinitely; followers
                    # time out after 20 ms so a missed trigger does not
                    # stall the whole round
                    if n == 0:
                        i = cam.GetNextImage()
                    else:
                        i = cam.GetNextImage(20)
                    #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())
                except PySpin.SpinnakerException as ex:
                    print("none")
                    continue

                if i.IsIncomplete():
                    print("incomplete")
                    pass
                else:
                    # a new master frame starts a new capture round
                    if n == 0:
                        print("new frame")
                        count += 1
                        images = [None, None, None]

                    print(n, time.time() )

                    cam_id = cam.GetUniqueID()

                    if save_for_openpose:
                        if n == 0:
                            filename = "{}/{:012}_rendered.jpg".format(target_folder, count)
                        else:
                            filename = "{}/{:012}_rendered_{}.jpg".format(target_folder, count, n)
                    else:
                        filename = "{}/cam_{}__{:06}.jpg".format(target_folder, cam_id, count)
                    #print(filename, count[n], cam_id)

                    images[n] = (filename, i)
                    #worker.addImage( (filename, i) )

                    # NOTE(review): the image is released here, but the handle
                    # stored in `images` is converted later by the worker —
                    # presumably Release() only returns the buffer to the
                    # stream pool; TODO confirm against the PySpin docs
                    i.Release()
                    del i
            except PySpin.SpinnakerException as ex:
                print("Error: {}".format(ex))
        # after a capture round of all cameras.
        if (images[0] is not None) and (images[1] is not None) and (images[2] is not None):
            print("got three images")
            worker.addImage( images[0])
            worker.addImage( images[1])
            worker.addImage( images[2])



except KeyboardInterrupt:
    pass
#
# cleanup
#

# Stop accepting work and wait until the worker thread has written every
# queued image to disk.
worker.stop()

while True:
    print("{} images to be processed. waiting for thread to finish".format(worker.images.qsize()))
    time.sleep(0.5)
    # (removed dead `done = True` that was never read)
    if worker.images.empty():
        break


master = None

# Stop streaming, restore the default free-run trigger mode, and release
# each camera handle before tearing down the PySpin system singleton.
for cam in cameras:
    cam.EndAcquisition()
    reset_trigger_mode_software(cam)
    cam.DeInit()
    del cam

del cameras
del cam_list

system.ReleaseInstance()
del system
265 |
--------------------------------------------------------------------------------
/flir/flir_capture_multiple.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 |
5 | import numpy as np
6 | import cv2
7 |
def set_trigger_mode(cam, triggerSource):
    """Arm *cam* to acquire on *triggerSource* (e.g. software or hardware Line3)."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(triggerSource)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
12 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
16 |
17 |
#
# setup
#

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

# the master camera is triggered by software; all others follow on Line3
master_id = "18284509"
master = None

print("number of cameras {}".format(cam_list.GetSize()))


if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for i in range(cam_list.GetSize()):
    try:
        cam = cam_list.GetByIndex(i)
        cam_id = cam.GetUniqueID()
        cam.Init()


        if cam_id == master_id:
            print("master: {}".format(cam_id))
            master = cam
            set_trigger_mode(cam, PySpin.TriggerSource_Software)
        else:
            print("follower: {}".format(cam_id))
            set_trigger_mode(cam, PySpin.TriggerSource_Line3)
        cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
        cam.BeginAcquisition()
        cameras.append(cam)
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))
57 |
58 |
#
# loop
#


count = 0
size = 4      # preview downscale factor
frame = {}    # latest BGR frame per camera serial

# maps camera serial -> filename suffix for saved frames
camera_dict = { "18284509": "", "18284511": "_1", "18284512": "_2"}

print("start loop")

while 1:

    key = cv2.waitKey(1)

    if key == 27: # ESC
        cv2.destroyAllWindows()
        break
    elif key == 32: # SPACE
        print("take picture")
        # NOTE(review): the inner loop rebinds `key`; harmless only because
        # `key` is reassigned by waitKey at the top of the next iteration
        for key, value in frame.items():
            cv2.imwrite("frame_{:012}{}.jpg".format(count,camera_dict[key]), value)
        count = count + 1

    # fire the master's software trigger; followers fire via hardware Line3
    try:
        master.TriggerSoftware.Execute()
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))

    for cam in cameras:
        try:
            i = cam.GetNextImage()
            #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())

            if i.IsIncomplete():
                pass
            else:
                cam_id = cam.GetUniqueID()
                # see documentation: enum ColorProcessingAlgorithm
                image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
                image_data = image_converted.GetData()
                cvi = np.frombuffer(image_data, dtype=np.uint8)
                cvi = cvi.reshape((i.GetHeight(),i.GetWidth(),3))
                frame[cam_id] = cvi

                res = cv2.resize(cvi, (int(1280/size),int(1024/size)))
                cv2.imshow("cam{}".format(cam_id),res)
            i.Release()
            del i
        except PySpin.SpinnakerException as ex:
            print("Error: {}".format(ex))
112 |
#
# cleanup
#

# drop the extra reference to the master before deinitialising the cameras
del master
for cam in cameras:
    cam.EndAcquisition()
    reset_trigger_mode_software(cam)
    cam.DeInit()
    del cam
del cameras
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/flir_capture_multiple_video.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 |
5 | import numpy as np
6 | import cv2
7 |
8 | import SaveToAvi
9 |
def set_trigger_mode(cam, triggerSource):
    """Arm *cam* to acquire on *triggerSource* (e.g. software or hardware Line3)."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(triggerSource)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
14 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
18 |
def mode_status():
    """Print which capture mode is currently selected (video / continuous / single)."""
    if sw_vid:
        print("record video")
    elif sw_con:
        print("record continuous photo")
    else:
        print("record single photo")
27 |
#
# setup
#

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

# the master camera is triggered by software; all others follow on Line3
master_id = "18284509"
master = None

print("number of cameras {}".format(cam_list.GetSize()))


if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for i in range(cam_list.GetSize()):
    try:
        cam = cam_list.GetByIndex(i)
        cam_id = cam.GetUniqueID()
        cam.Init()


        if cam_id == master_id:
            print("master: {}".format(cam_id))
            master = cam
            set_trigger_mode(cam, PySpin.TriggerSource_Software)
        else:
            print("follower: {}".format(cam_id))
            set_trigger_mode(cam, PySpin.TriggerSource_Line3)
        cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
        cam.BeginAcquisition()
        cameras.append(cam)
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))
67 |
68 |
69 | #
70 | # loop
71 | #
72 |
73 |
# state toggles:
#   sw_rec - currently recording (continuous photo mode)
#   sw_con - continuous-photo mode selected
#   sw_vid - video mode selected
count = 0
sw_rec = False
sw_con = False
sw_vid = False
size = 4      # preview downscale factor
frame = {}    # latest BGR frame per camera serial

from path import Path
directory = Path("./")


# maps camera serial -> filename suffix for saved frames
camera_dict = { "18284509": "", "18284511": "_1", "18284512": "_2"}

print("start loop")

while 1:

    key = cv2.waitKey(1)

    if key == 27: # ESC
        cv2.destroyAllWindows()
        break
    elif key == 32: # SPACE
        if sw_con:
            sw_rec = not sw_rec
            if sw_rec:
                print("start recording")
            elif not sw_rec:
                print("finish recording")
        elif not sw_vid:
            print("take picture:{}".format(count))
            for key, value in frame.items():
                cv2.imwrite("frame_{:012}{}.jpg".format(count,camera_dict[key]), value)
            count = count + 1
    elif key == 99: # C
        sw_con = not sw_con
        mode_status()
    elif key == 114: # R
        count = 0
        print("reset count")
        # BUG FIX: the .jpg file list was immediately overwritten by the
        # .avi one, so .jpg files were never deleted; remove both kinds.
        for pattern in ("*.jpg", "*.avi"):
            for file in directory.walkfiles(pattern):
                file.remove()
        print("reset .jpg .avi")
    elif key == 118: # V
        sw_vid = not sw_vid
        mode_status()

    # fire the master's software trigger; followers fire via hardware Line3
    try:
        master.TriggerSoftware.Execute()
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))

    for cam in cameras:
        try:
            i = cam.GetNextImage()
            #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())

            if i.IsIncomplete():
                pass
            else:
                cam_id = cam.GetUniqueID()
                # see documentation: enum ColorProcessingAlgorithm
                image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
                image_data = image_converted.GetData()
                cvi = np.frombuffer(image_data, dtype=np.uint8)
                cvi = cvi.reshape((i.GetHeight(),i.GetWidth(),3))
                frame[cam_id] = cvi

                if sw_vid:
                    print("recording video")
                    #..............................................

                if not sw_rec:
                    res = cv2.resize(cvi, (int(1280/size),int(1024/size)))
                    cv2.imshow("cam{}".format(cam_id),res)

                if sw_rec:
                    if not sw_vid:
                        print("take picture:{}".format(count))
                        for key, value in frame.items():
                            cv2.imwrite("frame_{:012}{}.jpg".format(count,camera_dict[key]), value)
                        count = count + 1
            i.Release()
            del i
        except PySpin.SpinnakerException as ex:
            print("Error: {}".format(ex))
162 |
#
# cleanup
#

# drop the extra reference to the master before deinitialising the cameras
del master
for cam in cameras:
    cam.EndAcquisition()
    reset_trigger_mode_software(cam)
    cam.DeInit()
    del cam
del cameras
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/flir_capture_single.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 | import os
5 |
6 | import numpy as np
7 | import cv2
8 |
9 |
10 | import argparse
11 |
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
# default "0" means "use the first camera found" (resolved below)
ap.add_argument("-c", "--camera", type=str, default="0", help="camera by id")

args = vars(ap.parse_args())
17 |
def set_trigger_mode_software(cam):
    """Arm *cam* to be triggered by software (TriggerSoftware.Execute())."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
    print("set trigger mode software")
23 |
24 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
28 |
29 |
#
# setup
#

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for i in range(cam_list.GetSize()):
    cam = cam_list.GetByIndex(i)
    print("camera {} serial: {}".format(i, cam.GetUniqueID()))



# resolve the default "0" to the serial of the first attached camera
camera_serial = args["camera"]
if camera_serial == "0":
    camera_serial = cam_list.GetByIndex(0).GetUniqueID()
    print("no camera specified (use -c), using the first one in the list {}".format(camera_serial))


cam = cam_list.GetBySerial(camera_serial)

try:
    cam.Init()
    cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
    set_trigger_mode_software(cam)
    cam.BeginAcquisition()

# was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit;
# catch the SDK error and report it instead
except PySpin.SpinnakerException as ex:
    print("error initializing camera {}".format(camera_serial))
    print("Error: {}".format(ex))
    sys.exit()


# was os.mkdir(), which crashes when the capture folder already exists
# from an earlier run of this script
os.makedirs(camera_serial, exist_ok=True)
70 |
71 |
72 |
#
# loop
#

count = 0

while 1:
    key = cv2.waitKey(1)

    if key == 27: # ESC
        cv2.destroyAllWindows()
        break

    #cam.TriggerSoftware()
    i = cam.GetNextImage()
    print(count)
    #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())
    cvi = None

    if i.IsIncomplete():
        pass
    else:
        # see documentation: enum ColorProcessingAlgorithm
        image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
        image_data = image_converted.GetData()
        cvi = np.frombuffer(image_data, dtype=np.uint8)
        cvi = cvi.reshape((i.GetHeight(),i.GetWidth(),3))
        cv2.imshow("cam1",cvi)
    i.Release()
    del i

    # BUG FIX: guard against cvi being None (incomplete frame);
    # cv2.imwrite(..., None) would raise
    if key == 32 and cvi is not None:
        cv2.imwrite("{}/capture_{}.jpg".format(camera_serial,count), cvi)
        count+= 1
        print("saved image {}".format(count))
#
# cleanup
#

# stop streaming, restore free-run trigger mode, and release the camera
# before tearing down the PySpin system singleton
cam.EndAcquisition()
reset_trigger_mode_software(cam)
cam.DeInit()
del cam
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/flir_capture_single_continuous.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 | import os
5 |
6 | import numpy as np
7 | import cv2
8 |
9 | from queue import Queue
10 | import threading
11 |
12 | import argparse
13 |
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("folder", help="folder to save images")
ap.add_argument("-c", "--camera", type=str, default="0", help="use camera by id")
ap.add_argument("-f", "--force", action="store_true", help="force overwrite in folder")
ap.add_argument("--fps", type=int, default=20, help="set framerate in Hertz")

args = vars(ap.parse_args())

fps = args["fps"]



# make the output folder; refuse to reuse an existing one unless --force is given
target_folder = args['folder']
if os.path.isdir(target_folder):
    if not args['force']:  # idiomatic truth test instead of `== False`
        print("{}: error: folder {} exists. Use --force to overwrite files.".format(os.path.basename(sys.argv[0]), target_folder))
        sys.exit()
else:
    os.makedirs(target_folder)
35 |
def set_fps(cam, fps):
    """Enable the camera's fixed-frame-rate mode and set it to *fps* Hz,
    printing the node values before and after for operator feedback."""
    rate_node = cam.AcquisitionFrameRate
    enable_node = cam.AcquisitionFrameRateEnable
    print("current acquisitionFrameRate", rate_node.GetValue())
    enable_node.SetValue(True)
    print("acquisitionFrameRate Enable", enable_node.GetValue())
    rate_node.SetValue(fps)
    print("acquisitionFrameRate set to", rate_node.GetValue())
42 |
43 |
def set_trigger_mode_software(cam):
    """Arm *cam* to be triggered by software (TriggerSoftware.Execute())."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
    print("set trigger mode software")
49 |
50 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
54 |
55 |
#
# setup
#

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for i in range(cam_list.GetSize()):
    cam = cam_list.GetByIndex(i)
    print("camera {} serial: {}".format(i, cam.GetUniqueID()))


# resolve the default "0" to the serial of the first attached camera
camera_serial = args["camera"]
if camera_serial == "0":
    camera_serial = cam_list.GetByIndex(0).GetUniqueID()
    print("no camera specified (use -c). Using the first one in the list {}".format(camera_serial))


cam = cam_list.GetBySerial(camera_serial)

try:
    cam.Init()
    cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)

    # we now disable the trigger mode and let the camera capture at fps
    # set_trigger_mode_software(cam)
    reset_trigger_mode_software(cam)
    set_fps(cam, fps)

    cam.BeginAcquisition()

# was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit;
# catch the SDK error and report it instead
except PySpin.SpinnakerException as ex:
    print("error initializing camera {}".format(camera_serial))
    print("Error: {}".format(ex))
    sys.exit()



print("saving images to folder {}".format(target_folder))
print("stop recording using ctr-c")
102 |
103 |
104 |
class ImageWorker(threading.Thread):
    """Background thread that converts queued PySpin images to BGR and saves them.

    Items queued via addImage() are (filename, image) tuples; the worker drains
    the queue until stop() has been called and the queue is empty.
    """

    def __init__(self):
        super(ImageWorker, self).__init__()
        self.images = Queue()                 # pending (filename, image) tuples
        self._stop_event = threading.Event()  # set by stop() to request shutdown

    def addImage(self, img):
        """Queue one (filename, image) tuple for saving."""
        self.images.put(img)

    def stop(self):
        """Ask the worker to exit once the queue has been drained."""
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        print("run")
        while True:
            if self.images.empty():
                if self.stopped():
                    break  # drained and shutdown requested -> exit thread
                time.sleep(0.10)  # idle poll; keeps shutdown latency low
            else:
                item = self.images.get()

                # was `item != None`: compare against the None sentinel by
                # identity, not equality (PEP 8)
                if item is not None:
                    filename, i = item

                    # see PySpin docs: enum ColorProcessingAlgorithm
                    image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)

                    # write with Spinnaker's own encoder
                    image_converted.Save(filename)
145 |
146 |
#
# loop
#


# background thread that converts and writes queued images to disk
worker = ImageWorker()
worker.start()

count = 0
fps_report_frequency = fps*2
start_time = time.time()

try:
    while 1:
        if cv2.waitKey(1) != -1:
            break

        # NOTE(review): this rebinds `fps` to the measured rate, shadowing
        # the configured frame rate from the command line
        if count % fps_report_frequency == 0:
            fps = fps_report_frequency / (time.time() - start_time) # / fps_report_frequency
            print("fps {:.3f} image count {}, in buffer {}".format(fps, count,worker.images.qsize() ))
            start_time = time.time()

        #cam.TriggerSoftware()
        i = cam.GetNextImage()
        #print("new one", count)
        #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())
        cvi = None

        if i.IsIncomplete():
            pass
        else:
            filename = "{}/cam_{:06}.jpg".format(target_folder,count)
            # NOTE(review): the image handle is queued for the worker and then
            # released below — presumably Release() only returns the buffer to
            # the stream pool; TODO confirm against the PySpin docs
            worker.addImage( (filename, i) )
            count += 1
        i.Release()
        del i

except KeyboardInterrupt:
    pass
#
# cleanup
#

# stop accepting work and wait until the worker thread has written every
# queued image to disk
worker.stop()

while(1):
    print("{} images to be processed. waiting for thread to finish".format(worker.images.qsize()))
    time.sleep(0.5)
    if worker.images.empty():
        break


# stop streaming, restore free-run trigger mode, and release the camera
# before tearing down the PySpin system singleton
cam.EndAcquisition()
reset_trigger_mode_software(cam)
cam.DeInit()
del cam
del cam_list

system.ReleaseInstance()
del system
207 |
208 |
--------------------------------------------------------------------------------
/flir/flir_detect_aruco.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | import PySpin
4 |
5 | import numpy as np
6 | import cv2
7 |
8 |
def detect_aruco_markers(img, dictionary):
    """Detect aruco markers in a BGR image and refine their corners to
    sub-pixel accuracy.

    Returns (corners, ids) exactly as cv2.aruco.detectMarkers reports them;
    the corner arrays are refined in place.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, _rejected = cv2.aruco.detectMarkers(gray, dictionary)
    refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    # iterating an empty list is a no-op, so no explicit length guard needed
    for marker_corners in corners:
        cv2.cornerSubPix(gray, marker_corners, winSize=(3,3), zeroZone=(-1,-1), criteria=refine_criteria)
    return corners, ids
17 |
18 |
def set_trigger_mode(cam, triggerSource):
    """Arm *cam* to acquire on *triggerSource* (e.g. software or hardware Line3)."""
    # the trigger source can only be changed while trigger mode is off
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSource.SetValue(triggerSource)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)
23 |
def reset_trigger_mode_software(cam):
    """Disable triggering so *cam* returns to free-running acquisition."""
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    print("reset trigger mode")
27 |
#
# setup
#

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()

# the master camera is triggered by software; all others follow on Line3
master_id = "18284509"
master = None

print("number of cameras {}".format(cam_list.GetSize()))


if cam_list.GetSize() == 0:
    print("no cameras found, aborting")
    system.ReleaseInstance()
    del system
    sys.exit()

cameras = []
for i in range(cam_list.GetSize()):
    try:
        cam = cam_list.GetByIndex(i)
        cam_id = cam.GetUniqueID()
        cam.Init()


        if cam_id == master_id:
            print("master: {}".format(cam_id))
            master = cam
            set_trigger_mode(cam, PySpin.TriggerSource_Software)
        else:
            print("follower: {}".format(cam_id))
            set_trigger_mode(cam, PySpin.TriggerSource_Line3)
        cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
        cam.BeginAcquisition()
        cameras.append(cam)
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))
67 |
68 |
#
# loop
#


count = 0
size = 4      # preview downscale factor
frame = {}    # latest BGR frame per camera serial

# maps camera serial -> filename suffix
camera_dict = { "18284509": "", "18284511": "_1", "18284512": "_2"}


dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)

print("start loop")

while 1:

    key = cv2.waitKey(1)

    if key == 27: # ESC
        cv2.destroyAllWindows()
        break

    # fire the master's software trigger; followers fire via hardware Line3
    try:
        master.TriggerSoftware.Execute()
    except PySpin.SpinnakerException as ex:
        print("Error: {}".format(ex))

    for cam in cameras:
        try:
            i = cam.GetNextImage()
            #print(i.GetWidth(), i.GetHeight(), i.GetBitsPerPixel())

            if i.IsIncomplete():
                pass
            else:
                cam_id = cam.GetUniqueID()
                # see documentation: enum ColorProcessingAlgorithm
                image_converted = i.Convert(PySpin.PixelFormat_BGR8, PySpin.DIRECTIONAL_FILTER)
                image_data = image_converted.GetData()
                cvi = np.frombuffer(image_data, dtype=np.uint8)
                cvi = cvi.reshape((i.GetHeight(),i.GetWidth(),3))

                # BUG FIX: the detector was called with `frame` (the dict of
                # per-camera images) instead of the current image `cvi`,
                # which would crash inside cv2.cvtColor
                corners, ids = detect_aruco_markers(cvi, dictionary)

                frame[cam_id] = cvi

                res = cv2.resize(cvi, (int(1280/size),int(1024/size)))
                cv2.imshow("cam{}".format(cam_id),res)
            i.Release()
            del i
        except PySpin.SpinnakerException as ex:
            print("Error: {}".format(ex))
123 |
#
# cleanup
#

# drop the extra reference to the master before deinitialising the cameras
del master
for cam in cameras:
    cam.EndAcquisition()
    reset_trigger_mode_software(cam)
    cam.DeInit()
    del cam
del cameras
del cam_list

system.ReleaseInstance()
del system
--------------------------------------------------------------------------------
/flir/fps_counter.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 |
4 |
# fps limiter / counter demo: busy-waits (with tiny sleeps) to hold roughly
# `fps` iterations per second and prints the measured average rate every
# `fps_report_frequency` iterations.
fps_report_frequency = 10
fps = 30

count = 0

start = last = time.time()

last_fps = 0

while True:
    count += 1

    # spin until the next frame slot begins
    deadline = start + 1.0 / fps
    while time.time() < deadline:
        time.sleep(0.0001)
    start = time.time()

    # accumulate the instantaneous rate of this iteration
    last_fps += 1.0 / (start - last)
    last = time.time()

    # report the average over the last `fps_report_frequency` iterations
    if count % fps_report_frequency == 0:
        print("fps {0:.3f}".format(last_fps / fps_report_frequency))
        last_fps = 0
--------------------------------------------------------------------------------
/img/kinect_detection.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/img/kinect_detection.jpg
--------------------------------------------------------------------------------
/kinect-v2/README.rst:
--------------------------------------------------------------------------------
1 | Find Chessboards with Kinect for Windows
2 | ========================================
3 |
4 | The basic idea is to find points in the color image and use the coordinate mapper to retrieve the 3D position.
5 | These examples use pyKinect2 to interface with the Kinect API, and only work on Windows with the Kinect API installed.
6 |
7 | Installation
8 | ------------
9 |
10 | Install the kinect v2 SDK https://www.microsoft.com/en-us/download/details.aspx?id=44561
11 | Install python, opencv and contrib (see top level readme) and install pyKinect2.
12 |
Note (October 5, 2018): if you install with pip, you need to overwrite the installed files with the ones from GitHub.
14 |
15 | Examples
16 | --------
17 | All scripts wait until the sensor is connected. Once the video feed is shown, stop a script with the escape key.
18 |
19 | - **color.py** shows the color frame.
20 | - **depth.py** shows the depth frame.
21 | - **infrared.py** shows the infrared frame.
- **detect_charuco.py** detects a charuco board in the color frame. Use board.pdf in the top level data folder.
23 | - **color_3d** uses the coordinate mapper to get the 3D position for each pixel in the color frame.
  The z value (away from the camera) is drawn scaled down. The depth value of the center point is indicated.
25 |
26 |
27 | - **capture_board.py** detects the charuco board and retrieves the 3D coordinates. The space key saves the image,
28 | and coordinates for the board and markers in json.
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/kinect-v2/body.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import math
7 | import numpy as np
8 | import cv2
9 | import sys
10 |
11 | #
12 | # initial version ported from https://github.com/Kinect/PyKinect2/blob/master/examples/PyKinectBodyGame.py
13 | # removed pygame dependency, draw with opencv
14 | #
15 | # added simple recording of 3D coordinates
16 | #
17 |
# Kinect v2 color frame geometry: 1080x1920, 4 channels (BGRA)
color_image_size = (1080,1920)
color_image_shape = (1080,1920,4)

# request both color frames and body (skeleton) tracking from the runtime
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
22 |
def draw_skeleton(color_flipped, skeleton_id, joints):
    """Draw each mappable joint as a red dot onto the (horizontally flipped) color image."""
    # NOTE(review): skeleton_id is currently unused; kept for caller compatibility
    for j in joints:
        # color-space coordinates come back infinite when a joint could not be mapped
        if math.isinf(j.x) or math.isinf(j.y):
            continue
        # the image was flipped horizontally, so mirror x across the 1920-wide frame
        cv2.circle(color_flipped, ( 1920-int(j.x), int(j.y)), 5, (0,0,255), -1)
28 |
# Last known joints (in color space) and body index; both initialised so the
# color branch below cannot hit a NameError before the first body frame.
joints_in_color_space = []
i = 0

while(1):

    if kinect.has_new_body_frame():
        bodies = kinect.get_last_body_frame()
        if bodies is not None:
            for i in range(0, kinect.max_body_count):
                body = bodies.bodies[i]
                if not body.is_tracked:
                    continue
                joints = body.joints

                # convert joint coordinates to color space
                joints_in_color_space = kinect.body_joints_to_color_space(joints)

                # print the 3D position of the first joint when reliably tracked
                if joints[0].TrackingState != PyKinectV2.TrackingState_NotTracked and joints[0].TrackingState != PyKinectV2.TrackingState_Inferred:
                    print( joints[0].Position.x, joints[0].Position.y, joints[0].Position.z )


    if kinect.has_new_color_frame():
        color_frame = kinect.get_last_color_frame()
        color_frame = color_frame.reshape(color_image_shape)

        color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGRA2BGR)
        color_flipped = cv2.flip(color_frame,1)

        # BUG FIX: `i` used to be undefined until the first body frame
        # arrived, so an early color frame raised a NameError here
        draw_skeleton(color_flipped, i, joints_in_color_space)


        cv2.imshow('color',color_flipped)


        if (cv2.waitKey(1) == 27):
            cv2.destroyAllWindows()
            kinect.close()
            break
--------------------------------------------------------------------------------
/kinect-v2/cap/frame_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/cap/frame_0.jpg
--------------------------------------------------------------------------------
/kinect-v2/cap/frame_0.json:
--------------------------------------------------------------------------------
1 | {"points": [{"id": "0", "x": 0.16351009905338287, "y": 0.08542162925004959, "z": 2.576000213623047}, {"id": "1", "x": 0.023896237835288048, "y": 0.08541519939899445, "z": 2.575000047683716}, {"id": "2", "x": -0.1151033565402031, "y": 0.08497083932161331, "z": 2.562000036239624}, {"id": "3", "x": -0.25372666120529175, "y": 0.08488654345273972, "z": 2.561000108718872}, {"id": "4", "x": 0.1686871498823166, "y": 0.2157464176416397, "z": 2.5500001907348633}, {"id": "5", "x": 0.023661363869905472, "y": 0.22280505299568176, "z": 2.5510001182556152}, {"id": "6", "x": -0.11437154561281204, "y": 0.22242294251918793, "z": 2.5470001697540283}, {"id": "7", "x": -0.25221914052963257, "y": 0.2222953587770462, "z": 2.5470001697540283}, {"id": "8", "x": 0.16740942001342773, "y": 0.35805556178092957, "z": 2.5330002307891846}, {"id": "9", "x": 0.023481054231524467, "y": 0.36514201760292053, "z": 2.5340001583099365}, {"id": "10", "x": -0.11327607184648514, "y": 0.3637966513633728, "z": 2.5250000953674316}, {"id": "11", "x": -0.24981747567653656, "y": 0.36360856890678406, "z": 2.5250000953674316}, {"id": "12", "x": 0.17253842949867249, "y": 0.4970284402370453, "z": 2.511000156402588}, {"id": "13", "x": 0.03000449761748314, "y": 0.49676570296287537, "z": 2.509000062942505}, {"id": "14", "x": -0.10572926700115204, "y": 0.4971136450767517, "z": 2.511000156402588}, {"id": "15", "x": -0.25422540307044983, "y": 0.49548789858818054, "z": 2.504000186920166}, {"id": "16", "x": 0.17072491347789764, "y": 0.6395395398139954, "z": 2.4880001544952393}, {"id": "17", "x": 0.029710905626416206, "y": 0.6396704912185669, "z": 2.4880001544952393}, {"id": "18", "x": -0.10457165539264679, "y": 0.639366090297699, "z": 2.4870002269744873}, {"id": "19", "x": -0.25136327743530273, "y": 0.6370757818222046, "z": 2.4790000915527344}, {"id": "20", "x": 0.17566585540771484, "y": 0.7727544903755188, "z": 2.4660000801086426}, {"id": "21", "x": 0.029435863718390465, "y": 0.7734922170639038, "z": 
2.4680001735687256}, {"id": "22", "x": -0.10347992181777954, "y": 0.7722011208534241, "z": 2.4640002250671387}, {"id": "23", "x": -0.2483595609664917, "y": 0.7682611346244812, "z": 2.452000141143799}]}
--------------------------------------------------------------------------------
/kinect-v2/cap/frame_0_annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/cap/frame_0_annotated.jpg
--------------------------------------------------------------------------------
/kinect-v2/capture_board.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 | import math
9 | import json
10 |
11 | import sys
12 | import os
13 |
14 | import argparse
15 |
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("folder", help="folder to save images")
ap.add_argument("-f", "--force", action="store_true", help="force overwrite in folder")

args = vars(ap.parse_args())

# make folder
target_folder = args['folder']
if os.path.isdir(target_folder):
    if args['force'] == False:
        # refuse to clobber an existing capture session unless --force is given
        print("{}: error: folder {} exists. Use --force to overwrite files.".format(os.path.basename(sys.argv[0]), target_folder))
        sys.exit()
else:
    os.makedirs(target_folder)




# physical description of the printed ChArUco target; lengths are in meters
charuco_square_length = 140.0 / 1000 # chessboard square side length (normally in meters)
charuco_marker_length = 88.0 / 1000 # marker side length (same unit than squareLength)
squaresX = 5
squaresY = 7
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)
board = cv2.aruco.CharucoBoard_create(squaresX,squaresY,charuco_square_length,charuco_marker_length,dictionary)

# Kinect v2 frame geometry, (rows, cols); color frames arrive as BGRA
depth_image_size = (424, 512)
color_image_size = (1080, 1920)
color_image_shape = (color_image_size[0], color_image_size[1], 4)
color_size = color_image_size[0] * color_image_size[1]
46 |
47 |
48 | # TODO: make a helper file (this is the same code as detect_charuco)
49 | def find_charuco_board(img, board, dictionary):
50 | corner_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
51 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
52 | corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, dictionary)
53 |
54 |
55 | if len(corners)>0:
56 | for corner in corners:
57 | cv2.cornerSubPix(gray, corner, winSize=(3,3), zeroZone=(-1,-1), criteria=corner_criteria)
58 | ret, detectedCorners, detectedIds = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)
59 |
60 | if detectedIds is None:
61 | return [], []
62 |
63 | if len(detectedCorners) != len(detectedIds):
64 | print("should not happen")
65 | return [],[]
66 | if detectedCorners is not None and detectedIds is not None and len(detectedCorners)>3:
67 | return detectedCorners, detectedIds
68 | return [], []
69 |
70 | # TODO: make a helper file (this is the same code as color_3d.py)
# TODO: make a helper file (this is the same code as color_3d.py)
class Color2CameraMap():
    """Maps color-frame pixel coordinates to 3D camera-space points.

    Built from one depth frame via the Kinect coordinate mapper (uses the
    module-level `kinect` runtime and `color_size`/`color_image_size`).
    """

    def __init__(self, depth_frame):
        # depth_frame: flat depth image from the Kinect runtime.
        # Raises OSError when the coordinate mapper reports an error.
        self._depth_frame = depth_frame
        depth_frame_ptr = np.ctypeslib.as_ctypes(depth_frame.flatten())
        # create camera space points for each pixel in the color frame
        TYPE_CameraSpacePointArray = PyKinectV2._CameraSpacePoint * color_size
        self._camera_points = TYPE_CameraSpacePointArray()
        error_state = kinect._mapper.MapColorFrameToCameraSpace(
            depth_frame.size, depth_frame_ptr, color_size, self._camera_points)
        if error_state != 0:
            raise OSError("error {} in mapping: MapcolorFrameToCamera.".format(
                error_state))

    def make_color_image(self, divide_by=6):
        """Return a down-scaled false-color visualization of the z channel.

        divide_by: integer down-scaling factor applied to both axes.
        """
        image = np.zeros((int(color_image_size[0] / divide_by),
                          int(color_image_size[1] / divide_by)), np.uint8)
        for iy in range(0, color_image_size[0], divide_by):
            for ix in range(0, color_image_size[1], divide_by):
                # CONSISTENCY FIX: use the declared frame width instead of
                # the hard-coded 1920 (same value, single source of truth)
                z = float(self._camera_points[iy * color_image_size[1] + ix].z)
                # unmappable pixels come back as inf/nan; render them black
                if math.isinf(z) or math.isnan(z):
                    z = 0
                image[int(iy / divide_by), int(ix / divide_by)] = int(
                    (z / 9.0) * 255)  # map 9 meters
        # apply a color map
        colored = cv2.applyColorMap(image, cv2.COLORMAP_HOT)
        return colored

    def get_camera_point(self, color_point, isFlipped=False):
        """Return [x, y, z] camera-space meters for the color pixel (x, y).

        isFlipped: True when color_point comes from a mirrored image; the x
        coordinate is then flipped back before the lookup.
        """
        x, y = color_point[0], color_point[1]
        if isFlipped:
            # BUG FIX: compute the flipped x locally; the original wrote it
            # back into the caller's list, so a point passed repeatedly
            # (e.g. p2d in the main loop) was re-flipped on every call.
            x = color_image_size[1] - x
        # find the coordinate in the 3D map
        index = int(y * color_image_size[1] + x)
        camera_point = self._camera_points[index]
        return [
            float(camera_point.x),
            float(camera_point.y),
            float(camera_point.z)
        ]
109 |
110 |
111 |
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth)

font = cv2.FONT_HERSHEY_SIMPLEX

# p2d: color pixel being tracked (the first detected board corner, once one
# is seen); p3d: its camera-space coordinates from the latest depth frame.
p3d = None
p2d = [480, 240]

corners3d = []
corners = []
ids = []

color_flipped = None  # latest annotated color frame (None until first frame)
color_org = None      # clean copy of the same frame, saved to disk

count = 0

while(1):
    if kinect.has_new_depth_frame():
        depth_frame = kinect.get_last_depth_frame()

        if p2d is not None:
            try:
                # renamed from `map` so the builtin is not shadowed
                cam_map = Color2CameraMap(depth_frame)
                # pass copies: get_camera_point may mutate its argument in
                # place when isFlipped is True
                p3d = cam_map.get_camera_point(list(p2d), True)
                corners3d = []
                for c in corners:
                    x = round(c[0][0])
                    y = round(c[0][1])
                    c3d = cam_map.get_camera_point([x, y], True)
                    corners3d.append(c3d)
            except OSError as err:
                print(err)
                p3d = None
        else:
            p3d = None

    if kinect.has_new_color_frame():
        color_frame = kinect.get_last_color_frame()
        color_frame = color_frame.reshape(color_image_shape)

        color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGRA2BGR)
        color_flipped = cv2.flip(color_frame, 1)

        # keep a clean copy for saving next to the annotated one
        color_org = color_flipped.copy()

        corners, ids = find_charuco_board(color_flipped, board, dictionary)
        if len(corners) > 0:
            cv2.aruco.drawDetectedCornersCharuco(color_flipped, corners, ids)

            # draw lines to depict the first marker (id_0 if not obstructed.....)
            # BUG FIX: cv2 drawing functions need integer coordinates; the
            # detected corners are float32
            x = int(round(float(corners[0][0][0])))
            y = int(round(float(corners[0][0][1])))
            p2d = [x, y]
            cv2.line(color_flipped, (0, y), (1920, y), (0, 0, 255), 2)
            cv2.line(color_flipped, (x, 0), (x, 1080), (255, 0, 0), 2)

        cv2.circle(color_flipped, (p2d[0], p2d[1]), 100, (255, 255, 255))

        if p2d is not None and p3d is not None:
            # draw the coordinates
            cv2.putText(color_flipped, "{0:.3f}".format(p3d[0]), (100, 200), font, 4, (0, 0, 255), 6, cv2.LINE_AA)
            cv2.putText(color_flipped, "{0:.3f}".format(p3d[1]), (100, 400), font, 4, (0, 255, 0), 6, cv2.LINE_AA)
            cv2.putText(color_flipped, "{0:.3f}".format(p3d[2]), (100, 600), font, 4, (255, 0, 0), 6, cv2.LINE_AA)

        cv2.imshow('color', color_flipped)

    key = cv2.waitKey(1)

    if key == 27:
        cv2.destroyAllWindows()
        kinect.close()
        break
    # ROBUSTNESS FIX: ignore the capture key until a frame has been received
    elif key == 32 and color_flipped is not None:
        print("take picture")
        print(len(ids), len(corners3d))
        cv2.imwrite("{}/frame_{}_annotated.jpg".format(target_folder, count), color_flipped)

        cv2.imwrite("{}/frame_{}.jpg".format(target_folder, count), color_org)
        # write the coords
        points = []
        # guard with min(): the depth mapping can fail, leaving corners3d
        # shorter than ids
        for i in range(min(len(ids), len(corners3d))):
            p = {}
            p['id'] = str(ids[i][0])
            p['x'] = corners3d[i][0]
            p['y'] = corners3d[i][1]
            p['z'] = corners3d[i][2]
            points.append(p)
        json_data = {"charucoCorners": points}
        with open("{}/frame_{}.json".format(target_folder, count), 'w') as f:
            json.dump(json_data, f)
        count += 1
--------------------------------------------------------------------------------
/kinect-v2/color.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 | import sys
9 |
color_image_size = (1080, 1920)
color_image_shape = (1080, 1920, 4)

kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

# Display the mirrored Kinect color stream until ESC is pressed.
while True:
    if kinect.has_new_color_frame():
        raw = kinect.get_last_color_frame()
        bgra = raw.reshape(color_image_shape)

        bgr = cv2.cvtColor(bgra, cv2.COLOR_BGRA2BGR)
        mirrored = cv2.flip(bgr, 1)

        cv2.imshow('color', mirrored)

    if cv2.waitKey(1) == 27:  # ESC quits
        cv2.destroyAllWindows()
        kinect.close()
        break
--------------------------------------------------------------------------------
/kinect-v2/color_3d.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 | import math
9 |
# opencv images are matrices and row-major-order (row, column)
# opencv pts and coordinates are (x,y)
#
# x left to right
# y top to bottom
#
depth_image_size = (424, 512)    # Kinect v2 depth frame, (rows, cols)
color_image_size = (1080, 1920)  # Kinect v2 color frame, (rows, cols)
color_image_shape = (color_image_size[0], color_image_size[1], 4)  # BGRA layout
color_size = color_image_size[0] * color_image_size[1]  # pixels per color frame
20 |
21 |
class Color2CameraMap():
    """Maps color-frame pixel coordinates to 3D camera-space points.

    Built from one depth frame via the Kinect coordinate mapper (uses the
    module-level `kinect` runtime and the color frame size constants).
    """

    def __init__(self, depth_frame):
        # depth_frame: flat depth image from the Kinect runtime.
        # Raises OSError when the coordinate mapper reports an error.
        self._depth_frame = depth_frame
        depth_frame_ptr = np.ctypeslib.as_ctypes(depth_frame.flatten())
        # create camera space points for each pixel in the color frame
        TYPE_CameraSpacePointArray = PyKinectV2._CameraSpacePoint * color_size
        self._camera_points = TYPE_CameraSpacePointArray()
        error_state = kinect._mapper.MapColorFrameToCameraSpace(
            depth_frame.size, depth_frame_ptr, color_size, self._camera_points)
        if error_state != 0:
            raise OSError("error {} in mapping: MapcolorFrameToCamera.".format(
                error_state))

    def make_color_image(self, divide_by=6):
        """Return a down-scaled false-color visualization of the z channel.

        divide_by: integer down-scaling factor applied to both axes.
        """
        image = np.zeros((int(color_image_size[0] / divide_by),
                          int(color_image_size[1] / divide_by)), np.uint8)
        for iy in range(0, color_image_size[0], divide_by):
            for ix in range(0, color_image_size[1], divide_by):
                z = float(self._camera_points[iy * 1920 + ix].z)
                # unmappable pixels come back as inf/nan; render them black
                if math.isinf(z) or math.isnan(z):
                    z = 0
                image[int(iy / divide_by), int(ix / divide_by)] = int(
                    (z / 9.0) * 255)  # map 9 meters
        # apply a color map
        colored = cv2.applyColorMap(image, cv2.COLORMAP_HOT)
        return colored

    def get_camera_point(self, color_point):
        """Return [x, y, z] camera-space meters for a color pixel (x, y)."""
        # find the coordinate in the image
        index = color_point[1] * color_image_size[1] + color_point[0]
        camera_point = self._camera_points[index]
        return [
            float(camera_point.x),
            float(camera_point.y),
            float(camera_point.z)
        ]

    def get_camera_points(self, color_points):
        """Map a list of (x, y) color pixels; return a list of [x, y, z]."""
        r = []
        for p in color_points:
            r.append(self.get_camera_point(p))
        return r
64 |
65 |
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color
                                         | PyKinectV2.FrameSourceTypes_Depth)

font = cv2.FONT_HERSHEY_SIMPLEX

# For every new depth frame: build the color->camera map, render the depth
# as a false-color image, and print/draw the 3D point under a fixed pixel.
while (1):
    if kinect.has_new_depth_frame():
        depth_frame = kinect.get_last_depth_frame()

        try:
            # renamed from `map` so the builtin is not shadowed
            cam_map = Color2CameraMap(depth_frame)
            divide_by = 6
            img = cam_map.make_color_image(divide_by)

            # points are in (x,y); sample a fixed pixel of the color frame
            # (removed the dead assignment that was immediately overwritten)
            sample_point = [480, 240]
            p3d = cam_map.get_camera_point(sample_point)
            print(p3d)
            # draw a reference circle at pt
            scaled_point = (int(sample_point[0] / divide_by), int(sample_point[1] / divide_by))
            cv2.circle(img, scaled_point, int(30 / divide_by), (255, 255, 255))
            # print the distance.
            cv2.putText(img, " {0:.3f}".format(p3d[2]), scaled_point, font, 8 / divide_by, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.imshow('color', img)

        except OSError as err:
            # the coordinate mapper occasionally fails; skip this frame
            print(err)

    if (cv2.waitKey(1) == 27):
        cv2.destroyAllWindows()
        kinect.close()
        break
--------------------------------------------------------------------------------
/kinect-v2/depth.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 |
depth_image_size = (424, 512)  # Kinect v2 depth frame, (rows, cols)

kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)

# Show a false-colored live depth stream until ESC is pressed.
while(1):
    if kinect.has_new_depth_frame():
        depth_frame = kinect.get_last_depth_frame()
        depth_frame = depth_frame.reshape(depth_image_size)

        # map the depth frame to uint8
        # BUG FIX: scale by 255 (not 256) so the farthest pixel maps to 255
        # instead of overflowing to 0 in the uint8 cast; also guard against
        # an all-zero frame (division by zero).
        max_depth = np.amax(depth_frame)
        if max_depth > 0:
            depth_frame = depth_frame * (255.0 / max_depth)

        colorized_frame = cv2.applyColorMap(np.uint8(depth_frame), cv2.COLORMAP_JET)
        cv2.imshow('depth', colorized_frame)

    if (cv2.waitKey(1) == 27):
        cv2.destroyAllWindows()
        kinect.close()
        break
--------------------------------------------------------------------------------
/kinect-v2/detect_aruco.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 |
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)

# Kinect v2 color frame: (rows, cols) and BGRA shape
color_image_size = (1080, 1920)
color_image_shape = (1080, 1920, 4)

def detect_aruco_markers(img, dictionary):
    """Detect ArUco markers in a BGR image.

    Returns (corners, ids) from cv2.aruco.detectMarkers with the corners
    refined to sub-pixel accuracy, or ([], []) when nothing was found.

    BUG FIX: the original interpolated ChArUco corners against a `board`
    variable that is never defined in this file (NameError at runtime);
    plain marker detection needs no board, so the detected markers are
    returned directly.
    """
    corner_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, dictionary)
    if len(corners) > 0:
        for corner in corners:
            cv2.cornerSubPix(gray, corner, winSize=(3, 3), zeroZone=(-1, -1), criteria=corner_criteria)
        return corners, ids
    return [], []
25 |
26 |
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

color_frame = None
counter = 0

# Live loop: detect markers on every new color frame; ESC quits,
# SPACE saves the current (unannotated) frame to disk.
while(1):
    if kinect.has_new_color_frame():
        color_frame = kinect.get_last_color_frame()
        color_frame = color_frame.reshape(color_image_shape)

        color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGRA2BGR)
        color_flipped = cv2.flip(color_frame, 1)

        # BUG FIX: call the function defined above — the original called an
        # undefined `find_aruco_markers` with an undefined `board` argument
        corners, ids = detect_aruco_markers(color_flipped, dictionary)

        if len(corners) > 0:
            cv2.aruco.drawDetectedMarkers(color_flipped, corners, ids)
        cv2.imshow('color', color_flipped)

    key = cv2.waitKey(1)
    if key == 27:
        cv2.destroyAllWindows()
        kinect.close()
        break
    elif key == 32:
        # BUG FIX: `!= None` on a numpy array raises "truth value is
        # ambiguous"; also fixed the `cv2.imwrtie` typo so the capture is
        # actually written.
        if color_frame is not None:
            cv2.imwrite("frame_{}.jpg".format(counter), cv2.flip(color_frame, 1))
            counter += 1
--------------------------------------------------------------------------------
/kinect-v2/detect_charuco.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 |
9 |
# physical description of the printed ChArUco target
squareLength = 35.0 / 1000 # chessboard square side length (normally in meters)
markerLength = 22.0 / 1000 # marker side length (same unit than squareLength)
squaresX = 5
squaresY = 7

# NOTE(review): this file uses the 6x6 dictionary while the capture scripts
# use 4x4 — must match the board that was actually printed; confirm.
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
board = cv2.aruco.CharucoBoard_create(squaresX,squaresY,squareLength,markerLength,dictionary)

# Kinect v2 color frame: (rows, cols) and BGRA shape
color_image_size = (1080,1920)
color_image_shape = (1080,1920,4)
20 |
def find_charuco_board(img, board, dictionary):
    """Locate the ChArUco board in `img`; return (corners, ids) or ([], [])."""
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    marker_corners, marker_ids, _rejected = cv2.aruco.detectMarkers(gray, dictionary)
    if len(marker_corners) > 0:
        # refine every marker corner to sub-pixel accuracy (in place)
        for mc in marker_corners:
            cv2.cornerSubPix(gray, mc, winSize=(3, 3), zeroZone=(-1, -1), criteria=criteria)
        _ret, chess_corners, chess_ids = cv2.aruco.interpolateCornersCharuco(marker_corners, marker_ids, gray, board)
        # a detection is only usable with at least 4 interpolated corners
        usable = chess_corners is not None and chess_ids is not None and len(chess_corners) > 3
        if usable:
            return chess_corners, chess_ids
    return [], []
32 |
33 |
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

# Live loop: detect the board on every new color frame and draw the
# interpolated chessboard corners; ESC quits.
while(1):
    if kinect.has_new_color_frame():
        color_frame = kinect.get_last_color_frame()
        color_frame = color_frame.reshape(color_image_shape)

        # frames arrive flat in BGRA; convert to BGR and mirror for display
        color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGRA2BGR)
        color_flipped = cv2.flip(color_frame,1)

        corners, ids = find_charuco_board(color_flipped, board, dictionary)
        if len(corners) > 0:
            cv2.aruco.drawDetectedCornersCharuco(color_flipped, corners, ids)
        cv2.imshow('color',color_flipped)

    if (cv2.waitKey(1) == 27):
        cv2.destroyAllWindows()
        kinect.close()
        break
--------------------------------------------------------------------------------
/kinect-v2/frame_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/frame_0.jpg
--------------------------------------------------------------------------------
/kinect-v2/frame_0.json:
--------------------------------------------------------------------------------
1 | {"0": {"x": 0.25240445137023926, "y": -0.014789115637540817, "z": 0.8110000491142273}, "1": {"x": 0.20163775980472565, "y": -0.01039130613207817, "z": 0.8080000281333923}, "2": {"x": 0.1532900035381317, "y": -0.006014191545546055, "z": 0.8050000667572021}, "3": {"x": 0.10323451459407806, "y": -0.003837813623249531, "z": 0.8040000200271606}, "4": {"x": 0.05518585070967674, "y": 0.0005144462338648736, "z": 0.8010000586509705}, "5": {"x": 0.005242818966507912, "y": 0.007002705708146095, "z": 0.7980000376701355}, "6": {"x": -0.042128413915634155, "y": 0.011269960552453995, "z": 0.7940000295639038}, "7": {"x": -0.09334127604961395, "y": 0.015501718968153, "z": 0.7910000085830688}, "8": {"x": -0.143824502825737, "y": 0.01965929940342903, "z": 0.7870000600814819}, "9": {"x": -0.1917545348405838, "y": 0.023789234459400177, "z": 0.784000039100647}, "10": {"x": 0.2558174133300781, "y": 0.033770330250263214, "z": 0.8220000267028809}, "11": {"x": 0.20630939304828644, "y": 0.03806488960981369, "z": 0.8180000185966492}, "12": {"x": 0.15574045479297638, "y": 0.04254232347011566, "z": 0.8180000185966492}, "13": {"x": 0.10734768211841583, "y": 0.04707927256822586, "z": 0.8190000653266907}, "14": {"x": 0.058339428156614304, "y": 0.05130409076809883, "z": 0.815000057220459}, "15": {"x": 0.00749618373811245, "y": 0.05525970458984375, "z": 0.8080000281333923}, "16": {"x": -0.04051455482840538, "y": 0.0594027042388916, "z": 0.8050000667572021}, "17": {"x": -0.09004055708646774, "y": 0.06331738084554672, "z": 0.800000011920929}, "18": {"x": -0.13932953774929047, "y": 0.06739659607410431, "z": 0.7980000376701355}, "19": {"x": -0.18821200728416443, "y": 0.07143014669418335, "z": 0.796000063419342}, "20": {"x": 0.2601165473461151, "y": 0.08166312426328659, "z": 0.8360000252723694}, "21": {"x": 0.2097618728876114, "y": 0.08585275709629059, "z": 0.8320000171661377}, "22": {"x": 0.15737129747867584, "y": 0.08991529047489166, "z": 0.8270000219345093}, "23": {"x": 0.11069086194038391, "y": 
0.09459570795297623, "z": 0.8280000686645508}, "24": {"x": 0.05901036038994789, "y": 0.0987924262881279, "z": 0.8250000476837158}, "25": {"x": 0.012056969106197357, "y": 0.10056639462709427, "z": 0.8210000395774841}, "26": {"x": -0.03882302716374397, "y": 0.10875479131937027, "z": 0.8160000443458557}, "27": {"x": -0.08681873232126236, "y": 0.11239828914403915, "z": 0.8110000491142273}, "28": {"x": -0.1372903287410736, "y": 0.11679982393980026, "z": 0.812000036239624}, "29": {"x": -0.18464753031730652, "y": 0.12059348076581955, "z": 0.8090000152587891}, "30": {"x": 0.26210370659828186, "y": 0.13110968470573425, "z": 0.8500000238418579}, "31": {"x": 0.21039935946464539, "y": 0.13485129177570343, "z": 0.8440000414848328}, "32": {"x": 0.16178502142429352, "y": 0.13871106505393982, "z": 0.8390000462532043}, "33": {"x": 0.11205259710550308, "y": 0.14337047934532166, "z": 0.8390000462532043}, "34": {"x": 0.0640951469540596, "y": 0.14710982143878937, "z": 0.8340000510215759}, "35": {"x": 0.014453559182584286, "y": 0.15129059553146362, "z": 0.8320000171661377}, "36": {"x": -0.03255564346909523, "y": 0.15463615953922272, "z": 0.8260000348091125}, "37": {"x": -0.08387162536382675, "y": 0.1590026170015335, "z": 0.8260000348091125}, "38": {"x": -0.1317301243543625, "y": 0.16193494200706482, "z": 0.8190000653266907}, "39": {"x": -0.18194295465946198, "y": 0.16797494888305664, "z": 0.8170000314712524}, "40": {"x": 0.26541227102279663, "y": 0.17915593087673187, "z": 0.8610000610351562}, "41": {"x": 0.21581131219863892, "y": 0.183040052652359, "z": 0.8570000529289246}, "42": {"x": 0.16357174515724182, "y": 0.186046302318573, "z": 0.8490000367164612}, "43": {"x": 0.11770334094762802, "y": 0.19052131474018097, "z": 0.8480000495910645}, "44": {"x": 0.06698333472013474, "y": 0.19404421746730804, "z": 0.843000054359436}, "45": {"x": 0.019133958965539932, "y": 0.19815881550312042, "z": 0.8410000205039978}, "46": {"x": -0.03079512156546116, "y": 0.20468144118785858, "z": 
0.8400000333786011}, "47": {"x": -0.08036968111991882, "y": 0.20836947858333588, "z": 0.8370000123977661}, "48": {"x": -0.12920288741588593, "y": 0.21146981418132782, "z": 0.8320000171661377}, "49": {"x": -0.17981243133544922, "y": 0.2147599756717682, "z": 0.8280000686645508}, "50": {"x": 0.2681463658809662, "y": 0.22552712261676788, "z": 0.8700000643730164}, "51": {"x": 0.21771769225597382, "y": 0.23126569390296936, "z": 0.8650000691413879}, "52": {"x": 0.17137286067008972, "y": 0.23629049956798553, "z": 0.8660000562667847}, "53": {"x": 0.11896995455026627, "y": 0.23884217441082, "z": 0.8580000400543213}, "54": {"x": 0.07255154103040695, "y": 0.24297446012496948, "z": 0.8560000658035278}, "55": {"x": 0.021658282727003098, "y": 0.24646586179733276, "z": 0.8520000576972961}, "56": {"x": -0.028833286836743355, "y": 0.2504400908946991, "z": 0.8500000238418579}, "57": {"x": -0.07631520926952362, "y": 0.25510066747665405, "z": 0.843000054359436}, "58": {"x": -0.1262621134519577, "y": 0.25953057408332825, "z": 0.843000054359436}, "59": {"x": -0.1759631186723709, "y": 0.2659231424331665, "z": 0.8420000672340393}, "60": {"x": 0.27220165729522705, "y": 0.27651581168174744, "z": 0.8830000162124634}, "61": {"x": 0.22069507837295532, "y": 0.27931222319602966, "z": 0.8770000338554382}, "62": {"x": 0.17306926846504211, "y": 0.28341513872146606, "z": 0.8750000596046448}, "63": {"x": 0.12274625152349472, "y": 0.2862100899219513, "z": 0.8690000176429749}, "64": {"x": 0.07566745579242706, "y": 0.28993478417396545, "z": 0.8660000562667847}, "65": {"x": 0.02429909072816372, "y": 0.29428181052207947, "z": 0.8650000691413879}, "66": {"x": -0.02451048418879509, "y": 0.2994997799396515, "z": 0.8600000143051147}, "67": {"x": -0.07539115101099014, "y": 0.3060334026813507, "z": 0.859000027179718}, "68": {"x": -0.12080791592597961, "y": 0.30844369530677795, "z": 0.8530000448226929}, "69": {"x": -0.173247292637825, "y": 0.31230196356773376, "z": 0.8510000109672546}}
--------------------------------------------------------------------------------
/kinect-v2/infrared.py:
--------------------------------------------------------------------------------
1 | from pykinect2 import PyKinectV2
2 | from pykinect2.PyKinectV2 import *
3 | from pykinect2 import PyKinectRuntime
4 |
5 | import time
6 | import numpy as np
7 | import cv2
8 |
ir_image_size = (424, 512)  # Kinect v2 IR frame, (rows, cols)

kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Infrared)

# Display the live infrared stream until ESC is pressed.
while True:
    if kinect.has_new_infrared_frame():
        raw = kinect.get_last_infrared_frame()
        shaped = raw.reshape(ir_image_size)
        # scale the 16-bit IR intensities down to 8 bits for display
        display = np.uint8(shaped / 256)

        cv2.imshow('ir', display)

    if cv2.waitKey(1) == 27:  # ESC quits
        cv2.destroyAllWindows()
        kinect.close()
        break
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/sample-data/frame_0.jpg
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_0.json:
--------------------------------------------------------------------------------
1 | {"0": {"x": 0.12264157086610794, "y": 0.18982534110546112, "z": 0.8350000381469727}, "1": {"x": 0.1572970598936081, "y": 0.19086092710494995, "z": 0.8400000333786011}, "4": {"x": 0.12553592026233673, "y": 0.16294409334659576, "z": 0.8240000605583191}, "5": {"x": 0.15621362626552582, "y": 0.16245456039905548, "z": 0.8220000267028809}, "6": {"x": 0.19227436184883118, "y": 0.16293714940547943, "z": 0.8250000476837158}, "7": {"x": 0.22411394119262695, "y": 0.16344216465950012, "z": 0.8280000686645508}, "8": {"x": 0.12689535319805145, "y": 0.1330062597990036, "z": 0.8040000200271606}, "11": {"x": 0.22555434703826904, "y": 0.13355059921741486, "z": 0.8090000152587891}, "15": {"x": 0.22570067644119263, "y": 0.10327038168907166, "z": 0.7940000295639038}, "19": {"x": 0.2277611792087555, "y": 0.07402104884386063, "z": 0.7790000438690186}, "23": {"x": 0.22721485793590546, "y": 0.04575635492801666, "z": 0.7630000114440918}}
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/sample-data/frame_1.jpg
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_1.json:
--------------------------------------------------------------------------------
1 | {"0": {"x": 0.12293531745672226, "y": 0.1902800053358078, "z": 0.8370000123977661}, "1": {"x": 0.157109797000885, "y": 0.19063371419906616, "z": 0.8390000462532043}, "2": {"x": 0.1884734332561493, "y": 0.19031134247779846, "z": 0.8380000591278076}, "3": {"x": 0.22558094561100006, "y": 0.19112446904182434, "z": 0.8420000672340393}, "4": {"x": 0.12507887184619904, "y": 0.1623508334159851, "z": 0.8210000395774841}, "5": {"x": 0.15621362626552582, "y": 0.16245456039905548, "z": 0.8220000267028809}, "6": {"x": 0.19250741600990295, "y": 0.1631346493959427, "z": 0.8260000348091125}, "7": {"x": 0.22357259690761566, "y": 0.16304737329483032, "z": 0.8260000348091125}, "8": {"x": 0.1270531862974167, "y": 0.1331716924905777, "z": 0.8050000667572021}, "11": {"x": 0.22583316266536713, "y": 0.1337156891822815, "z": 0.8100000619888306}, "15": {"x": 0.22598493099212646, "y": 0.10340043902397156, "z": 0.7950000166893005}, "19": {"x": 0.22834593057632446, "y": 0.07421108335256577, "z": 0.781000018119812}, "23": {"x": 0.22781045734882355, "y": 0.04587629809975624, "z": 0.76500004529953}}
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/sample-data/frame_2.jpg
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_2.json:
--------------------------------------------------------------------------------
1 | {"0": {"x": 0.1227884441614151, "y": 0.19005267322063446, "z": 0.8360000252723694}, "1": {"x": 0.1572970598936081, "y": 0.19086092710494995, "z": 0.8400000333786011}, "4": {"x": 0.12538357079029083, "y": 0.16274632513523102, "z": 0.8230000138282776}, "5": {"x": 0.15621362626552582, "y": 0.16245456039905548, "z": 0.8220000267028809}, "6": {"x": 0.1929735541343689, "y": 0.1635296493768692, "z": 0.8280000686645508}, "7": {"x": 0.22357259690761566, "y": 0.16304737329483032, "z": 0.8260000348091125}, "8": {"x": 0.1275266706943512, "y": 0.1336679756641388, "z": 0.8080000281333923}, "11": {"x": 0.22611196339130402, "y": 0.13388076424598694, "z": 0.8110000491142273}, "15": {"x": 0.2254164218902588, "y": 0.10314031690359116, "z": 0.7930000424385071}, "19": {"x": 0.22805355489253998, "y": 0.0741160660982132, "z": 0.7800000309944153}, "23": {"x": 0.22721485793590546, "y": 0.04575635492801666, "z": 0.7630000114440918}}
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daan/calibrating-with-python-opencv/c143ddb341847e540d773f4c7bff8f523842a45f/kinect-v2/sample-data/frame_3.jpg
--------------------------------------------------------------------------------
/kinect-v2/sample-data/frame_3.json:
--------------------------------------------------------------------------------
1 | {"0": {"x": 0.1227884441614151, "y": 0.19005267322063446, "z": 0.8360000252723694}, "1": {"x": 0.15767158567905426, "y": 0.19131536781787872, "z": 0.8420000672340393}, "4": {"x": 0.12507887184619904, "y": 0.1623508334159851, "z": 0.8210000395774841}, "5": {"x": 0.15621362626552582, "y": 0.16245456039905548, "z": 0.8220000267028809}, "6": {"x": 0.1920413076877594, "y": 0.16273964941501617, "z": 0.8240000605583191}, "7": {"x": 0.22551782429218292, "y": 0.16284441947937012, "z": 0.8250000476837158}, "8": {"x": 0.12721101939678192, "y": 0.1333371251821518, "z": 0.8060000538825989}, "11": {"x": 0.22555434703826904, "y": 0.13355059921741486, "z": 0.8090000152587891}, "15": {"x": 0.2254164218902588, "y": 0.10314031690359116, "z": 0.7930000424385071}, "19": {"x": 0.227468803524971, "y": 0.07392602413892746, "z": 0.7780000567436218}, "23": {"x": 0.22721485793590546, "y": 0.04575635492801666, "z": 0.7630000114440918}}
--------------------------------------------------------------------------------
/webcam/capture_webcam.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import sys
4 | import argparse
5 |
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", help="path to output image file(s)")
ap.add_argument("-r", "--resolution", type=str, default="1280x720", help="resolution")
ap.add_argument("-c", "--camera", type=str, default="0", help="camera by id")

args = vars(ap.parse_args())

print(args["output"])
print(args["resolution"])

wh = args["resolution"].split("x")
width = int(wh[0])
height = int(wh[1])

# BUG FIX: honor the --camera argument; it was parsed but then ignored by a
# hard-coded `camera = 0`
camera = int(args["camera"])

print("opening video capture device {}".format(camera))
cap = cv2.VideoCapture(camera)
print("resolution {} by {} ".format(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT ))))

count = 0

# SPACE saves a frame; any other key quits.
while(True):
    ret, frame = cap.read()
    # BUG FIX: bail out when the grab fails (camera unplugged / wrong id);
    # cv2.resize would otherwise crash on a None frame
    if not ret:
        print("failed to read from capture device {}".format(camera))
        break
    frame = cv2.resize(frame, (width, height))
    #frame = cv2.flip(frame,1)
    cv2.imshow('frame', frame)

    key = cv2.waitKey(10)
    print(key)
    if key == 32:
        print("take picture")
        count += 1
        cv2.imwrite("frame_{}.jpg".format(count), frame)

    elif key != -1:
        break

cap.release()

cv2.destroyAllWindows()
51 |
52 |
--------------------------------------------------------------------------------
/webcam/detect_aruco.py:
--------------------------------------------------------------------------------
import time
import numpy as np
import cv2

from shared import *

# Live ArUco marker detection from a webcam.
# ESC quits; SPACE saves the current (mirrored) frame to disk.

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)

camera = 0

cap = cv2.VideoCapture(camera)

counter = 0

while True:
    ret, frame = cap.read()
    if not ret:
        # read failure — stop cleanly instead of passing None downstream
        break

    corners, ids = detect_aruco_markers(frame, dictionary)

    if len(corners) > 0:
        cv2.aruco.drawDetectedMarkers(frame, corners, ids)
    cv2.imshow('color', frame)

    key = cv2.waitKey(1)
    if key == 27:  # ESC: quit
        break
    elif key == 32:  # SPACE: save snapshot
        # fixed: was cv2.imwrtie (typo) writing a never-assigned color_frame,
        # and the exit path called kinect.close() on an undefined name
        cv2.imwrite("frame_{}.jpg".format(counter), cv2.flip(frame, 1))
        counter += 1

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/webcam/estimate_pose.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import numpy as np
3 | import cv2
4 | from cv2 import aruco
5 | import sys
6 |
7 | import json
8 | import argparse
9 |
def find_charuco_board(img, board, dictionary):
    """Detect a ChArUco board in a BGR image.

    Detects the ArUco markers, refines their corners to sub-pixel
    accuracy, then interpolates the chessboard corners of the board.

    Returns:
        (corners, ids) of the interpolated ChArUco corners, or
        ([], []) when no usable board was found (nothing detected,
        fewer than 3 corners, or inconsistent detection results).
    """
    corner_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, dictionary)
    if corners is None or ids is None:
        return [], []

    if len(corners) > 0:
        # refine each detected marker corner in place
        for corner in corners:
            cv2.cornerSubPix(gray, corner, winSize=(3, 3), zeroZone=(-1, -1), criteria=corner_criteria)
        ret, detectedCorners, detectedIds = cv2.aruco.interpolateCornersCharuco(corners, ids, gray, board)
        print("ret", ret)
        if detectedCorners is None or detectedIds is None:
            return [], []
        # a pose estimate needs at least 3 corners
        if len(detectedCorners) < 3:
            return [], []

        if len(detectedCorners) != len(detectedIds):
            # fixed typo in message ("numver")
            print("number of corners != ids")
            return [], []
        return detectedCorners, detectedIds
    return [], []
32 |
33 |
34 |
# --- ChArUco board pose estimation from a live webcam feed ---

squareLength = 35.0 / 1000  # chessboard square side length (normally in meters)
markerLength = 22.0 / 1000  # marker side length (same unit than squareLength)
squaresX = 5
squaresY = 7

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
board = cv2.aruco.CharucoBoard_create(squaresX, squaresY, squareLength, markerLength, dictionary)

# construct the argument parse and parse the arguments.
# Parse BEFORE touching the calibration file so --help works without side effects.
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", help="path to output folder")
ap.add_argument("-c", "--camera", type=int, default=0, help="camera by id")

args = vars(ap.parse_args())
camera = args["camera"]

# read calibration file
fs = cv2.FileStorage("sample-data/calibration.json", cv2.FILE_STORAGE_READ)
if not fs.isOpened():
    print("couldn't load calibration data")
    sys.exit()

intrinsics = fs.getNode("cameraMatrix").mat()
dist_coeffs = fs.getNode("dist_coeffs").mat()
print("intrinsics", intrinsics)
print("dist coeffs", dist_coeffs)

print("opening video capture device {}".format(camera))
cap = cv2.VideoCapture(int(camera))
print("resolution {} by {} ".format(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))


font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    if not ret:
        # camera read failure — exit the loop instead of crashing
        break
    corners, ids = find_charuco_board(frame, board, dictionary)
    if len(corners) > 0:
        # rvec/tvec placeholders are required positional args in newer OpenCV bindings
        valid, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(corners, ids, board, intrinsics, dist_coeffs, None, None)
        if valid:
            cv2.aruco.drawAxis(frame, intrinsics, dist_coeffs, rvec, tvec, 0.2)
            # board origin in camera coordinates (same unit as squareLength)
            p3d = [tvec[0][0], tvec[1][0], tvec[2][0]]

            # draw the coordinates
            cv2.putText(frame, "{0:.3f}".format(p3d[0]), (100, 200), font, 4, (0, 0, 255), 6, cv2.LINE_AA)
            cv2.putText(frame, "{0:.3f}".format(p3d[1]), (100, 400), font, 4, (0, 255, 0), 6, cv2.LINE_AA)
            cv2.putText(frame, "{0:.3f}".format(p3d[2]), (100, 600), font, 4, (255, 0, 0), 6, cv2.LINE_AA)

    cv2.imshow('frame', frame)

    key = cv2.waitKey(1)
    if key == 27:  # ESC
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/webcam/sample-data/calibration.json:
--------------------------------------------------------------------------------
1 | {
2 | "calibrationDate": "2018-10-08 15:32:40.781436",
3 | "cameraResolution": {
4 | "type_id": "opencv-matrix",
5 | "rows": 2,
6 | "cols": 1,
7 | "dt": "d",
8 | "data": [ 720.0, 1280.0 ]
9 | },
10 | "cameraMatrix": {
11 | "type_id": "opencv-matrix",
12 | "rows": 3,
13 | "cols": 3,
14 | "dt": "d",
15 | "data": [ 1.0036890509044972e+03, 0.0, 6.3670897816398144e+02,
16 | 0.0, 1.0036890509044972e+03, 4.1429682375533667e+02, 0.0,
17 | 0.0, 1.0 ]
18 | },
19 | "dist_coeffs": {
20 | "type_id": "opencv-matrix",
21 | "rows": 14,
22 | "cols": 1,
23 | "dt": "d",
24 | "data": [ -8.5188812484719122e-03, -4.4933351860255627e-01,
25 | 5.7140445300074899e-03, -1.2610711736297426e-03,
26 | 2.6073694779700285e-01, -1.3493827802828759e-01,
27 | 1.0285916634349823e-01, -3.4395347152192784e-01, 0.0, 0.0,
28 | 0.0, 0.0, 0.0, 0.0 ]
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/webcam/shared.py:
--------------------------------------------------------------------------------
1 | #
2 | # shared code between samples
3 | #
4 |
5 | import numpy as np
6 | import cv2
def detect_aruco_markers(img, dictionary):
    """Find ArUco markers in a BGR image and refine their corners.

    Returns the (corners, ids) pair produced by cv2.aruco.detectMarkers,
    with every detected corner refined to sub-pixel accuracy in place.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detected_corners, detected_ids, _rejected = cv2.aruco.detectMarkers(gray, dictionary)
    if len(detected_corners) > 0:
        refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
        for marker_corners in detected_corners:
            cv2.cornerSubPix(gray, marker_corners,
                             winSize=(3, 3), zeroZone=(-1, -1),
                             criteria=refine_criteria)
    return detected_corners, detected_ids
15 |
--------------------------------------------------------------------------------