├── .gitignore
├── LICENSE
├── README.md
├── README_EN.md
├── demo.py
├── demo_ros2.py
├── detector.py
├── model
│   ├── model.onnx
│   └── model.tflite
└── test.mp4
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # bat
132 | *.bat
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [Japanese/[English](README_EN.md)]
2 |
3 | # Person-Detection-using-RaspberryPi-CPU
4 | A person detection model and demo scripts intended to run on the CPU of a Raspberry Pi 4.
5 |
6 | https://user-images.githubusercontent.com/37477845/165421632-600f5f63-51e5-4afa-a0d5-3abc59d0d711.mp4
7 |
8 | Using PINTO's [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin), inference takes roughly 45-60 ms with 4 threads (about 75 ms with a single thread).
9 | It also runs on laptops and similar machines, but if you need higher accuracy, we recommend an object detection model other than this repository.
10 | On a laptop PC, "model.onnx" is often faster (around 10 ms on a Core i7-8750H).
11 |
12 | # Requirement
13 | opencv-python 4.5.3.56 or later
14 | tensorflow 2.8.0 or later (using [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin) is recommended)
15 | onnxruntime 1.9.0 or later (only when using model.onnx)
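
As a rough sketch, the dependencies can be installed with pip as below (onnxruntime is only needed for model.onnx; on a Raspberry Pi, installing the TensorflowLite-bin wheel instead of the tensorflow package is recommended):

```bash
pip install "opencv-python>=4.5.3.56" "tensorflow>=2.8.0" "onnxruntime>=1.9.0"
```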
16 |
17 | # Demo
18 | Run the demo as follows.
19 | ```bash
20 | python demo.py
21 | ```
22 | * --device
23 | Camera device number
24 | Default: 0
25 | * --movie
26 | Video file to use (takes priority over the camera device when specified)
27 | Default: unspecified
28 | * --width
29 | Capture width
30 | Default: 640
31 | * --height
32 | Capture height
33 | Default: 360
34 | * --model
35 | Path to the model to load
36 | Default: model/model.tflite
37 | * --score_th
38 | Detection threshold
39 | Default: 0.4
40 | * --nms_th
41 | NMS threshold
42 | Default: 0.5
43 | * --num_threads
44 | Number of threads to use (valid only when using TensorFlow-Lite)
45 | Default: None
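
For example, the following runs the demo on the bundled test.mp4 with the ONNX model and a higher score threshold (a usage sketch; the files and flags are the ones listed above):

```bash
python demo.py --movie test.mp4 --model model/model.onnx --score_th 0.5
```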
46 |
47 | # Demo(ROS2)
48 | A demo for ROS2.
49 |
50 | Terminal 1
51 | ```bash
52 | ros2 run v4l2_camera v4l2_camera_node
53 | ```
54 |
55 | Terminal 2
56 | ```bash
57 | python3 ./demo_ros2.py
58 | ```
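
The settings in demo_ros2.py are declared as ROS 2 parameters (model, score_th, nms_th, input_shape/height, input_shape/width), so they should be overridable from the command line; a hypothetical invocation:

```bash
python3 ./demo_ros2.py --ros-args -p model:=./model/model.tflite -p score_th:=0.5
```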
59 |
60 | # Application Example
61 | * [Person Tracking(Person Detection + motpy)](https://github.com/Kazuhito00/MOT-Tracking-by-Detection-Pipeline)
62 |
63 | # Reference
64 | * [PINTO0309/TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin)
65 |
66 | # Author
67 | Kazuhito Takahashi (https://twitter.com/KzhtTkhs)
68 |
69 | # License
70 | Person-Detection-using-RaspberryPi-CPU is under [Apache 2.0 License](LICENSE).
71 |
72 | # License(Movie)
73 | The sample video is "[London, England: Regent Street](https://www2.nhk.or.jp/archives/creative/material/view.cgi?m=D0002160979_00000)" from the [NHK Creative Library](https://www.nhk.or.jp/archives/creative/).
74 |
--------------------------------------------------------------------------------
/README_EN.md:
--------------------------------------------------------------------------------
1 | [[Japanese](https://github.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU)/English]
2 |
3 | # Person-Detection-using-RaspberryPi-CPU
4 | This repository contains a person detection model and demo scripts intended to run on the CPU of a Raspberry Pi 4.
5 |
6 | https://user-images.githubusercontent.com/37477845/165421632-600f5f63-51e5-4afa-a0d5-3abc59d0d711.mp4
7 |
8 | Using PINTO's [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin), inference takes about 45-60 ms with 4 threads (about 75 ms with a single thread).
9 | It also runs on laptops, but if you need higher accuracy, we recommend an object detection model other than this repository.
10 | Also, on a laptop PC, "model.onnx" is often faster (approximately 10 ms on a Core i7-8750H).
11 |
12 | # Requirement
13 | opencv-python 4.5.3.56 or later
14 | tensorflow 2.8.0 or later *Recommended to use [TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin)
15 | onnxruntime 1.9.0 or later *Only when using model.onnx
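
As a rough sketch, the dependencies can be installed with pip as below (onnxruntime is only needed for model.onnx; on a Raspberry Pi, installing the TensorflowLite-bin wheel instead of the tensorflow package is recommended):

```bash
pip install "opencv-python>=4.5.3.56" "tensorflow>=2.8.0" "onnxruntime>=1.9.0"
```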
16 |
17 | # Demo
18 | Here's how to run the demo.
19 | ```bash
20 | python demo.py
21 | ```
22 | * --device
23 | Camera device number
24 | Default: 0
25 | * --movie
26 | Video file to use (takes priority over the camera device when specified)
27 | Default: unspecified
28 | * --width
29 | Capture width
30 | Default: 640
31 | * --height
32 | Capture height
33 | Default: 360
34 | * --model
35 | Path to the model to load
36 | Default: model/model.tflite
37 | * --score_th
38 | Detection threshold
39 | Default: 0.4
40 | * --nms_th
41 | NMS threshold
42 | Default: 0.5
43 | * --num_threads
44 | Number of threads to use (valid only when using TensorFlow-Lite)
45 | Default: None
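
For example, the following runs the demo on the bundled test.mp4 with the ONNX model and a higher score threshold (a usage sketch; the files and flags are the ones listed above):

```bash
python demo.py --movie test.mp4 --model model/model.onnx --score_th 0.5
```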
46 |
47 | # Demo(ROS2)
48 | This is a demo for ROS2.
49 |
50 | Terminal 1
51 | ```bash
52 | ros2 run v4l2_camera v4l2_camera_node
53 | ```
54 |
55 | Terminal 2
56 | ```bash
57 | python3 ./demo_ros2.py
58 | ```
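
The settings in demo_ros2.py are declared as ROS 2 parameters (model, score_th, nms_th, input_shape/height, input_shape/width), so they should be overridable from the command line; a hypothetical invocation:

```bash
python3 ./demo_ros2.py --ros-args -p model:=./model/model.tflite -p score_th:=0.5
```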
59 |
60 | # Application Example
61 | * [Person Tracking(Person Detection + motpy)](https://github.com/Kazuhito00/MOT-Tracking-by-Detection-Pipeline)
62 |
63 | # Reference
64 | * [PINTO0309/TensorflowLite-bin](https://github.com/PINTO0309/TensorflowLite-bin)
65 |
66 | # Author
67 | Kazuhito Takahashi(https://twitter.com/KzhtTkhs)
68 |
69 | # License
70 | Person-Detection-using-RaspberryPi-CPU is under [Apache 2.0 License](LICENSE).
71 |
72 | # License(Movie)
73 | The sample video uses "[London, England, Regent Street](https://www2.nhk.or.jp/archives/creative/material/view.cgi?m=D0002160979_00000)" from the [NHK Creative Library](https://www.nhk.or.jp/archives/creative/).
74 |
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import copy
4 | import time
5 | import argparse
6 |
7 | import cv2
8 |
9 | from detector import Detector
10 |
11 |
12 | def get_args():
13 | parser = argparse.ArgumentParser()
14 |
15 | parser.add_argument("--device", type=int, default=0)
16 | parser.add_argument("--movie", type=str, default=None)
17 | parser.add_argument("--width", help='cap width', type=int, default=640)
18 | parser.add_argument("--height", help='cap height', type=int, default=360)
19 |
20 | parser.add_argument(
21 | "--model",
22 | type=str,
23 | default='model/model.tflite',
24 | )
25 | parser.add_argument(
26 | '--input_shape',
27 | type=str,
28 | default="192,192",
29 | )
30 | parser.add_argument(
31 | '--score_th',
32 | type=float,
33 | default=0.4,
34 | )
35 | parser.add_argument(
36 | '--nms_th',
37 | type=float,
38 | default=0.5,
39 | )
40 | parser.add_argument(
41 | '--num_threads',
42 | type=int,
43 | default=None,
44 | help='Valid only when using Tensorflow-Lite',
45 | )
46 |
47 | args = parser.parse_args()
48 |
49 | return args
50 |
51 |
52 | def main():
53 |     # Parse arguments ########################################################
54 | args = get_args()
55 | cap_device = args.device
56 | cap_width = args.width
57 | cap_height = args.height
58 |
59 | if args.movie is not None:
60 | cap_device = args.movie
61 |
62 | model_path = args.model
63 | input_shape = tuple(map(int, args.input_shape.split(',')))
64 | score_th = args.score_th
65 | nms_th = args.nms_th
66 | num_threads = args.num_threads
67 |
68 |     # Prepare camera #########################################################
69 | cap = cv2.VideoCapture(cap_device)
70 | cap.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)
71 | cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)
72 |
73 |     # Load model #############################################################
74 | detector = Detector(
75 | model_path=model_path,
76 | input_shape=input_shape,
77 | score_th=score_th,
78 | nms_th=nms_th,
79 | providers=['CPUExecutionProvider'],
80 | num_threads=num_threads,
81 | )
82 |
83 | while True:
84 | start_time = time.time()
85 |
86 |         # Capture frame ###################################################
87 | ret, frame = cap.read()
88 | if not ret:
89 | break
90 | debug_image = copy.deepcopy(frame)
91 |
92 |         # Run inference ###################################################
93 | bboxes, scores, class_ids = detector.inference(frame)
94 |
95 | elapsed_time = time.time() - start_time
96 |
97 |         # Draw debug info
98 | debug_image = draw_debug(
99 | debug_image,
100 | elapsed_time,
101 | score_th,
102 | bboxes,
103 | scores,
104 | class_ids,
105 | )
106 |
107 |         # Key handling (ESC: quit) ########################################
108 | key = cv2.waitKey(1)
109 | if key == 27: # ESC
110 | break
111 |
112 |         # Show frame ######################################################
113 | debug_image = cv2.resize(debug_image, (cap_width, cap_height))
114 | cv2.imshow('Person Detection Demo', debug_image)
115 |
116 | cap.release()
117 | cv2.destroyAllWindows()
118 |
119 |
120 | def draw_debug(
121 | image,
122 | elapsed_time,
123 | score_th,
124 | bboxes,
125 | scores,
126 | class_ids,
127 | ):
128 | debug_image = copy.deepcopy(image)
129 |
130 | for bbox, score, class_id in zip(bboxes, scores, class_ids):
131 | x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
132 |
133 | if score_th > score:
134 | continue
135 |
136 |         # Bounding box
137 | debug_image = cv2.rectangle(
138 | debug_image,
139 | (x1, y1),
140 | (x2, y2),
141 | (0, 255, 0),
142 | thickness=2,
143 | )
144 |
145 |         # Class ID and score
146 | score = '%.2f' % score
147 | text = '%s:%s' % (str(int(class_id)), score)
148 | debug_image = cv2.putText(
149 | debug_image,
150 | text,
151 | (x1, y1 - 10),
152 | cv2.FONT_HERSHEY_SIMPLEX,
153 | 0.7,
154 | (0, 255, 0),
155 | thickness=2,
156 | )
157 |
158 |     # Inference time
159 | text = 'Elapsed time:' + '%.0f' % (elapsed_time * 1000)
160 | text = text + 'ms'
161 | debug_image = cv2.putText(
162 | debug_image,
163 | text,
164 | (10, 30),
165 | cv2.FONT_HERSHEY_SIMPLEX,
166 | 0.8,
167 | (0, 255, 0),
168 | thickness=2,
169 | )
170 |
171 | return debug_image
172 |
173 |
174 | if __name__ == '__main__':
175 | main()
176 |
--------------------------------------------------------------------------------
/demo_ros2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # -------------------
5 | # Ar-Ray-code 2022
6 | # -------------------
7 |
8 | # Env(CPU): Raspberry Pi Bullseye, Ubuntu 20
9 | # Env(ROS2): ROS2-Foxy, Galactic
10 |
11 | # input /image_raw(Sensor_msgs/Image)
12 | # output /detection(Vision_msgs/Detection2DArray)
13 |
14 | # run --------------------------------------------------
15 | # terminal1: ros2 run v4l2_camera v4l2_camera_node
16 | # terminal2: python3 ./demo_ros2.py
17 | # ------------------------------------------------------
18 |
19 | import time
20 | import cv2
21 |
22 | from detector import Detector
23 | from demo import draw_debug
24 |
25 | # ROS2 =====================================
26 | import rclpy
27 | from rclpy.node import Node
28 |
29 | from std_msgs.msg import Header
30 | from cv_bridge import CvBridge
31 | from sensor_msgs.msg import Image
32 |
33 | from vision_msgs.msg import Detection2DArray
34 | from vision_msgs.msg import Detection2D
35 |
36 | class yolox_cpu(Node):
37 | def __init__(self):
38 | super().__init__('yolox_cpu')
39 |
40 |         # Declare parameters ##############################################
41 | self.declare_parameter('model', './model/model.onnx')
42 | self.declare_parameter('score_th', 0.4)
43 | self.declare_parameter('nms_th', 0.5)
44 | self.declare_parameter('num_threads', None)
45 | self.declare_parameter('input_shape/height', 192)
46 | self.declare_parameter('input_shape/width', 192)
47 |
48 |         # Get parameter values ############################################
49 | self.model_path = self.get_parameter('model').value
50 | self.score_th = self.get_parameter('score_th').value
51 | self.nms_th = self.get_parameter('nms_th').value
52 | self.num_threads = self.get_parameter('num_threads').value
53 | self.input_shape_h = self.get_parameter('input_shape/height').value
54 | self.input_shape_w = self.get_parameter('input_shape/width').value
55 |
56 | self.input_shape = (self.input_shape_h, self.input_shape_w)
57 |
58 |
59 | self.bridge = CvBridge()
60 |
61 | self.yolox = Detector(
62 | model_path=self.model_path,
63 | input_shape=self.input_shape,
64 | score_th=self.score_th,
65 | nms_th=self.nms_th,
66 | providers=['CPUExecutionProvider'],
67 | num_threads=self.num_threads,
68 | )
69 |
70 | self.sub_image = self.create_subscription(
71 | Image,
72 | 'image_raw',
73 | self.image_callback,
74 | 10
75 | )
76 |
77 | self.pub_detection = self.create_publisher(
78 | Detection2DArray,
79 | 'detection',
80 | 10
81 | )
82 |
83 | def image_callback(self, msg):
84 | start = time.time()
85 | image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
86 | # resize
87 | # image = cv2.resize(image, (self.width, self.height))
88 | bboxes, scores, class_ids = self.yolox.inference(image)
89 | elapsed_time = time.time() - start
90 | fps = 1 / elapsed_time
91 |
92 |         # Draw debug info
93 | debug_image = draw_debug(
94 | image,
95 | elapsed_time,
96 | self.score_th,
97 | bboxes,
98 | scores,
99 | class_ids,
100 | )
101 |
102 |         # Key handling ####################################################
103 | key = cv2.waitKey(1)
104 | if key == 27: # ESC
105 | pass
106 |
107 |         # Show frame ######################################################
108 | debug_image = cv2.resize(debug_image, (640, 480))
109 | cv2.imshow('debug_image', debug_image)
110 |
111 |         # Publish detections ##############################################
112 | msg = Detection2DArray()
113 | msg.header = Header()
114 |         msg.header.stamp = self.get_clock().now().to_msg()
115 | msg.header.frame_id = 'detection'
116 | msg.detections = []
117 | for bbox, score, class_id in zip(bboxes, scores, class_ids):
118 | detection = Detection2D()
119 |
120 | center_x = (bbox[0] + bbox[2]) // 2
121 | center_y = (bbox[1] + bbox[3]) // 2
122 | size_w = bbox[2] - bbox[0]
123 | size_h = bbox[3] - bbox[1]
124 |
125 |             detection.bbox.center.x = float(center_x)
126 |             detection.bbox.center.y = float(center_y)
127 |             detection.bbox.size_x = float(size_w)
128 |             detection.bbox.size_y = float(size_h)
129 | 
130 |             # Append only person detections (class_id == 0)
131 |             if class_id == 0:
132 |                 msg.detections.append(detection)
133 | 
134 | self.pub_detection.publish(msg)
135 |
136 | # print
137 | print('elapsed_time: {:.3f}[ms], fps: {:.1f}'.format(elapsed_time * 1000, fps))
138 | for detection in msg.detections:
139 | print('detection:', detection.bbox.center.x, detection.bbox.center.y, detection.bbox.size_x, detection.bbox.size_y)
140 |
141 | def __del__(self):
142 | cv2.destroyAllWindows()
143 | self.sub_image.destroy()
144 | self.pub_detection.destroy()
145 | super().destroy_node()
146 |
147 | def ros_main(args = None):
148 | rclpy.init(args=args)
149 | ros_class = yolox_cpu()
150 |
151 | try:
152 | rclpy.spin(ros_class)
153 | except KeyboardInterrupt:
154 | pass
155 | finally:
156 | rclpy.shutdown()
157 |
158 | if __name__ == "__main__":
159 | ros_main()
160 |
--------------------------------------------------------------------------------
/detector.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import copy
5 |
6 | import cv2
7 | import numpy as np
8 |
9 |
10 | class Detector(object):
11 | def __init__(
12 | self,
13 | model_path='model.onnx',
14 | input_shape=(192, 192),
15 | score_th=0.3,
16 | nms_th=0.5,
17 | providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
18 | num_threads=None, # Valid only when using Tensorflow-Lite
19 | ):
20 |         # Input size
21 | self.input_shape = input_shape
22 |
23 |         # Thresholds
24 | self.score_th = score_th
25 | self.nms_th = nms_th
26 |
27 |         # Load model
28 | self.extension = os.path.splitext(model_path)[1][1:]
29 | if self.extension == 'onnx':
30 | import onnxruntime
31 |
32 | self.model = onnxruntime.InferenceSession(
33 | model_path,
34 | providers=providers,
35 | )
36 |
37 | self.input_name = self.model.get_inputs()[0].name
38 | self.output_name = self.model.get_outputs()[0].name
39 | elif self.extension == 'tflite':
40 | try:
41 | from tflite_runtime.interpreter import Interpreter
42 | self.model = Interpreter(
43 | model_path=model_path,
44 | num_threads=num_threads,
45 | )
46 | except ImportError:
47 | import tensorflow as tf
48 | self.model = tf.lite.Interpreter(
49 | model_path=model_path,
50 | num_threads=num_threads,
51 | )
52 |
53 | self.model.allocate_tensors()
54 |
55 | self.input_name = self.model.get_input_details()[0]['index']
56 | self.output_name = self.model.get_output_details()[0]['index']
57 | else:
58 | raise ValueError("Invalid extension %s." % (model_path))
59 |
60 | def inference(self, image):
61 | temp_image = copy.deepcopy(image)
62 |
63 |         # Preprocess
64 | image, ratio = self._preprocess(temp_image, self.input_shape)
65 |
66 |         # Run inference
67 | results = None
68 | if self.extension == 'onnx':
69 | results = self.model.run(
70 | None,
71 | {self.input_name: image[None, :, :, :]},
72 | )[0]
73 | elif self.extension == 'tflite':
74 | image = image.reshape(
75 | -1,
76 | 3,
77 | self.input_shape[0],
78 | self.input_shape[1],
79 | )
80 | self.model.set_tensor(self.input_name, image)
81 | self.model.invoke()
82 | results = self.model.get_tensor(self.output_name)
83 |
84 |         # Postprocess
85 | bboxes, scores, class_ids = self._postprocess(
86 | results,
87 | self.input_shape,
88 | ratio,
89 | self.score_th,
90 | self.nms_th,
91 | )
92 |
93 | return bboxes, scores, class_ids
94 |
95 | def _preprocess(self, image, input_size):
96 |         # Resize keeping aspect ratio
97 | ratio = min(input_size[0] / image.shape[0],
98 | input_size[1] / image.shape[1])
99 | resized_image = cv2.resize(
100 | image,
101 | (int(image.shape[1] * ratio), int(image.shape[0] * ratio)),
102 | interpolation=cv2.INTER_LINEAR,
103 | )
104 | resized_image = resized_image.astype(np.uint8)
105 |
106 |         # Create a padded image (filled with 114)
107 | padded_image = np.ones(
108 | (input_size[0], input_size[1], 3),
109 | dtype=np.uint8,
110 | )
111 | padded_image *= 114
112 | padded_image[:int(image.shape[0] * ratio), :int(image.shape[1] *
113 | ratio)] = resized_image
114 |
115 | padded_image = padded_image.transpose((2, 0, 1))
116 | padded_image = np.ascontiguousarray(padded_image, dtype=np.float32)
117 |
118 | return padded_image, ratio
119 |
120 | def _postprocess(
121 | self,
122 | outputs,
123 | img_size,
124 | ratio,
125 | score_th,
126 | nms_th,
127 | ):
128 | grids = []
129 | expanded_strides = []
130 |
131 | strides = [8, 16, 32]
132 |
133 | hsizes = [img_size[0] // stride for stride in strides]
134 | wsizes = [img_size[1] // stride for stride in strides]
135 |
136 | for hsize, wsize, stride in zip(hsizes, wsizes, strides):
137 | xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
138 | grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
139 | grids.append(grid)
140 | shape = grid.shape[:2]
141 | expanded_strides.append(np.full((*shape, 1), stride))
142 |
143 | grids = np.concatenate(grids, 1)
144 | expanded_strides = np.concatenate(expanded_strides, 1)
145 | outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
146 | outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
147 |
148 | predictions = outputs[0]
149 | bboxes = predictions[:, :4]
150 | scores = predictions[:, 4:5] * predictions[:, 5:]
151 | scores = scores.T[0]
152 |
153 | bboxes_xyxy = np.ones_like(bboxes)
154 | bboxes_xyxy[:, 0] = bboxes[:, 0] - bboxes[:, 2] / 2.
155 | bboxes_xyxy[:, 1] = bboxes[:, 1] - bboxes[:, 3] / 2.
156 | bboxes_xyxy[:, 2] = bboxes[:, 0] + bboxes[:, 2] / 2.
157 | bboxes_xyxy[:, 3] = bboxes[:, 1] + bboxes[:, 3] / 2.
158 | bboxes_xyxy /= ratio
159 |
160 | return self._nms(bboxes_xyxy, scores, score_th, nms_th)
161 |
162 | def _nms(self, bboxes, scores, score_th, nms_th):
163 | indexes = cv2.dnn.NMSBoxes(
164 | bboxes.tolist(),
165 | scores.tolist(),
166 | score_th,
167 | nms_th,
168 | )
169 |
170 | result_bboxes, result_scores, result_class_ids = [], [], []
171 | if len(indexes) > 0:
172 | if indexes.ndim == 2:
173 | result_bboxes = bboxes[indexes[:, 0]]
174 | result_scores = scores[indexes[:, 0]]
175 | result_class_ids = np.zeros(result_scores.shape)
176 | elif indexes.ndim == 1:
177 | result_bboxes = bboxes[indexes[:]]
178 | result_scores = scores[indexes[:]]
179 | result_class_ids = np.zeros(result_scores.shape)
180 |
181 | return result_bboxes, result_scores, result_class_ids
182 |
--------------------------------------------------------------------------------
/model/model.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU/fab8c0d1129b391e8aaf0a4e557dd5bb812e749f/model/model.onnx
--------------------------------------------------------------------------------
/model/model.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU/fab8c0d1129b391e8aaf0a4e557dd5bb812e749f/model/model.tflite
--------------------------------------------------------------------------------
/test.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Kazuhito00/Person-Detection-using-RaspberryPi-CPU/fab8c0d1129b391e8aaf0a4e557dd5bb812e749f/test.mp4
--------------------------------------------------------------------------------