├── .gitmodules
├── LICENSE.Apache.md
├── LICENSE.BSD.md
├── LICENSE.CC.md
├── README.md
├── crazyflie_app
├── Makefile.common
├── README.md
├── random-following-spiral
│ ├── Makefile
│ ├── current_platform.mk
│ ├── inc
│ │ └── config_main.h
│ └── src
│ │ └── main_app.c
└── rotate
│ ├── Makefile
│ ├── current_platform.mk
│ ├── inc
│ │ └── config_main.h
│ └── src
│ │ └── rotate.c
├── dataset
└── README.md
├── gap8_app
├── Makefile
├── README.md
├── SSD_tin_can_bottle.c
├── SSD_tin_can_bottle.h
├── common.mk
├── common
│ ├── model_decl.mk
│ └── model_rules.mk
├── images
│ └── test_1_out.ppm
├── nntool_scripts
│ └── nntool_script_ssdlite
├── ssd.mk
└── tflite_model
│ ├── SSD_tin_can_bottle.tflite
│ ├── output_camera.ppm
│ ├── output_camera2.ppm
│ └── test_1_out.ppm
├── images
├── cover.png
├── dataset_samples.png
└── exploration_policies.png
└── training
├── README.md
├── classes.txt
├── configs
├── oid_v4_label_map_bottle_and_tin_can.pbtxt
├── ssd_mobilenet_v2_oid_v4.config
└── ssd_mobilenet_v2_oid_v4_qat.config
├── export_tflite_ssd_graph.py
├── graph_rewriter_builder.py
├── tensorflow1_15.yml
├── train_eval_model_main.py
└── utils
└── data-augmentation.py
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "crazyflie_app/crazyflie-firmware"]
2 | path = crazyflie_app/crazyflie-firmware
3 | url = git@github.com:bitcraze/crazyflie-firmware.git
4 | [submodule "crazyflie_app/crazyflie-firmware-modified"]
5 | path = crazyflie_app/crazyflie-firmware-modified
6 | url = git@github.com:LorenzoLamberti94/crazyflie-firmware-modified.git
7 | [submodule "viewer-pulp-detector"]
8 | path = viewer-pulp-detector
9 | url = git@github.com:LorenzoLamberti94/viewer-pulp-detector.git
10 | [submodule "training/external/tensorflow-api"]
11 | path = training/external/tensorflow-api
12 | url = https://github.com/Bomps4/tensorflow-api-03.2020
13 | [submodule "training/external/cocoapi"]
14 | path = training/external/cocoapi
15 | url = git@github.com:cocodataset/cocoapi.git
16 |
--------------------------------------------------------------------------------
/LICENSE.Apache.md:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
--------------------------------------------------------------------------------
/LICENSE.BSD.md:
--------------------------------------------------------------------------------
1 | Copyright 2020 GreenWaves Technologies
2 |
3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
4 |
5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
6 |
7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
8 |
9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/LICENSE.CC.md:
--------------------------------------------------------------------------------
1 | Attribution-NonCommercial-NoDerivatives 4.0 International
2 |
3 | =======================================================================
4 |
5 | Creative Commons Corporation ("Creative Commons") is not a law firm and
6 | does not provide legal services or legal advice. Distribution of
7 | Creative Commons public licenses does not create a lawyer-client or
8 | other relationship. Creative Commons makes its licenses and related
9 | information available on an "as-is" basis. Creative Commons gives no
10 | warranties regarding its licenses, any material licensed under their
11 | terms and conditions, or any related information. Creative Commons
12 | disclaims all liability for damages resulting from their use to the
13 | fullest extent possible.
14 |
15 | Using Creative Commons Public Licenses
16 |
17 | Creative Commons public licenses provide a standard set of terms and
18 | conditions that creators and other rights holders may use to share
19 | original works of authorship and other material subject to copyright
20 | and certain other rights specified in the public license below. The
21 | following considerations are for informational purposes only, are not
22 | exhaustive, and do not form part of our licenses.
23 |
24 | Considerations for licensors: Our public licenses are
25 | intended for use by those authorized to give the public
26 | permission to use material in ways otherwise restricted by
27 | copyright and certain other rights. Our licenses are
28 | irrevocable. Licensors should read and understand the terms
29 | and conditions of the license they choose before applying it.
30 | Licensors should also secure all rights necessary before
31 | applying our licenses so that the public can reuse the
32 | material as expected. Licensors should clearly mark any
33 | material not subject to the license. This includes other CC-
34 | licensed material, or material used under an exception or
35 | limitation to copyright. More considerations for licensors:
36 | wiki.creativecommons.org/Considerations_for_licensors
37 |
38 | Considerations for the public: By using one of our public
39 | licenses, a licensor grants the public permission to use the
40 | licensed material under specified terms and conditions. If
41 | the licensor's permission is not necessary for any reason--for
42 | example, because of any applicable exception or limitation to
43 | copyright--then that use is not regulated by the license. Our
44 | licenses grant only permissions under copyright and certain
45 | other rights that a licensor has authority to grant. Use of
46 | the licensed material may still be restricted for other
47 | reasons, including because others have copyright or other
48 | rights in the material. A licensor may make special requests,
49 | such as asking that all changes be marked or described.
50 | Although not required by our licenses, you are encouraged to
51 | respect those requests where reasonable. More considerations
52 | for the public:
53 | wiki.creativecommons.org/Considerations_for_licensees
54 |
55 | =======================================================================
56 |
57 | Creative Commons Attribution-NonCommercial-NoDerivatives 4.0
58 | International Public License
59 |
60 | By exercising the Licensed Rights (defined below), You accept and agree
61 | to be bound by the terms and conditions of this Creative Commons
62 | Attribution-NonCommercial-NoDerivatives 4.0 International Public
63 | License ("Public License"). To the extent this Public License may be
64 | interpreted as a contract, You are granted the Licensed Rights in
65 | consideration of Your acceptance of these terms and conditions, and the
66 | Licensor grants You such rights in consideration of benefits the
67 | Licensor receives from making the Licensed Material available under
68 | these terms and conditions.
69 |
70 |
71 | Section 1 -- Definitions.
72 |
73 | a. Adapted Material means material subject to Copyright and Similar
74 | Rights that is derived from or based upon the Licensed Material
75 | and in which the Licensed Material is translated, altered,
76 | arranged, transformed, or otherwise modified in a manner requiring
77 | permission under the Copyright and Similar Rights held by the
78 | Licensor. For purposes of this Public License, where the Licensed
79 | Material is a musical work, performance, or sound recording,
80 | Adapted Material is always produced where the Licensed Material is
81 | synched in timed relation with a moving image.
82 |
83 | b. Copyright and Similar Rights means copyright and/or similar rights
84 | closely related to copyright including, without limitation,
85 | performance, broadcast, sound recording, and Sui Generis Database
86 | Rights, without regard to how the rights are labeled or
87 | categorized. For purposes of this Public License, the rights
88 | specified in Section 2(b)(1)-(2) are not Copyright and Similar
89 | Rights.
90 |
91 | c. Effective Technological Measures means those measures that, in the
92 | absence of proper authority, may not be circumvented under laws
93 | fulfilling obligations under Article 11 of the WIPO Copyright
94 | Treaty adopted on December 20, 1996, and/or similar international
95 | agreements.
96 |
97 | d. Exceptions and Limitations means fair use, fair dealing, and/or
98 | any other exception or limitation to Copyright and Similar Rights
99 | that applies to Your use of the Licensed Material.
100 |
101 | e. Licensed Material means the artistic or literary work, database,
102 | or other material to which the Licensor applied this Public
103 | License.
104 |
105 | f. Licensed Rights means the rights granted to You subject to the
106 | terms and conditions of this Public License, which are limited to
107 | all Copyright and Similar Rights that apply to Your use of the
108 | Licensed Material and that the Licensor has authority to license.
109 |
110 | g. Licensor means the individual(s) or entity(ies) granting rights
111 | under this Public License.
112 |
113 | h. NonCommercial means not primarily intended for or directed towards
114 | commercial advantage or monetary compensation. For purposes of
115 | this Public License, the exchange of the Licensed Material for
116 | other material subject to Copyright and Similar Rights by digital
117 | file-sharing or similar means is NonCommercial provided there is
118 | no payment of monetary compensation in connection with the
119 | exchange.
120 |
121 | i. Share means to provide material to the public by any means or
122 | process that requires permission under the Licensed Rights, such
123 | as reproduction, public display, public performance, distribution,
124 | dissemination, communication, or importation, and to make material
125 | available to the public including in ways that members of the
126 | public may access the material from a place and at a time
127 | individually chosen by them.
128 |
129 | j. Sui Generis Database Rights means rights other than copyright
130 | resulting from Directive 96/9/EC of the European Parliament and of
131 | the Council of 11 March 1996 on the legal protection of databases,
132 | as amended and/or succeeded, as well as other essentially
133 | equivalent rights anywhere in the world.
134 |
135 | k. You means the individual or entity exercising the Licensed Rights
136 | under this Public License. Your has a corresponding meaning.
137 |
138 |
139 | Section 2 -- Scope.
140 |
141 | a. License grant.
142 |
143 | 1. Subject to the terms and conditions of this Public License,
144 | the Licensor hereby grants You a worldwide, royalty-free,
145 | non-sublicensable, non-exclusive, irrevocable license to
146 | exercise the Licensed Rights in the Licensed Material to:
147 |
148 | a. reproduce and Share the Licensed Material, in whole or
149 | in part, for NonCommercial purposes only; and
150 |
151 | b. produce and reproduce, but not Share, Adapted Material
152 | for NonCommercial purposes only.
153 |
154 | 2. Exceptions and Limitations. For the avoidance of doubt, where
155 | Exceptions and Limitations apply to Your use, this Public
156 | License does not apply, and You do not need to comply with
157 | its terms and conditions.
158 |
159 | 3. Term. The term of this Public License is specified in Section
160 | 6(a).
161 |
162 | 4. Media and formats; technical modifications allowed. The
163 | Licensor authorizes You to exercise the Licensed Rights in
164 | all media and formats whether now known or hereafter created,
165 | and to make technical modifications necessary to do so. The
166 | Licensor waives and/or agrees not to assert any right or
167 | authority to forbid You from making technical modifications
168 | necessary to exercise the Licensed Rights, including
169 | technical modifications necessary to circumvent Effective
170 | Technological Measures. For purposes of this Public License,
171 | simply making modifications authorized by this Section 2(a)
172 | (4) never produces Adapted Material.
173 |
174 | 5. Downstream recipients.
175 |
176 | a. Offer from the Licensor -- Licensed Material. Every
177 | recipient of the Licensed Material automatically
178 | receives an offer from the Licensor to exercise the
179 | Licensed Rights under the terms and conditions of this
180 | Public License.
181 |
182 | b. No downstream restrictions. You may not offer or impose
183 | any additional or different terms or conditions on, or
184 | apply any Effective Technological Measures to, the
185 | Licensed Material if doing so restricts exercise of the
186 | Licensed Rights by any recipient of the Licensed
187 | Material.
188 |
189 | 6. No endorsement. Nothing in this Public License constitutes or
190 | may be construed as permission to assert or imply that You
191 | are, or that Your use of the Licensed Material is, connected
192 | with, or sponsored, endorsed, or granted official status by,
193 | the Licensor or others designated to receive attribution as
194 | provided in Section 3(a)(1)(A)(i).
195 |
196 | b. Other rights.
197 |
198 | 1. Moral rights, such as the right of integrity, are not
199 | licensed under this Public License, nor are publicity,
200 | privacy, and/or other similar personality rights; however, to
201 | the extent possible, the Licensor waives and/or agrees not to
202 | assert any such rights held by the Licensor to the limited
203 | extent necessary to allow You to exercise the Licensed
204 | Rights, but not otherwise.
205 |
206 | 2. Patent and trademark rights are not licensed under this
207 | Public License.
208 |
209 | 3. To the extent possible, the Licensor waives any right to
210 | collect royalties from You for the exercise of the Licensed
211 | Rights, whether directly or through a collecting society
212 | under any voluntary or waivable statutory or compulsory
213 | licensing scheme. In all other cases the Licensor expressly
214 | reserves any right to collect such royalties, including when
215 | the Licensed Material is used other than for NonCommercial
216 | purposes.
217 |
218 |
219 | Section 3 -- License Conditions.
220 |
221 | Your exercise of the Licensed Rights is expressly made subject to the
222 | following conditions.
223 |
224 | a. Attribution.
225 |
226 | 1. If You Share the Licensed Material, You must:
227 |
228 | a. retain the following if it is supplied by the Licensor
229 | with the Licensed Material:
230 |
231 | i. identification of the creator(s) of the Licensed
232 | Material and any others designated to receive
233 | attribution, in any reasonable manner requested by
234 | the Licensor (including by pseudonym if
235 | designated);
236 |
237 | ii. a copyright notice;
238 |
239 | iii. a notice that refers to this Public License;
240 |
241 | iv. a notice that refers to the disclaimer of
242 | warranties;
243 |
244 | v. a URI or hyperlink to the Licensed Material to the
245 | extent reasonably practicable;
246 |
247 | b. indicate if You modified the Licensed Material and
248 | retain an indication of any previous modifications; and
249 |
250 | c. indicate the Licensed Material is licensed under this
251 | Public License, and include the text of, or the URI or
252 | hyperlink to, this Public License.
253 |
254 | For the avoidance of doubt, You do not have permission under
255 | this Public License to Share Adapted Material.
256 |
257 | 2. You may satisfy the conditions in Section 3(a)(1) in any
258 | reasonable manner based on the medium, means, and context in
259 | which You Share the Licensed Material. For example, it may be
260 | reasonable to satisfy the conditions by providing a URI or
261 | hyperlink to a resource that includes the required
262 | information.
263 |
264 | 3. If requested by the Licensor, You must remove any of the
265 | information required by Section 3(a)(1)(A) to the extent
266 | reasonably practicable.
267 |
268 |
269 | Section 4 -- Sui Generis Database Rights.
270 |
271 | Where the Licensed Rights include Sui Generis Database Rights that
272 | apply to Your use of the Licensed Material:
273 |
274 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right
275 | to extract, reuse, reproduce, and Share all or a substantial
276 | portion of the contents of the database for NonCommercial purposes
277 | only and provided You do not Share Adapted Material;
278 |
279 | b. if You include all or a substantial portion of the database
280 | contents in a database in which You have Sui Generis Database
281 | Rights, then the database in which You have Sui Generis Database
282 | Rights (but not its individual contents) is Adapted Material; and
283 |
284 | c. You must comply with the conditions in Section 3(a) if You Share
285 | all or a substantial portion of the contents of the database.
286 |
287 | For the avoidance of doubt, this Section 4 supplements and does not
288 | replace Your obligations under this Public License where the Licensed
289 | Rights include other Copyright and Similar Rights.
290 |
291 |
292 | Section 5 -- Disclaimer of Warranties and Limitation of Liability.
293 |
294 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
295 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
296 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
297 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
298 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
299 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
300 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
301 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
302 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
303 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
304 |
305 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
306 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
307 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
308 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
309 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
310 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
311 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
312 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
313 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
314 |
315 | c. The disclaimer of warranties and limitation of liability provided
316 | above shall be interpreted in a manner that, to the extent
317 | possible, most closely approximates an absolute disclaimer and
318 | waiver of all liability.
319 |
320 |
321 | Section 6 -- Term and Termination.
322 |
323 | a. This Public License applies for the term of the Copyright and
324 | Similar Rights licensed here. However, if You fail to comply with
325 | this Public License, then Your rights under this Public License
326 | terminate automatically.
327 |
328 | b. Where Your right to use the Licensed Material has terminated under
329 | Section 6(a), it reinstates:
330 |
331 | 1. automatically as of the date the violation is cured, provided
332 | it is cured within 30 days of Your discovery of the
333 | violation; or
334 |
335 | 2. upon express reinstatement by the Licensor.
336 |
337 | For the avoidance of doubt, this Section 6(b) does not affect any
338 | right the Licensor may have to seek remedies for Your violations
339 | of this Public License.
340 |
341 | c. For the avoidance of doubt, the Licensor may also offer the
342 | Licensed Material under separate terms or conditions or stop
343 | distributing the Licensed Material at any time; however, doing so
344 | will not terminate this Public License.
345 |
346 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
347 | License.
348 |
349 |
350 | Section 7 -- Other Terms and Conditions.
351 |
352 | a. The Licensor shall not be bound by any additional or different
353 | terms or conditions communicated by You unless expressly agreed.
354 |
355 | b. Any arrangements, understandings, or agreements regarding the
356 | Licensed Material not stated herein are separate from and
357 | independent of the terms and conditions of this Public License.
358 |
359 |
360 | Section 8 -- Interpretation.
361 |
362 | a. For the avoidance of doubt, this Public License does not, and
363 | shall not be interpreted to, reduce, limit, restrict, or impose
364 | conditions on any use of the Licensed Material that could lawfully
365 | be made without permission under this Public License.
366 |
367 | b. To the extent possible, if any provision of this Public License is
368 | deemed unenforceable, it shall be automatically reformed to the
369 | minimum extent necessary to make it enforceable. If the provision
370 | cannot be reformed, it shall be severed from this Public License
371 | without affecting the enforceability of the remaining terms and
372 | conditions.
373 |
374 | c. No term or condition of this Public License will be waived and no
375 | failure to comply consented to unless expressly agreed to by the
376 | Licensor.
377 |
378 | d. Nothing in this Public License constitutes or may be interpreted
379 | as a limitation upon, or waiver of, any privileges and immunities
380 | that apply to the Licensor or You, including from the legal
381 | processes of any jurisdiction or authority.
382 |
383 | =======================================================================
384 |
385 | Creative Commons is not a party to its public
386 | licenses. Notwithstanding, Creative Commons may elect to apply one of
387 | its public licenses to material it publishes and in those instances
388 | will be considered the "Licensor". The text of the Creative Commons
389 | public licenses is dedicated to the public domain under the CC0 Public
390 | Domain Dedication. Except for the limited purpose of indicating that
391 | material is shared under a Creative Commons public license or as
392 | otherwise permitted by the Creative Commons policies published at
393 | creativecommons.org/policies, Creative Commons does not authorize the
394 | use of the trademark "Creative Commons" or any other trademark or logo
395 | of Creative Commons without its prior written consent including,
396 | without limitation, in connection with any unauthorized modifications
397 | to any of its public licenses or any other arrangements,
398 | understandings, or agreements concerning use of licensed material. For
399 | the avoidance of doubt, this paragraph does not form part of the
400 | public licenses.
401 |
402 | Creative Commons may be contacted at creativecommons.org.
403 |
404 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
8 |
9 |
14 |
15 | PULP-Detector
16 |
17 | Lorenzo Lamberti, Luca Bompani, Victor Javier Kartsch, Manuele Rusci, Daniele Palossi, Luca Benini.
18 |
19 |
20 | Copyright (C) 2023 University of Bologna, ETH Zürich. All rights reserved.
21 |
22 |
23 |
30 |
31 |
32 |
33 |
34 |
35 | **Video:** [YouTube](https://youtu.be/BTin8g0nyko)
36 |
37 | **Citing:** *"Bio-inspired Autonomous Exploration Policies with CNN-based Object Detection on Nano-drones"* ([IEEExplore](https://ieeexplore.ieee.org/abstract/document/10137154), [arXiv](https://arxiv.org/abs/2301.12175))
38 |
39 | ~~~~
40 | @INPROCEEDINGS{pulp_detector,
41 | author={Lamberti, Lorenzo and Bompani, Luca and Kartsch, Victor Javier and Rusci, Manuele and Palossi, Daniele and Benini, Luca},
42 | booktitle={2023 Design, Automation \& Test in Europe Conference \& Exhibition (DATE)},
43 | title={{{Bio-inspired Autonomous Exploration Policies with CNN-based Object Detection on Nano-drones}}},
44 | year={2023},
45 | volume={},
46 | number={},
47 | pages={1-6},
48 | doi={10.23919/DATE56975.2023.10137154}}
49 | ~~~~
50 |
51 | ## 1. Introduction
52 | ### What is PULP-Detector ?
53 | **PULP-Detector** is a nano-drone system that strives for both maximizing the exploration of a room while performing visual object detection.
54 | The Exploration policies as implemented as lightweight and bio-inpired state machines.
55 | The object detection CNN is based on the MobilenetV2-SSD network.
56 | The drone performs obstacle avoidance thanks to Time-of-flight sensors.
57 | The drone is completely autonomous -- **no human operator, no ad-hoc external signals, and no remote laptop!**
58 |
59 | - **Software component:**
60 | Object detection CNN: is a shallow convolutional neural network (CNN) composed of Mobilenet-v2 backbone plus the SSD (single-shot detector) heads.
61 | It runs at 1.6-4.3 FPS onboard.
62 |
63 | - **Hardware components:**
64 | The hardware soul of PULP-Detector is an ultra-low power visual navigation module embodied by a pluggable PCB (called *shield* or *deck*) for the [Crazyflie 2.0](https://www.bitcraze.io/crazyflie-2/)/[2.1](https://www.bitcraze.io/crazyflie-2-1/) nano-drone. The shield features a Parallel Ultra-Low-Power (PULP) GAP8 System-on-Chip (SoC) from GreenWaves Technologies (GWT), an ultra-low power HiMax HBM01 camera, and off-chip Flash/DRAM memory; This pluggable PCB has evolved over time, from the [*PULP-Shield*](https://ieeexplore.ieee.org/document/8715489) , the first custom-made prototype version developed at ETH Zürich, and its commercial off-the-shelf evolution, the [*AI-deck*](https://store.bitcraze.io/products/ai-deck).
65 |
66 |
67 |
68 | Summary of characteristics:
69 |
70 | - **Hardware:** [*AI-deck*](https://store.bitcraze.io/products/ai-deck)
71 |
72 | - **Deep learning framework:** Tensorflow 1.15 ([Tensorflow Object detection API](??))
73 |
74 | - **Quantization**: fixed-point 8 bits, fully automated with [NNTool](https://greenwaves-technologies.com/sdk-manuals/nn_quick_start_guide/)
75 |
76 | - **Deployment**: fully automated with [AutoTiler](https://greenwaves-technologies.com/sdk-manuals/nn_quick_start_guide/)
77 |
78 | We release here, as open source, all our code, hardware designs, datasets, and trained networks.
79 |
80 | ## Setup
81 |
82 | Clone recursively to download all submodules
83 |
84 | ```
85 | git clone git@github.com:pulp-platform/pulp-detector.git --recursive
86 | ```
87 |
88 |
89 | ## PULP-Platforms refs
90 |
91 | [PULP Platform Youtube](https://www.youtube.com/c/PULPPlatform) channel (subscribe it!)
92 |
93 | [PULP Platform Website](https://pulp-platform.org/).
94 |
95 |
96 |
97 |
98 |
99 | ## Licenses
100 |
101 | All files under:
102 | * `./crazyflie_app/random-following-spiral`
103 | * `./crazyflie_app/rotate`
104 | * `./gap8_app/SSD_tin_can_bottle.c`
105 |
106 | are original and licensed under Apache-2.0, see [LICENSE.Apache.md](LICENSE.Apache.md).
107 |
108 |
109 | The images used for the training and testing need to be downloaded and copied into the following folder:
110 | * `dataset/`
111 |
All the files can be downloaded from this [link](https://zenodo.org/records/10992237) and are under the Creative Commons Attribution Non Commercial No Derivatives 4.0 International license; see [LICENSE.CC.md](LICENSE.CC.md)
113 |
114 |
115 |
116 | All files under:
117 | * `./training/`
118 |
119 | Are from [Tensorflow](https://github.com/tensorflow/models/), released under Apache-2.0 License, see [LICENSE.Apache.md](LICENSE.Apache.md).
120 |
121 |
122 | All files under:
123 | * `./gap8_app/` (except for `./gap8_app/SSD_tin_can_bottle.c`)
124 |
125 | Are from [GreenWaves Technologies](https://github.com/GreenWaves-Technologies), released under a BSD License, see [LICENSE.BSD.md](LICENSE.BSD.md)
126 |
127 |
128 | The external modules under:
129 | * `./viewer-pulp-detector/`
130 | * `./crazyflie_app/crazyflie-firmware`
131 | * `./crazyflie_app/crazyflie-firmware-modified`
132 |
133 | Are from [Bitcraze](https://github.com/bitcraze), released under a GPL-3.0 license.
134 |
135 |
136 |
--------------------------------------------------------------------------------
/crazyflie_app/Makefile.common:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------#
2 | # Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland. #
3 | # All rights reserved. #
4 | # #
5 | # Licensed under the Apache License, Version 2.0 (the "License"); #
6 | # you may not use this file except in compliance with the License. #
7 | # See LICENSE.apache.md in the top directory for details. #
8 | # You may obtain a copy of the License at #
9 | # #
10 | # http://www.apache.org/licenses/LICENSE-2.0 #
11 | # #
12 | # Unless required by applicable law or agreed to in writing, software #
13 | # distributed under the License is distributed on an "AS IS" BASIS, #
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
15 | # See the License for the specific language governing permissions and #
16 | # limitations under the License. #
17 | # #
18 | # Authors: #
19 | # Lorenzo Lamberti #
20 | # Luca Bompani #
21 | # Date: 01.04.2023 #
22 | #-------------------------------------------------------------------------------#
23 |
24 | # Common part of Makefile
25 |
26 | CRAZYFLIE_BASE=$(CURDIR)/../crazyflie-firmware
27 | CRAZYFLIE_MODIFIED=$(CURDIR)/../crazyflie-firmware
28 |
29 | # Some firmware changes are required so the original files from the crazyflie-firmware are excluded
30 | # and the modified ones are added (add one line for each)
31 | SRC_FILES := $(filter-out $(CRAZYFLIE_BASE)/src/deck/drivers/src/multiranger.c, $(SRC_FILES))
32 | # add folders
33 | VPATH += ../crazyflie-firmware-modified
34 |
35 | include $(CRAZYFLIE_BASE)/Makefile
36 |
37 | TMPCFLAGS := $(CFLAGS)
38 | CFLAGS = $(filter-out -Wdouble-promotion -Werror, $(TMPCFLAGS)) # ignore conversion float to double warning
39 |
--------------------------------------------------------------------------------
/crazyflie_app/README.md:
--------------------------------------------------------------------------------
1 | # Exploration Policies
2 |
3 | Here you can find the code running on the STM32 of the Crazyflie 2.1.
4 |
5 | The crazyflie application runs one of the four exploration policies described in the paper
6 |
7 |
8 |
9 | ## Instructions
10 |
11 | To flash the stm32
12 |
13 | Select the exploration policy
14 |
15 | ```
16 | cd random-following-spiral/
17 | or
18 | cd rotate/
19 | ```
20 |
21 | flash the code on the STM32
22 |
23 |
24 | ```
25 | make clean all cload
26 | ```
27 |
28 | Then you are ready to take off!
29 |
30 | * open the CF client `cfclient`
31 | * go to the parameters window
32 | * set the parameter `START_STOP.fly` to 1
33 |
34 | The drone will now start exploring the room !
35 |
36 |
37 | ## Setup
38 |
39 |
--------------------------------------------------------------------------------
/crazyflie_app/random-following-spiral/Makefile:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------#
2 | # Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland. #
3 | # All rights reserved. #
4 | # #
5 | # Licensed under the Apache License, Version 2.0 (the "License"); #
6 | # you may not use this file except in compliance with the License. #
7 | # See LICENSE.apache.md in the top directory for details. #
8 | # You may obtain a copy of the License at #
9 | # #
10 | # http://www.apache.org/licenses/LICENSE-2.0 #
11 | # #
12 | # Unless required by applicable law or agreed to in writing, software #
13 | # distributed under the License is distributed on an "AS IS" BASIS, #
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
15 | # See the License for the specific language governing permissions and #
16 | # limitations under the License. #
17 | # #
18 | # Authors: #
19 | # Lorenzo Lamberti #
20 | # Luca Bompani #
21 | # Date: 01.04.2023 #
22 | #-------------------------------------------------------------------------------#
23 |
# enable app support (builds this directory as a Crazyflie out-of-tree app)
APP=1
APP_STACKSIZE=500
# local source/include layout of this policy
SRC_DIR = src
INC_DIR = inc

# collect every .c under src/ and register the resulting objects with the build
VPATH += $(SRC_DIR)/
SOURCES_LOCAL = $(wildcard $(SRC_DIR)/*.c)
OBJECTS_LOCAL := $(SOURCES_LOCAL:.c=.o)
PROJ_OBJ += $(notdir $(OBJECTS_LOCAL))
INCLUDES += -I$(INC_DIR)

# shared firmware wiring (firmware paths, modified-driver substitution)
include ../Makefile.common
37 |
--------------------------------------------------------------------------------
/crazyflie_app/random-following-spiral/current_platform.mk:
--------------------------------------------------------------------------------
1 | PLATFORM=cf2
2 |
--------------------------------------------------------------------------------
/crazyflie_app/random-following-spiral/inc/config_main.h:
--------------------------------------------------------------------------------
1 | /*-----------------------------------------------------------------------------
2 | Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland.
3 | All rights reserved.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | See LICENSE.apache.md in the top directory for details.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
File: config_main.h
19 | Authors:
20 | Lorenzo Lamberti
21 | Luca Bompani
22 | Manuele Rusci
23 | Daniele Palossi
24 | Date: 01.04.2023
25 | -------------------------------------------------------------------------------*/
26 |
// Flight
#define FORWARD_VELOCITY 0.10f // Max forward speed [m/s]. Default: 1.0f
#define MAX_SIDE_SPEED 0.20f // Max side (lateral) speed [m/s]. Default: 1.0f
#define TARGET_H 0.50f // Target height for drone's flight [m]. Default: 0.5f

// Policy
#define CLOCKWISE 1 // preferred turn direction -- TODO confirm exact use (policy code not shown here)

// SPINNING
#define SPIN_TIME 1500.0 // [ms] duration of a fixed-time spin maneuver
#define SPIN_YAW_RATE 90.0 // [deg/s] yaw rate for rate-controlled spins
#define SPIN_ANGLE 180.0 // [deg] nominal spin angle
#define RANDOM_SPIN_ANGLE 90.0 // [deg] add randomness to SPIN_ANGLE +/- RANDOM_SPIN_ANGLE

// TOF
#define TOF_FRONT_DIST_THRESHOLD 1000 // Target distance from obstacle [mm]. Default: 400.0f
#define SIDE_DISTANCE 600.0f // Target distance from side walls [mm]. Default: 400.0f
#define SIDE_TOLERANCE 100.0f // Dead band around SIDE_DISTANCE [mm]. Default: 100.0f
#define TOF_STATE_CHECK 0 // 0 or 1 to check tof.state -> enables the error counter for invalid measurements
#define PROCESS_TOF_FLAGS 1 // process tof.state==2 into a distance = 4 meters

// TOF counters: consecutive-sample thresholds used for debouncing
#define FRONT_COUNTERS_THRESHOLD 5 // consecutive front detections before reacting
#define SIDE_COUNTERS_THRESHOLD 5 // consecutive free-side detections before reacting
#define ERROR_COUNTERS_THRESHOLD 5 // consecutive invalid samples before reacting

/** THRESH_SIDE_INDENTATION: this is the threshold distance for wall following.
 * If the right/left sensors measure a free path of > side_dist+THRESH_SIDE_INDENTATION,
 * then there is an indentation on the environment and we can turn in that direction
 */
// Wallfollowing / Maze specific
#define THRESH_SIDE_INDENTATION 500 // [mm]

// Maze specific
#define INVERT_MAZE_AFTER_N_LAPS 4 // laps
--------------------------------------------------------------------------------
/crazyflie_app/random-following-spiral/src/main_app.c:
--------------------------------------------------------------------------------
1 | /*-----------------------------------------------------------------------------
2 | Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland.
3 | All rights reserved.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | See LICENSE.apache.md in the top directory for details.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 | File: main_app.c
19 | Authors:
20 | Lorenzo Lamberti
21 | Luca Bompani
22 | Manuele Rusci
23 | Daniele Palossi
24 | Date: 01.04.2023
25 | -------------------------------------------------------------------------------*/
26 |
27 | #include
28 | #include
29 | #include
30 |
31 | #include "app.h"
32 | #include "FreeRTOS.h"
33 | #include "system.h"
34 | #include "task.h"
35 | #include "debug.h"
36 | #include "stabilizer_types.h"
37 | #include "estimator_kalman.h"
38 | #include "commander.h"
39 | #include "log.h"
40 | #include "param.h"
41 | #include
42 | #include "config_main.h"
43 | #include
44 |
/* --------------- GUI PARAMETERS --------------- */
/* Runtime-tunable values; defaults come from config_main.h. */

// START / STOP mission parameter
uint8_t fly = 0; // Takeoff/landing command (GUI parameter)
uint8_t policy = 0; // exploration policy selector -- TODO confirm value meanings (selection code not visible here)
uint8_t invert_maze_after_n_laps = INVERT_MAZE_AFTER_N_LAPS; // maze direction inversion period [laps]
// Flight
float forward_vel = FORWARD_VELOCITY; // [m/s]
float flying_height = TARGET_H; // [m]
float max_side_speed = MAX_SIDE_SPEED; // [m/s]

// Maneuver: Spin -- parameters
float spin_time = SPIN_TIME; // [ms]
float spin_yawrate = SPIN_YAW_RATE; // [deg/s]
float spin_angle = SPIN_ANGLE; // [deg]
float max_rand_angle = RANDOM_SPIN_ANGLE; // [deg] randomness added to the spin angle, +/- this value

// ToF parameters
int8_t tof_state_check = TOF_STATE_CHECK; // 0 or 1: enable validity checking of tof.state
int16_t tof_front_dist_th = TOF_FRONT_DIST_THRESHOLD; // [mm] front obstacle threshold
int16_t side_distance = SIDE_DISTANCE; // [mm] target distance from side walls
int16_t side_tolerance = SIDE_TOLERANCE; // [mm] dead band around side_distance
int8_t clockwise = CLOCKWISE; // turn direction preference -- TODO confirm use (policy code not visible here)

// Parameters for enabling/disabling some parts of the code. 1=Active, 0=Non active
uint8_t debug = 9; // debug print level (code compares against specific values, e.g. debug==2)
uint8_t motors_on = 1; // activate motors (0 = dry run: setpoints are never sent to the commander)
uint8_t slow_down_while_loop = 0; // slows down while(1) loop

/* --------------- GLOBAL VARIABLES --------------- */

// Flight
float side_speed = 0.0; // current lateral velocity command [m/s]
// -- Flags
uint8_t landed = 1; // Flag for indicating whether the drone landed
// -- Counters (consecutive-sample debouncing, see count_* helpers below)
int8_t front_err_counter = 0; // front errors on ToF status
int8_t front_distance_counter = 0; // front value < threshold
int8_t side_distance_counter = 0; // side value > threshold
// -- Counters thresholds
int8_t front_counter_thresh = FRONT_COUNTERS_THRESHOLD; // triggers stop and spin
int8_t side_counter_thresh = SIDE_COUNTERS_THRESHOLD; // triggers stop and spin
int8_t err_counter_thresh = ERROR_COUNTERS_THRESHOLD; // triggers stop and spin
// -- ToF
int8_t en_process_tof_flags = PROCESS_TOF_FLAGS; // 1 = clamp state==2 samples to 4 m (see process_tof_flags)
int8_t en_indent = 1; // presumably enables side-indentation detection -- TODO confirm (use not visible here)
int16_t valFront, valRight, valLeft; // latest raw distances [mm]
uint8_t stateFront, stateRight, stateLeft; // latest raw status flags
logVarId_t idFrontVal, idFrontState, idRightVal, idRightState, idLeftVal, idLeftState; // log ids for the multiranger ToF readings
// -- State Estimation
logVarId_t idX, idY, idYaw; // log ids for the state estimate
96 |
/* --------------- DEFINES --------------- */


/* --------------- STRUCTURES --------------- */
// One time-of-flight sensor sample: distance value + VL53L1 status flag.
typedef struct tof_s{
    int16_t val;  // measured distance [mm]
    int8_t state; // VL53L1 range status code (see ToF_status_isvalid)
}tof_t;
// Aggregated sample of the multiranger deck's lateral sensors.
typedef struct multiranger_s{
    tof_t front;
    tof_t right;
    tof_t left;
    tof_t back; // NOTE(review): never filled by get_tof_measurement -- confirm intended
} multiranger_t;

multiranger_t multiranger; // latest multiranger sample

/* -------------- FUNCTION DEFINITION -------------- */
// Forward declarations of the flight / sensing helpers implemented below.
void takeoff(float height);
void flight_loop();
void rotate();
void land(void);
void velocity_setpoint(setpoint_t* setpoint, float x_vel, float y_vel, float z_pos, float yaw_att);
void headToPosition(float x, float y, float z, float yaw);
void headToVelocity (float x, float y, float z, float yaw);
void debug_prints();

/* ----------------------------------------------------------------------- */
/* ------------------------------ FUNCTIONS ------------------------------ */
/* ----------------------------------------------------------------------- */

/* --------------- Setpoint Utils --------------- */

setpoint_t fly_setpoint; // scratch setpoint reused by headToVelocity/headToPosition
131 | setpoint_t create_velocity_setpoint(float x_vel, float y_vel, float z_pos, float yaw_rate)
132 | {
133 | setpoint_t setpoint;
134 | memset(&setpoint, 0, sizeof(setpoint_t));
135 | setpoint.mode.x = modeVelocity;
136 | setpoint.mode.y = modeVelocity;
137 | setpoint.mode.z = modeAbs;
138 | setpoint.mode.yaw = modeVelocity;
139 | setpoint.velocity.x = x_vel;
140 | setpoint.velocity.y = y_vel;
141 | setpoint.position.z = z_pos;
142 | setpoint.attitude.yaw = yaw_rate;
143 | setpoint.velocity_body = true;
144 | return setpoint;
145 | }
146 |
147 | void headToVelocity(float x_vel, float y_vel, float z_pos, float yaw_rate)
148 | {
149 | fly_setpoint = create_velocity_setpoint(x_vel, y_vel, z_pos, yaw_rate);
150 | if (motors_on) commanderSetSetpoint(&fly_setpoint, 3);
151 | }
152 |
153 | setpoint_t create_position_setpoint(float x, float y, float z, float yaw)
154 | {
155 | setpoint_t setpoint;
156 | memset(&setpoint, 0, sizeof(setpoint_t));
157 | setpoint.mode.x = modeAbs;
158 | setpoint.mode.y = modeAbs;
159 | setpoint.mode.z = modeAbs;
160 | setpoint.mode.yaw = modeAbs;
161 | setpoint.position.x = x;
162 | setpoint.position.y = y;
163 | setpoint.position.z = z;
164 | setpoint.attitude.yaw = yaw;
165 | return setpoint;
166 | }
167 |
168 | void headToPosition(float x, float y, float z, float yaw)
169 | {
170 | fly_setpoint = create_position_setpoint(x, y, z, yaw);
171 | if (motors_on) commanderSetSetpoint(&fly_setpoint, 3);
172 | }
173 |
174 |
175 | /* --------------- Takeoff and Landing --------------- */
176 |
177 | void takeoff(float height)
178 | {
179 | point_t pos;
180 | memset(&pos, 0, sizeof(pos));
181 | estimatorKalmanGetEstimatedPos(&pos);
182 |
183 | // first step: taking off gradually, from a starting height of 0.2 to the desired height
184 | int endheight = (int)(100*(height-0.2f));
185 | for(int i=0; i100*FINAL_LANDING_HEIGHT; i--) {
209 | headToPosition(pos.x, pos.y, (float)i / 100.0f, current_yaw);
210 | vTaskDelay(20);
211 | }
212 | vTaskDelay(200);
213 | }
214 |
215 | /* --------------- ToF Utilities --------------- */
216 |
217 | multiranger_t get_tof_state(multiranger_t multiranger){
218 | multiranger.front.state = logGetInt(idFrontState);
219 | multiranger.right.state = logGetInt(idRightState);
220 | multiranger.left.state = logGetInt(idLeftState);
221 | // DEBUG_PRINT("\n [get_tof_state]: front.state %d, right.state %d, left.state %d \n", multiranger.front.state, multiranger.right.state, multiranger.left.state);
222 | return multiranger;
223 | }
224 |
225 | multiranger_t get_tof_val(multiranger_t multiranger){
226 | multiranger.front.val = logGetInt(idFrontVal);
227 | multiranger.right.val = logGetInt(idRightVal);
228 | multiranger.left.val = logGetInt(idLeftVal);
229 | // DEBUG_PRINT("\n [get_tof_measurement]: front.val %d, right.val %d, left.val %d \n", multiranger.front.val, multiranger.right.val, multiranger.left.val);
230 | return multiranger;
231 | }
232 |
233 | tof_t process_tof_flags(tof_t ToF){
234 | // usually, tof.state == 2 means that the distance is > 4m, but the measurements are very unstable. We use this flag to overwrite the tof measurement
235 | if(ToF.state==2){
236 | ToF.val=4000;
237 | }
238 | return ToF;
239 | }
240 |
241 |
242 | multiranger_t get_tof_measurement(multiranger_t multiranger){
243 | multiranger = get_tof_state(multiranger);
244 | multiranger = get_tof_val(multiranger);
245 | if (en_process_tof_flags==1){
246 | multiranger.front = process_tof_flags(multiranger.front);
247 | multiranger.front = process_tof_flags(multiranger.front);
248 | multiranger.front = process_tof_flags(multiranger.front);
249 | }
250 | return multiranger;
251 | }
252 |
253 |
254 | int8_t ToF_status_isvalid(tof_t ToF, int8_t check_ToF_status){
255 | /**
256 | * returns 1 if the current measurement is valid, 0 if it is not valid
257 | * - flag=0 or flag=2 are considered valid
258 | * Variables:
259 | * - check_status: setting check_status to zero always returns 1, so that we consider any measurement as valid
260 | * - ToF: structure of current tof [front, right, left, back, up]
261 | */
262 |
263 | // if (debug==8) DEBUG_PRINT("state is %d\n", ToF.state);
264 |
265 | // don't check ToF error status -- always returns current measurement as correct
266 | if (check_ToF_status == 0){
267 | return 1; // valid
268 | }
269 |
270 | // check ToF error status: return 1 for valid, return 0 for invalid
271 | switch(ToF.state){
272 | case 0 : // VL53L1_RANGESTATUS_RANGE_VALID: Ranging measurement is valid
273 | return 1;
274 | case 1 : // VL53L1_RANGESTATUS_SIGMA_FAIL: Raised if sigma estimator check is above the internal defined threshold
275 | return 1;
276 | case 2 : // VL53L1_RANGESTATUS_SIGNAL_FAIL: Raised if signal value is below the internal defined threshold
277 | return 1;
278 | case 3 : // VL53L1_RANGESTATUS_RANGE_VALID_MIN_RANGE_CLIPPED: Target is below minimum detection threshold.
279 | return 1;
280 | case 4 : // VL53L1_RANGESTATUS_OUTOFBOUNDS_FAIL: Raised when phase is out of bounds
281 | return 1;
282 | case 5 : // VL53L1_RANGESTATUS_HARDWARE_FAIL: Raised in case of HW or VCSEL failure
283 | return 0;
284 | case 6 : // VL53L1_RANGESTATUS_RANGE_VALID_NO_WRAP_CHECK_FAIL: The Range is valid but the wraparound check has not been done.
285 | return 0;
286 | case 7 : // VL53L1_RANGESTATUS_WRAP_TARGET_FAIL: Wrapped target, not matching phases
287 | return 1;
288 | case 8 : // VL53L1_RANGESTATUS_PROCESSING_FAIL: Internal algorithm underflow or overflow
289 | return 1;
290 | case 14 : // VL53L1_RANGESTATUS_RANGE_INVALID: The reported range is invalid
291 | return 0;
292 | default:
293 | return 1;
294 | }
295 | }
296 |
297 | uint8_t count_statusError(tof_t ToF, int8_t error_counter, int8_t state_check){
298 | /**
299 | * Note: setting state_check==1 keeps the counter to 0 no matter what!
300 | */
301 |
302 | // NO STATUS ERROR: reset error counter
303 | if (ToF_status_isvalid(ToF,state_check)==1){
304 | error_counter = 0;
305 | }
306 | // STATUS ERROR: increase error counter
307 | else{
308 | error_counter++;
309 | }
310 | return error_counter;
311 | }
312 |
313 | uint8_t count_ToF_obstacle_detection(tof_t ToF, int16_t distance_thresh, int8_t tof_counter, int8_t state_check){
314 |
315 | // if (debug==8) DEBUG_PRINT("state is_valid %d, ToF.val %d, thresh %d, tof_counter %d\n", ToF_status_isvalid(ToF,state_check), ToF.val, distance_thresh, tof_counter);
316 | if (ToF_status_isvalid(ToF,state_check)==1){
317 | // if (debug==5) DEBUG_PRINT("[count_ToF_obstacle_detection] ToF.val %d,\t distance_thresh %d,\t tof_counter %d\n", ToF.val, distance_thresh, tof_counter);
318 | // check if front measurement is < thresh: increase the counter
319 |
320 | if (ToF.valdistance_thresh){
341 | // DEBUG_PRINT("[count_ToF_free_path] tof_counter++!\n");
342 | tof_counter++;
343 | }
344 | else{
345 | // DEBUG_PRINT("[count_ToF_free_path] tof_counter=0!\n");
346 | tof_counter=0;
347 | }
348 | }
349 | return tof_counter;
350 | }
351 |
352 | /* --------------- Other Manouvers --------------- */
353 |
354 | void spin_in_place_t_cost(float angle, float time){
355 | /*
356 | angle [deg]: given the current orientation, spin by "angle" degrees in place;
357 | time [ms]: how much time to perform the entire manuever --> impacts the spinning speed;
358 | */
359 |
360 | float current_yaw; // fetch current yaw self estimation
361 | float t_steps = 1; // [ms] time steps
362 | float n_steps = (time/(t_steps)); // number of steps
363 | float r_steps = (angle/n_steps); // angle steps
364 | float new_yaw; // new yaw given to the controller. This parameter is updated by the for loop
365 |
366 | // access self estimation
367 | point_t pos;
368 | memset(&pos, 0, sizeof(pos));
369 | estimatorKalmanGetEstimatedPos(&pos);
370 | current_yaw = logGetFloat(logGetVarId("stateEstimate", "yaw"));
371 | if (debug==2) DEBUG_PRINT("\n\n[spin_in_place_t_cost]\n current_yaw %f, t_steps %f, n_steps %f, r_steps %f\n\n", current_yaw, t_steps, n_steps, r_steps);
372 |
373 | // perform manuever
374 | for (int i = 0; i <= n_steps; i++) {
375 | new_yaw = (i*r_steps) + current_yaw;
376 | // if (debug==3) DEBUG_PRINT("%f\n",(double)new_yaw);
377 | headToPosition(pos.x, pos.y, pos.z, new_yaw);
378 | vTaskDelay(M2T(t_steps));
379 | }
380 | }
381 |
382 | void spin_in_place_yawrate_cost(float angle, float yaw_rate){
383 | /*
384 | angle [deg] : given the current orientation, spin by "angle" degrees in place;
385 | yaw_rate [deg/s]: constant yaw rate for rotation --> impacts the spinning time;
386 | */
387 | float time = abs((angle/yaw_rate) * 1000); // [ms]
388 | if (debug==2) DEBUG_PRINT("\n\n [spin_in_place_yawrate_cost]\n angle %f, yaw_rate %f, time %f\n\n", angle, yaw_rate, time);
389 | spin_in_place_t_cost(angle, time);
390 | }
391 |
void spin_right(float yaw_rate){
    // Quarter turn to the right (negative yaw) at the requested rate.
    spin_in_place_yawrate_cost(-90.0, yaw_rate);
}
396 |
void spin_left(float yaw_rate){
    // Quarter turn to the left (positive yaw) at the requested rate.
    spin_in_place_yawrate_cost(+90.0, yaw_rate);
}
401 |
402 |
403 | void spin_in_place_random(float starting_random_angle, float yaw_rate, float rand_range){
404 | /**
405 | * spin to a random angle. The random angle is chosen between starting_random_angle +/- rand_range
406 | */
407 | // calculate a random spinning angle
408 | float random_angle = starting_random_angle - rand_range + (2*rand_range) * (float)rand()/(float)(RAND_MAX);
409 | // if the random angle is >180°, then spin to the opposite side
410 | if (random_angle > 180){
411 | random_angle = -(360 - random_angle);
412 | }
413 | if (debug==2) DEBUG_PRINT("\n\n [spin_in_place_random]:\n starting_random_angle %f, yaw_rate %f, rand_range %f, random_angle %f", starting_random_angle, yaw_rate, rand_range, random_angle);
414 | spin_in_place_yawrate_cost(random_angle, yaw_rate);
415 |
416 | }
417 |
418 |
419 | float keep_safe_side_distance(multiranger_t multiranger, int16_t side_distance, float max_side_speed, int8_t state_check){
420 | float roll_left = max_side_speed;
421 | float roll_right = -max_side_speed;
422 | float output_side_speed = 0.0;
423 |
424 | // if we are too close to right walls:
425 | if (multiranger.right.val< side_distance && ToF_status_isvalid(multiranger.right, state_check) == 1){ // roll to left
426 | output_side_speed = roll_left;
427 | }
428 | // if we are too close to left walls:
429 | else if (multiranger.left.val< side_distance && ToF_status_isvalid(multiranger.left, state_check) == 1){ // roll to right
430 | output_side_speed = roll_right;
431 | }
432 | else{
433 | output_side_speed = 0.0; // go straight
434 | }
435 |
436 | return output_side_speed;
437 | }
438 |
439 | float keep_determined_side_distance(tof_t tof, int8_t left, int16_t side_distance, float max_side_speed, int8_t check_status){
440 | /**
441 | * left=1 -> keeps constant distance from left wall
442 | * left=0 -> keeps constant distance from right wall
443 | */
444 | // roll speed
445 | float roll_left = max_side_speed;
446 | float roll_right = -max_side_speed;
447 | float output_side_speed = 0.0;
448 | // tolerances
449 | // float tolerance = 20; // percentage [%]
450 | // int16_t side_tolerance = (int16_t)(tolerance*side_distance/100); // 10% of side_distance
451 |
452 | // keep constant distance from left wall
453 | if (left==1){
454 | // if we are too close to left walls:
455 | if (tof.val < (side_distance-side_tolerance) && ToF_status_isvalid(tof, check_status) == 1){ // roll to right
456 | output_side_speed = roll_right;
457 | }
458 | // if we are too far to left wall:
459 | else if (tof.val > (side_distance+side_tolerance) && ToF_status_isvalid(tof, check_status) == 1){ // roll to left
460 | output_side_speed = roll_left;
461 | }
462 | else{
463 | output_side_speed = 0.0; // go straight
464 | }
465 | }
466 |
467 | // keep constant distance from right wall
468 | if (left==0){
469 | // if we are too close to right wall:
470 | if (tof.val < (side_distance-side_tolerance) && ToF_status_isvalid(tof, check_status) == 1){ // roll to left
471 | output_side_speed = roll_left;
472 | }
473 | // if we are too far to right wall:
474 | else if (tof.val > (side_distance+side_tolerance) && ToF_status_isvalid(tof, check_status) == 1){ // roll to right
475 | output_side_speed = roll_right;
476 | }
477 | else{
478 | output_side_speed = 0.0; // go straight
479 | }
480 |
481 | }
482 | return output_side_speed;
483 | }
484 |
485 | /* --------------- Processing --------------- */
486 |
/* First-order IIR low-pass filter: blend the newest sample with the previous
 * filtered value. alpha in [0,1]; a larger alpha weights the old value more
 * (stronger smoothing). Returns the new filtered value. */
float low_pass_filtering(float data_new, float data_old, float alpha)
{
    return (1.0f - alpha) * data_new + alpha * data_old;
}
494 |
495 |
496 | /* --------------- Check on front/side tofs --------------- */
497 |
498 | int8_t is_front_obstacle_detected(tof_t tof, int16_t front_dist_th, int8_t tof_state_check) {
499 |
500 | /* ---------- Count Front state errors and detections ---------- */
501 | front_distance_counter = count_ToF_obstacle_detection(tof, front_dist_th, front_distance_counter, tof_state_check);
502 | front_err_counter = count_statusError(tof, front_err_counter, tof_state_check);
503 | // DEBUG_PRINT("[is_front_obstacle_detected] front_err_counter %d,\t front_distance_counter %d\n", front_err_counter, front_distance_counter);
504 |
505 | /* ---------- we generate spin signal if errors/detectionr > threshold ---------- */
506 |
507 | if (front_distance_counter>front_counter_thresh){
508 | // DEBUG_PRINT("[is_front_obstacle_detected] counter thresh reached: front_distance_counter = 0\n");
509 | front_distance_counter = 0;
510 | return 1;
511 | }
512 | else if (front_err_counter > err_counter_thresh){
513 | // DEBUG_PRINT("[is_front_obstacle_detected] counter thresh reached: front_err_counter = 0;\n");
514 |
515 | front_err_counter = 0;
516 | return 1;
517 | }
518 | else{
519 | return 0;
520 | }
521 | }
522 |
523 | /** THRESH_SIDE_INDENTATION: this is the threshold distance for wall following.
524 | * If the right/left sensors measure a free path of > side_dist+THRESH_SIDE_INDENTATION,
525 | * then there is an indentation on the environment and we can turn in that direction
526 | */
527 | uint8_t is_side_empty(tof_t tof_wall, int16_t side_threshold, int8_t check_status){
528 |
529 | side_distance_counter = count_ToF_free_path(tof_wall, side_threshold, side_distance_counter, check_status);
530 | if(side_distance_counter>side_counter_thresh){
531 | if (debug==5) DEBUG_PRINT("Side ToF.val= %d -> has empty path -> spin 90°\n", tof_wall.val);
532 | side_distance_counter=0;
533 | return 1;
534 | }
535 | else{
536 | if (debug==5) DEBUG_PRINT("Side ToF.val= %d -> DOESNT have empty path -> go straight\n", tof_wall.val);
537 | return 0;
538 | }
539 | }
540 |
541 | /* ----------------------------------------------------------------------- */
542 | /* ----------------------------- Flight Loop ----------------------------- */
543 | /* ----------------------------------------------------------------------- */
544 |
545 | void random_policy()
546 | {
547 | /* --------------- Sensor acquisition (left & right) --------------- */
548 | multiranger = get_tof_measurement(multiranger);
549 | if (debug==3) DEBUG_PRINT("\nmultiranger: front.val %d,\t right.val %d,\t left.val %d \n", multiranger.front.val, multiranger.right.val, multiranger.left.val);
550 | if (debug==3) DEBUG_PRINT("multiranger: front.state %d,\t right.state %d,\t left.state %d \n", multiranger.front.state, multiranger.right.state, multiranger.left.state);
551 |
552 | /* --------------- Correction for left & right obstacles --------------- */
553 |
554 | side_speed = keep_safe_side_distance(multiranger, side_distance, max_side_speed, tof_state_check);
555 |
556 | /* --------------- Count Front state errors: stop if too many !--------------- */
557 |
558 | front_err_counter = count_statusError(multiranger.front, front_err_counter, tof_state_check);
559 | front_distance_counter = count_ToF_obstacle_detection(multiranger.front, tof_front_dist_th, front_distance_counter, tof_state_check);
560 | if (debug==4) DEBUG_PRINT("multiranger.front.val %d, tof_front_dist_th %d\n", multiranger.front.val, tof_front_dist_th);
561 | if (debug==4) DEBUG_PRINT("front_err_counter %d, front_distance_counter %d\n", front_err_counter, front_distance_counter);
562 |
563 | /* --------------- Set setpoint --------------- */
564 |
565 | if (front_err_counter > err_counter_thresh || front_distance_counter>front_counter_thresh){
566 | // stop & spin
567 | if (debug==9) DEBUG_PRINT("SPIN\n");
568 | spin_in_place_random(spin_angle, spin_yawrate, max_rand_angle);
569 | front_err_counter = 0;
570 | front_distance_counter = 0;
571 | }
572 | else{
573 | // go forward
574 | headToVelocity(forward_vel, side_speed, flying_height, 0.0);
575 | }
576 |
577 | if (debug==1) DEBUG_PRINT("forward_vel: %f \t side_speed: %f\n", forward_vel, side_speed);
578 | if (debug==9) DEBUG_PRINT("forward_vel: %f \t side_speed: %f \t front.val/state %d/%d \t coll_counter: %d \t front_err_counter: %d\n", forward_vel, side_speed, multiranger.front.val, multiranger.right.state, front_distance_counter, front_err_counter);
579 |
580 | }
581 |
582 |
583 | tof_t tof_wall;
584 | uint8_t left;
585 | void wall_following_policy()
586 | {
587 | // set clockwise or couter-clockwise wall-following process
588 | if (clockwise){
589 | tof_wall = multiranger.left;
590 | left = 1;
591 | spin_angle = -90; // turn right
592 | }
593 | else{
594 | tof_wall = multiranger.right;
595 | left = 0;
596 | spin_angle = +90; // turn left
597 | }
598 |
599 | /* --------------- Sensor acquisition (front, left, right) --------------- */
600 | multiranger = get_tof_measurement(multiranger);
601 |
602 | /* --------------- Correction for left & right obstacles --------------- */
603 | side_speed = keep_determined_side_distance(tof_wall, left, side_distance, max_side_speed, tof_state_check);
604 |
605 | /* --------------- Check front obstacle: if yes, then spin --------------- */
606 | if (is_front_obstacle_detected(multiranger.front, tof_front_dist_th, tof_state_check)){
607 | // stop & spin
608 | if (debug==9) DEBUG_PRINT("SPIN\n");
609 | spin_in_place_yawrate_cost(spin_angle, spin_yawrate);
610 | }
611 | else{
612 | // go forward
613 | headToVelocity(forward_vel, side_speed, flying_height, 0.0);
614 | }
615 |
616 | /* --------------- Check side empty: if yes, then spin --------------- */
617 | if (en_indent){
618 | int16_t side_indentation_threshold = side_distance + THRESH_SIDE_INDENTATION;
619 | if(is_side_empty(tof_wall, side_indentation_threshold, tof_state_check)){
620 | float spin_angle_opposite = - spin_angle;
621 | spin_in_place_yawrate_cost(spin_angle_opposite, spin_yawrate);
622 | }
623 | }
624 | debug_prints();
625 | }
626 |
627 |
628 |
629 | // function to check whether 'n' is
630 | // a multiple of 4 or not
// Return 1 when 'n' is a non-zero multiple of 4, 0 otherwise.
int8_t isAMultipleOf4(int8_t n){
    return (n != 0 && n % 4 == 0) ? 1 : 0;
}
638 |
639 |
640 |
641 |
// NOTE(review): tof_wall and left are also tentatively defined above
// wall_following_policy(); both declarations refer to the same objects.
tof_t tof_wall;               // side ToF currently tracked as "the wall"
uint8_t left;                 // 1 = tracking left wall, 0 = right wall
int16_t side_distance_maze;   // per-lap target side distance [mm]
int16_t front_distance_maze;  // per-lap front obstacle threshold [mm]
int counter_spins = 0;        // 90-degree spins completed in the current lap
int full_room_lap = 1;        // lap counter; scales the side/front distances
int increment = 1;            // 1 = distances growing, 0 = shrinking
649 |
// Advance the lap counter: +1 when 'increment' is non-zero, -1 otherwise.
int increment_or_decrement_lap(int full_room_lap, int increment){
    return increment ? full_room_lap + 1 : full_room_lap - 1;
}
657 |
658 | void maze_policy()
659 | {
660 | // set clockwise or couter-clockwise wall-following process
661 | if (clockwise){
662 | tof_wall = multiranger.left;
663 | left = 1;
664 | spin_angle = -90; // turn right
665 | }
666 | else{
667 | tof_wall = multiranger.right;
668 | left = 0;
669 | spin_angle = +90; // turn left
670 | }
671 |
672 |
673 | /* The room is 6.6 x 5.5 meters
674 | side distance gets doubled every time
675 |
676 | default:side_distance = 500
677 | lap1: side_distance = 500
678 | lap2: side_distance = 1000
679 | lap3: side_distance = 1500
680 | lap4: side_distance = 2000
681 | lap5: side_distance = 2500
682 | lap6: side_distance = 3000
683 |
684 | at this point, the room is 5m wide, so the drone is in position side_distance/2 ~ 2.5m and starts to spin in place
685 | To counter this I start decreasing again the side distance to get closer to the walls again */
686 |
687 | if (full_room_lap==1) increment=1; // increase !
688 | if (full_room_lap==invert_maze_after_n_laps) increment=0; // decrease !
689 |
690 | // every lap completed we increase the distance from the sides
691 | if(isAMultipleOf4(counter_spins)){
692 | full_room_lap = increment_or_decrement_lap(full_room_lap, increment);
693 | counter_spins=0;
694 | }
695 | side_distance_maze = side_distance * full_room_lap;
696 | front_distance_maze = tof_front_dist_th + (side_distance*(full_room_lap-1));
697 | if (side_distance_maze>4000) side_distance_maze=4000;
698 | if (front_distance_maze>4000) front_distance_maze=4000;
699 |
700 | /* --------------- Sensor acquisition (front, left, right) --------------- */
701 | multiranger = get_tof_measurement(multiranger);
702 |
703 | /* --------------- Correction for left & right obstacles --------------- */
704 | side_speed = keep_determined_side_distance(tof_wall, left, side_distance_maze, max_side_speed, tof_state_check);
705 |
706 | /* --------------- Check front obstacle: if yes, then spin --------------- */
707 | if (is_front_obstacle_detected(multiranger.front, front_distance_maze, tof_state_check)){
708 | // stop & spin
709 | if (debug==9 || debug==10) DEBUG_PRINT("SPIN\n");
710 | spin_in_place_yawrate_cost(spin_angle, spin_yawrate);
711 | counter_spins++;
712 | }
713 | else{
714 | // go forward
715 | headToVelocity(forward_vel, side_speed, flying_height, 0.0);
716 | }
717 |
718 | /* --------------- Check side empty: if yes, then spin --------------- */
719 | if (en_indent){
720 | int16_t side_indentation_threshold = side_distance_maze + THRESH_SIDE_INDENTATION;
721 |
722 | if(is_side_empty(tof_wall, side_indentation_threshold, tof_state_check)){
723 | DEBUG_PRINT("SPIN OPPOSITE\n");
724 | float spin_angle_opposite = - spin_angle;
725 | spin_in_place_yawrate_cost(spin_angle_opposite, spin_yawrate);
726 | counter_spins--;
727 | if (counter_spins<0) counter_spins=0; // maze only fix.
728 | }
729 | }
730 | debug_prints();
731 | }
732 |
/* Centralized debug printing, gated by the 'debug' GUI parameter:
 *   3     raw multiranger values and states
 *   9     values/states plus detection & error counters
 *   4     front ToF value vs. threshold, plus counters
 *   5     wall-following speeds and target distances
 *   10/11 maze distances and lap/spin counters */
void debug_prints(){
    /* general */
    // print multiranger [front, right, left]: [val, state]
    if (debug==3) DEBUG_PRINT("\nmultiranger: front.val %d,\t right.val %d,\t left.val %d \n", multiranger.front.val, multiranger.right.val, multiranger.left.val);
    if (debug==3) DEBUG_PRINT("multiranger: front.state %d,\t right.state %d,\t left.state %d \n", multiranger.front.state, multiranger.right.state, multiranger.left.state);
    // print multiranger [front, right, left]: [val, state] [err_counter,dist_counter]
    if (debug==9) DEBUG_PRINT("multiranger: front.val/state %d/%d,\t right.val/state %d/%d,\t left.val/state %d/%d \n", multiranger.front.val, multiranger.front.state, multiranger.right.val, multiranger.right.state, multiranger.left.val, multiranger.left.state);
    if (debug==9) DEBUG_PRINT("front_err_counter %d,\t front_distance_counter %d,\t side_distance_counter %d\n", front_err_counter, front_distance_counter, side_distance_counter);
    // print multiranger [front]: [val, thresh], [err_counter, dist_counter]
    if (debug==4) DEBUG_PRINT("multiranger.front.val %d, tof_front_dist_th %d\n", multiranger.front.val, tof_front_dist_th);
    if (debug==4) DEBUG_PRINT("front_err_counter %d, front_distance_counter %d\n", front_err_counter, front_distance_counter);
    /* wall following */
    // print multiranger [front, side]: [velocity, val, target_side_dist] [err_counter, dist_counter]
    if (debug==5) DEBUG_PRINT("forward_vel: %f \t front.val: %d \t side_speed: %f \t side.val: %d \t target_side_distance: %d\n", forward_vel, multiranger.front.val, side_speed, tof_wall.val, side_distance);
    if (debug==5) DEBUG_PRINT("front_err_counter %d,\t front_distance_counter %d,\t side_distance_counter %d\n", front_err_counter, front_distance_counter, side_distance_counter);
    /* MAZE */
    // print multiranger [front, side]: [velocity, val, speed, target_side_dist] counters[ spin, laps]
    if (debug==10) DEBUG_PRINT("[dist] forward_vel: %f \t front.val: %d \t front*(laps-1): %d \t side_speed: %f \t side.val: %d \t side*laps: %d\n", forward_vel, multiranger.front.val, front_distance_maze, side_speed, tof_wall.val, side_distance_maze);
    if (debug==11) DEBUG_PRINT("[count] counter_spins: %d \t full_room_lap: %d \t side_distance_maze: %d \t side.val: %d \n", counter_spins, full_room_lap, side_distance_maze, tof_wall.val);
}
753 |
754 | /* ---------------------------------------------------------------------- */
755 | /* ----------------------------- MAIN ----------------------------- */
756 | /* ---------------------------------------------------------------------- */
/* Application entry point (Crazyflie app layer): waits for system start,
 * resolves the log-variable ids once, then runs the flight loop which
 * handles takeoff/landing via the 'fly' GUI parameter and dispatches to the
 * selected exploration policy each tick. Never returns. */
void appMain()
{
    DEBUG_PRINT("Starting the system! \n");
    systemWaitStart();
    vTaskDelay(1000);

    /* ------------------------- TAKING OFF ------------------------- */

    // reset the estimator before taking off
    estimatorKalmanInit();
    DEBUG_PRINT("Resetting Kalman Estimator\n");

    // multiranger id
    DEBUG_PRINT("Getting Multiranger ids\n");
    idFrontVal = logGetVarId("mRange", "ValF");
    idFrontState = logGetVarId("mRange", "StatF");
    idRightVal = logGetVarId("mRange", "ValR");
    idRightState = logGetVarId("mRange", "StatR");
    idLeftVal = logGetVarId("mRange", "ValL");
    idLeftState = logGetVarId("mRange", "StatL");

    //position id
    DEBUG_PRINT("Getting state estimator ids\n");
    idX = logGetVarId("stateEstimate", "x");
    idY = logGetVarId("stateEstimate", "y");
    idYaw = logGetVarId("stateEstimate", "yaw");

    /* ------------------------ while(1) Loop ------------------------ */
    DEBUG_PRINT("Begining flight loop\n");
    while(1) {
        vTaskDelay(30); // baseline loop period (RTOS ticks)
        if (slow_down_while_loop==1) vTaskDelay(M2T(500)); // debug: slow the loop down

        // idle on the ground: keep sampling sensors while waiting for 'fly'
        if (fly==0 && landed==1){
            DEBUG_PRINT("Waiting start \n");
            multiranger = get_tof_measurement(multiranger);
            // if (debug==9) DEBUG_PRINT("multiranger: front.val %d,\t right.val %d,\t left.val %d \n", multiranger.front.val, multiranger.right.val, multiranger.left.val);
            // if (debug==9) DEBUG_PRINT("multiranger: front.state %d,\t right.state %d,\t left.state %d \n\n", multiranger.front.state, multiranger.right.state, multiranger.left.state);
            if (debug==9) DEBUG_PRINT("multiranger: front.val/state %d/%d,\t right.val/state %d/%d,\t left.val/state %d/%d \n", multiranger.front.val, multiranger.front.state, multiranger.right.val, multiranger.right.state, multiranger.left.val, multiranger.left.state);

            vTaskDelay(100);
        }

        // land: 'fly' was cleared while airborne
        if (fly==0 && landed==0){
            DEBUG_PRINT("Landing \n");
            land();
            landed=1;
        }

        // take off: 'fly' was set while on the ground
        if (fly==1 && landed==1){
            DEBUG_PRINT("Take off \n");
            estimatorKalmanInit(); // reset the estimator before taking off
            takeoff(flying_height);
            landed=0;
        }

        // Give setpoint to the controller
        // policy: 0 = random, 1 = wall-following, 2 = maze
        if (fly==1)
        {
            if (policy==0) random_policy();
            if (policy==1) wall_following_policy();
            if (policy==2) maze_policy();
        }
    }
}
825 |
826 |
827 | /* -------------------------------------------------------------------------------- */
828 | /* ------------------------------ Logging/Parameters ------------------------------ */
829 | /* -------------------------------------------------------------------------------- */
830 | /* --- TIP for Logging or parameters --- */
831 | // The variable name: PARAM_ADD(TYPE, NAME, ADDRESS)
832 | // both for logging (LOG_GROUP_START) or for parameters (PARAM_GROUP_START)
833 | // should never exceed 9 CHARACTERS, otherwise the firmware won't start correctly
834 | /* --------------- LOGGING --------------- */
// Logged telemetry (readable from the client).
LOG_GROUP_START(VARIABLES_LOG)
    LOG_ADD(LOG_FLOAT, side_vel, &side_speed) // side_speed
LOG_GROUP_STOP(VARIABLES_LOG)

/* --------------- PARAMETERS --------------- */
// Takeoff/landing switch.
PARAM_GROUP_START(START_STOP)
    PARAM_ADD(PARAM_UINT8, fly, &fly)
PARAM_GROUP_STOP(START_STOP)

// Flight speeds and target height.
PARAM_GROUP_START(FLIGHT)
    PARAM_ADD(PARAM_FLOAT, velocity, &forward_vel)
    PARAM_ADD(PARAM_FLOAT, side_vel, &max_side_speed)
    PARAM_ADD(PARAM_FLOAT, height, &flying_height)
PARAM_GROUP_STOP(FLIGHT)

// Exploration-policy selection and options.
PARAM_GROUP_START(POLICY)
    PARAM_ADD(PARAM_INT8, policy, &policy) // for wall-following and maze
    PARAM_ADD(PARAM_INT8, en_indent, &en_indent) // for wall-following and maze
    PARAM_ADD(PARAM_INT8, clockwise, &clockwise) // for wall-following and maze
    PARAM_ADD(PARAM_INT8, inv_laps, &invert_maze_after_n_laps) // for wall-following and maze
PARAM_GROUP_STOP(POLICY)

// In-place spin behavior.
PARAM_GROUP_START(SPINNING)
    PARAM_ADD(PARAM_FLOAT, spin_ang, &spin_angle)
    PARAM_ADD(PARAM_FLOAT, spin_time, &spin_time)
    PARAM_ADD(PARAM_FLOAT, spin_yawr, &spin_yawrate)
    PARAM_ADD(PARAM_FLOAT, rnd_angle, &max_rand_angle)
PARAM_GROUP_STOP(SPINNING)

// ToF distance thresholds and status checking.
PARAM_GROUP_START(ToF)
    PARAM_ADD(PARAM_INT16, frnt_dist, &tof_front_dist_th)
    PARAM_ADD(PARAM_INT16, side_dist, &side_distance)
    PARAM_ADD(PARAM_INT16, side_tole, &side_tolerance)
    PARAM_ADD(PARAM_INT8, en_tof_ck, &tof_state_check)
    PARAM_ADD(PARAM_INT8, prcss_flg, &en_process_tof_flags)
PARAM_GROUP_STOP(ToF)

// Consecutive-sample counter thresholds for detections/errors.
PARAM_GROUP_START(ToF_TH_COUNT)
    PARAM_ADD(PARAM_INT8, frnt_c_th, &front_counter_thresh)
    PARAM_ADD(PARAM_INT8, side_c_th, &side_counter_thresh)
    PARAM_ADD(PARAM_INT8, err_c_th, &err_counter_thresh)
PARAM_GROUP_STOP(ToF_TH_COUNT)

// Debugging switches.
PARAM_GROUP_START(DEBUG)
    PARAM_ADD(PARAM_UINT8, debug, &debug) // debug prints
    PARAM_ADD(PARAM_UINT8, motors_on, &motors_on) // enables/disables motors!
    PARAM_ADD(PARAM_UINT8, slow, &slow_down_while_loop) // debug prints
PARAM_GROUP_STOP(DEBUG)
--------------------------------------------------------------------------------
/crazyflie_app/rotate/Makefile:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------#
2 | # Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland. #
3 | # All rights reserved. #
4 | # #
5 | # Licensed under the Apache License, Version 2.0 (the "License"); #
6 | # you may not use this file except in compliance with the License. #
7 | # See LICENSE.apache.md in the top directory for details. #
8 | # You may obtain a copy of the License at #
9 | # #
10 | # http://www.apache.org/licenses/LICENSE-2.0 #
11 | # #
12 | # Unless required by applicable law or agreed to in writing, software #
13 | # distributed under the License is distributed on an "AS IS" BASIS, #
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
15 | # See the License for the specific language governing permissions and #
16 | # limitations under the License. #
17 | # #
18 | # Authors: #
19 | # Lorenzo Lamberti #
20 | # Luca Bompani #
21 | # Date: 01.04.2023 #
22 | #-------------------------------------------------------------------------------#
23 |
24 | # enable app support
25 | APP=1
26 | APP_STACKSIZE=300
27 | SRC_DIR = src
28 | INC_DIR = inc
29 |
30 | VPATH += $(SRC_DIR)/
31 | SOURCES_LOCAL = $(wildcard $(SRC_DIR)/*.c)
32 | OBJECTS_LOCAL := $(SOURCES_LOCAL:.c=.o)
33 | PROJ_OBJ += $(notdir $(OBJECTS_LOCAL))
34 | INCLUDES += -I$(INC_DIR)
35 |
36 | include ../Makefile.common
37 |
--------------------------------------------------------------------------------
/crazyflie_app/rotate/current_platform.mk:
--------------------------------------------------------------------------------
1 | PLATFORM=cf2
2 |
--------------------------------------------------------------------------------
/crazyflie_app/rotate/inc/config_main.h:
--------------------------------------------------------------------------------
1 | /*-----------------------------------------------------------------------------
2 | Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland.
3 | All rights reserved.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | See LICENSE.apache.md in the top directory for details.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 | File: config_main.h
19 | Authors:
20 | Lorenzo Lamberti
21 | Luca Bompani
22 | Manuele Rusci
23 | Daniele Palossi
24 | Date: 01.04.2023
25 | -------------------------------------------------------------------------------*/
26 |
27 | // Flight mission
28 | #define FORWARD_VELOCITY 0.30f // Max forward speed [m/s]. Default: 1.0f
29 | #define TARGET_H 0.50f // Target height for drone's flight [m]. Default: 0.5f
30 | #define TARGET_D 400.0f // Target distance from obstacle [mm]. Default: 400.0f
31 |
--------------------------------------------------------------------------------
/crazyflie_app/rotate/src/rotate.c:
--------------------------------------------------------------------------------
1 | /*-----------------------------------------------------------------------------
2 | Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland.
3 | All rights reserved.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | See LICENSE.apache.md in the top directory for details.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 | File: rotate.c
19 | Authors:
20 | Lorenzo Lamberti
21 | Davide Graziani
22 | Luca Bompani
23 | Manuele Rusci
24 | Daniele Palossi
25 |
26 | Date: 01.04.2023
27 | -------------------------------------------------------------------------------*/
28 |
29 | /* Description:
30 | rotation policy: the drone performs a 360 ° rotation on itself and in the
31 | meantime acquires eight distance measurements which it will then use
32 | to determine the maximum distance; at this point it moves by a
33 | predetermined distance in that direction.
34 | */
35 |
36 | #include
37 | #include
38 | #include
39 | #include
40 |
41 | #include "app.h"
42 | #include "FreeRTOS.h"
43 | #include "system.h"
44 | #include "task.h"
45 | #include "debug.h"
46 | #include "stabilizer_types.h"
47 | #include "estimator_kalman.h"
48 | #include "commander.h"
49 | #include "log.h"
50 | #include "param.h"
51 | #include
52 | #include "config_main.h"
53 |
54 |
55 | /* --------------- GUI PARAMETERS --------------- */
56 | // Global variables for the parameters
57 | float forward_vel = FORWARD_VELOCITY;
58 | float flying_height = TARGET_H;
59 | float straight_distance = TARGET_D;
60 |
61 | // My parameters for enabling/disabling some parts of the code. 1=Active, 0=Non active
62 | uint8_t debug = 1; // activate debug prints
63 |
64 | // START / STOP mission parameter
65 | uint8_t fly = 0; // Takeoff/landing command (GUI parameter)
66 | uint8_t landed = 0; // Flag for indicating whether the drone landed
67 |
68 | /* --------------- GLOBAL VARIABLES --------------- */
69 |
70 | #define ANGULAR_ROTATION 45
71 |
72 | static setpoint_t fly_setpoint;
73 | int16_t valDistance[8], valFront;
74 | uint8_t stateFront;
75 | logVarId_t idFrontVal, idFrontState, idX, idY, idYaw;
76 | float posXrotate, posYrotate, posYawrotate;
77 |
78 | /* --------------- FUNCTION DEFINITION --------------- */
79 | void takeoff(float height);
80 | void goStraight();
81 | int rotate();
82 | void land(void);
83 | static void create_setpoint(setpoint_t* setpoint, float x_vel, float y_vel, float z_pos, float yaw_att);
84 | void headToPosition(float x, float y, float z, float yaw);
85 | void headToSetpoint (float x, float y, float z, float yaw);
86 |
87 | /* --------------- FUNCTIONS --------------- */
88 | // Fly forward functions
89 | static void create_setpoint(setpoint_t* setpoint, float x_vel, float y_vel, float z_pos, float yaw_att)
90 | {
91 | memset(setpoint, 0, sizeof(setpoint_t));
92 | setpoint->mode.x = modeVelocity;
93 | setpoint->mode.y = modeVelocity;
94 | setpoint->mode.z = modeAbs;
95 | setpoint->mode.yaw = modeAbs;
96 | setpoint->velocity.x = x_vel;
97 | setpoint->velocity.y = y_vel;
98 | setpoint->position.z = z_pos;
99 | setpoint->attitude.yaw = yaw_att;
100 | setpoint->velocity_body = true;
101 | }
102 |
103 | void headToPosition(float x, float y, float z, float yaw)
104 | {
105 | setpoint_t setpoint;
106 | memset(&setpoint, 0, sizeof(setpoint_t));
107 |
108 | setpoint.mode.x = modeAbs;
109 | setpoint.mode.y = modeAbs;
110 | setpoint.mode.z = modeAbs;
111 | setpoint.mode.yaw = modeAbs;
112 |
113 | setpoint.position.x = x;
114 | setpoint.position.y = y;
115 | setpoint.position.z = z;
116 | setpoint.attitude.yaw = yaw;
117 | commanderSetSetpoint(&setpoint, 3);
118 | }
119 |
// Command a body-frame velocity setpoint (x/y velocities, absolute z and
// yaw) through the shared fly_setpoint buffer.
void headToSetpoint (float x, float y, float z, float yaw)
{
    create_setpoint (&fly_setpoint, x, y, z, yaw);
    commanderSetSetpoint (&fly_setpoint, 3);
}
125 |
126 | // EXPLORATION FUNCTIONS
127 | void takeoff(float height)
128 | {
129 | point_t pos;
130 | memset(&pos, 0, sizeof(pos));
131 | estimatorKalmanGetEstimatedPos(&pos);
132 |
133 | // first step: taking off gradually, from a starting height of 0.2 to the desired height
134 | int endheight = (int)(100*(height-0.2f));
135 | for(int i=0; i valDistance[MaxValue])
247 | MaxValue = i;
248 | }
249 |
250 | posYawrotate = logGetFloat (idYaw);
251 |
252 | /* --------------- Gradual rotation --------------- */
253 | for (int i = 0; i < (10 * MaxValue); i++)
254 | {
255 | headToPosition (posXrotate, posYrotate, flying_height, posYawbegin + (ANGULAR_ROTATION * i)*0.1f);
256 | vTaskDelay(50);
257 | }
258 |
259 | /* --------------- Angular correction --------------- */
260 | for (int i = 0; i < 20; i++)
261 | {
262 | headToPosition (posXrotate, posYrotate, flying_height, posYawbegin + (ANGULAR_ROTATION * MaxValue));
263 | vTaskDelay(50);
264 | }
265 |
266 | return MaxValue;
267 | }
268 |
269 | void land(void)
270 | {
271 | float posX = logGetFloat(idX);
272 | float posY = logGetFloat(idY);
273 | float posYaw = logGetFloat(idYaw);
274 |
275 | for(int i=(int)100*flying_height; i>5; i--)
276 | {
277 | headToPosition(posX, posY, (float)i / 100.0f, posYaw);
278 | vTaskDelay(20);
279 | }
280 | vTaskDelay(200);
281 | }
282 |
/* Application entry point for the "rotate" policy: wait for the 'fly'
 * parameter, take off, then repeatedly scan 360° for the most open
 * direction (rotate()) and fly a fixed distance toward it (goStraight()).
 * Never returns. */
void appMain()
{
    DEBUG_PRINT("Dronet v2 started! \n");
    systemWaitStart();
    vTaskDelay(1000);
    /* ------------------------- NOT FLYING ------------------------- */

    // busy-wait on the ground until the GUI sets 'fly'
    while(!fly)
    {
        if (debug==1) DEBUG_PRINT("Waiting start \n");
        vTaskDelay(100);
    }

    /* ------------------------- TAKING OFF ------------------------- */

    // reset the estimator before taking off
    estimatorKalmanInit();
    // presumably the index of the max-distance direction returned by
    // rotate(); 9 = not yet computed — TODO confirm against rotate()
    uint8_t isMax = 9;
    // id acquisition
    idFrontVal = logGetVarId("mRange", "ValF");
    idFrontState = logGetVarId("mRange", "StatF");
    idX = logGetVarId("stateEstimate", "x");
    idY = logGetVarId("stateEstimate", "y");
    idYaw = logGetVarId("stateEstimate", "yaw");
    // TAKE OFF
    takeoff(flying_height);

    /* ------------------------ Flight Loop ------------------------ */

    while(1) {
        vTaskDelay(5);
        if (fly==0 && landed==0)//land
        {
            land();
            landed = 1;
        }
        if (fly==1 && landed==1) //start flying again
        {
            estimatorKalmanInit(); // reset the estimator before taking off
            takeoff(flying_height);
            landed = 0;
        }
        if (debug==1) DEBUG_PRINT("flying\n");

        // Give setpoint to the controller
        if (fly==1)
        {
            isMax = rotate();

            // NOTE(review): 8 appears to be a sentinel from rotate() meaning
            // "no usable direction" — confirm against rotate()'s return path
            if (isMax != 8)
                goStraight();
        }

        vTaskDelay(30);
    }
}
339 |
340 |
341 | /* --- TIP for Logging or parameters --- */
342 | // The variable name: PARAM_ADD(TYPE, NAME, ADDRESS)
343 | // both for logging (LOG_GROUP_START) or for parameters (PARAM_GROUP_START)
344 | // should never exceed 9 CHARACTERS, otherwise the firmware won't start correctly
345 |
346 | /* --- PARAMETERS --- */
347 | PARAM_GROUP_START(START_STOP)
348 | PARAM_ADD(PARAM_UINT8, fly, &fly)
349 | PARAM_GROUP_STOP(DRONET_PARAM)
350 |
351 | // Activate - deactivate functionalities: 0=Non-active, 1=active
352 | PARAM_GROUP_START(FUNCTIONALITIES)
353 | PARAM_ADD(PARAM_UINT8, debug, &debug) // debug prints
354 | PARAM_GROUP_STOP(DRONET_SETTINGS)
355 |
356 | // Filters' parameters
357 | PARAM_GROUP_START(DRONET_PARAMS)
358 | PARAM_ADD(PARAM_FLOAT, velocity, &forward_vel)
359 | PARAM_ADD(PARAM_FLOAT, height, &flying_height)
360 | PARAM_ADD(PARAM_FLOAT, distance, &straight_distance)
361 | PARAM_GROUP_STOP(DRONET_SETTINGS)
362 |
363 | LOG_GROUP_START(dist)
364 | LOG_ADD(LOG_INT16, Val0, &valDistance[0])
365 | LOG_ADD(LOG_INT16, Val1, &valDistance[1])
366 | LOG_ADD(LOG_INT16, Val2, &valDistance[2])
367 | LOG_ADD(LOG_INT16, Val3, &valDistance[3])
368 | LOG_ADD(LOG_INT16, Val4, &valDistance[4])
369 | LOG_ADD(LOG_INT16, Val5, &valDistance[5])
370 | LOG_ADD(LOG_INT16, Val6, &valDistance[6])
371 | LOG_ADD(LOG_INT16, Val7, &valDistance[7])
372 | LOG_GROUP_STOP(dist)
--------------------------------------------------------------------------------
/dataset/README.md:
--------------------------------------------------------------------------------
1 | # Himax dataset
2 |
3 | The Himax dataset has been collected at University of Bologna using a Himax ultra-low power, gray-scale, and QVGA camera.
4 |
5 | The dataset has been used for training and testing the inference capability of our pulp-detector, an object detection CNN that detects bottles and tin cans.
6 | This release includes the training and testing set described in the paper: https://ieeexplore.ieee.org/abstract/document/10137154.
7 |
8 | It is composed of 321 training images and 279 testing .jpg images with resolution 324x244, each labeled with the class type and a bounding box around the bottles and tin cans.
9 |
10 | Each folder contains a variable number of gray-scale .jpg images, ordered by number (e.g., frame_1.jpg, frame_2.jpg, etc.) and one frame_1.jxml file with all the ground-truth labels.
11 |
12 | We provide also the TF Record files needed for the Tensorflow training: `mytestset_test.record`, `finetuning-2-train.record`.
13 |
14 | All the files in this dataset can be downloaded from this [link](https://zenodo.org/doi/10.5281/zenodo.8421460).
15 |
16 |
17 |
18 |
19 | ## License
20 | We release this dataset as open source under Creative Commons Attribution Non Commercial No Derivatives 4.0 International, see [license](../LICENSE.CC.md)
21 |
--------------------------------------------------------------------------------
/gap8_app/Makefile:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020 GreenWaves Technologies
2 | # All rights reserved.
3 |
4 | # This software may be modified and distributed under the terms
5 | # of the BSD license. See the LICENSE.BSD.md file for details.
6 |
7 | ifndef GAP_SDK_HOME
8 | $(error Source sourceme in gap_sdk first)
9 | endif
10 |
11 | include common.mk
12 |
13 | io=host
14 | HAVE_HIMAX?=0
15 | USE_CAMERA?=0
16 | USE_LCD?=0
17 | SILENT?=0
18 | SHOW_PERF?=1
19 |
20 | MEAS?=0
21 | QUANT_BITS=8
22 | MODEL_SQ8=1
23 | $(info Building $(TARGET_CHIP_FAMILY) mode with $(QUANT_BITS) bit quantization)
24 |
25 | IMAGE=$(CURDIR)/tflite_model/test_1_out.ppm
26 | MAIN?=$(MODEL_PREFIX_SSD).c
27 |
28 | include common/model_decl.mk
29 |
30 | APP = SSD_tin_can_bottle
31 |
32 | APP_SRCS += $(MAIN) $(MODEL_COMMON_SRCS) $(CNN_LIB)
33 | APP_SRCS += BUILD_MODEL_SSD/SSD_tin_can_bottleKernels.c
34 |
35 | APP_CFLAGS += -O3
36 | APP_CFLAGS += -I. -I$(MODEL_COMMON_INC) -I$(TILER_EMU_INC) -I$(TILER_INC) $(CNN_LIB_INCLUDE)
37 | APP_CFLAGS += -IBUILD_MODEL_SSD
38 | APP_LDFLAGS += -lgaplib
39 |
40 |
41 | PMSIS_OS=pulpos
42 |
43 | JENKINS?=1
44 | ifeq ($(JENKINS), 1)
45 | APP_CFLAGS += -DONE_ITER -DTEST #-DPERF
46 | else
47 | ifeq ($(platform), gvsoc)
48 | APP_CFLAGS += -DONE_ITER -DHAVE_LCD #-DPERF
49 | else
50 | APP_CFLAGS += -DSILENT -DHAVE_HIMAX -DHAVE_LCD
51 | endif
52 | endif
53 |
54 | # FC stack size:
55 | #MAIN_STACK_SIZE=4096
56 | # CL stack size:
57 |
58 | ifeq '$(TARGET_CHIP_FAMILY)' 'GAP9'
59 | CLUSTER_STACK_SIZE=4096
60 | ifeq '$(PMSIS_OS)' 'freertos'
61 | CLUSTER_SLAVE_STACK_SIZE=2048
62 | else
63 | CLUSTER_SLAVE_STACK_SIZE=1024
64 | endif
65 | TOTAL_STACK_SIZE=$(shell expr $(CLUSTER_STACK_SIZE) \+ $(CLUSTER_SLAVE_STACK_SIZE) \* 8)
66 | FREQ_CL?=50
67 | FREQ_FC?=50
68 | else #GAP8
69 | CLUSTER_STACK_SIZE=4096
70 | CLUSTER_SLAVE_STACK_SIZE=1024
71 | TOTAL_STACK_SIZE=$(shell expr $(CLUSTER_STACK_SIZE) \+ $(CLUSTER_SLAVE_STACK_SIZE) \* 7)
72 | ifeq '$(TARGET_CHIP)' 'GAP8_V3'
73 | FREQ_CL?=175
74 | else
75 | FREQ_CL?=160
76 | endif
77 | FREQ_FC?=250
78 | endif
79 |
80 | APP_CFLAGS += -DSTACK_SIZE=$(CLUSTER_STACK_SIZE) -DSLAVE_STACK_SIZE=$(CLUSTER_SLAVE_STACK_SIZE)
81 |
82 | APP_CFLAGS += -DAT_IMAGE=$(IMAGE) -DFREQ_CL=$(FREQ_CL) -DFREQ_FC=$(FREQ_FC)
83 | APP_CFLAGS += -DAT_INPUT_HEIGHT_SSD=$(AT_INPUT_HEIGHT_SSD) -DAT_INPUT_WIDTH_SSD=$(AT_INPUT_WIDTH_SSD) -DAT_INPUT_COLORS_SSD=$(AT_INPUT_COLORS_SSD)
84 | ifdef VERBOSE
85 | APP_CFLAGS +=-DVERBOSE=0
86 | endif
87 | ifdef FROM_JTAG
88 | APP_CFLAGS +=-DFROM_JTAG=0
89 | endif
90 | ifdef PERFORMANCE
91 | APP_CFLAGS +=-DPERFORMANCE=0
92 | endif
93 | #ifeq ($(PERF), 1)
94 | # APP_CFLAGS += -DPERF
95 | #endif
96 |
97 | MODEL_TENSORS = BUILD_MODEL_SSD/SSD_tin_can_bottle_L3_Flash_Const.dat
98 | READFS_FILES=$(abspath $(MODEL_TENSORS))
99 | PLPBRIDGE_FLAGS += -f
100 |
101 |
102 |
103 | # Generate the AutoTiler kernels via ssd.mk. The target name must match the
104 | # kernels file actually compiled in APP_SRCS (SSD_tin_can_bottleKernels.c);
105 | # the previous stale name (ssdlite_ocrKernels.c) never existed on disk, so the
106 | # recipe re-ran on every build and dependency tracking was broken.
107 | BUILD_MODEL_SSD/SSD_tin_can_bottleKernels.c:
108 | 	make -f ssd.mk model CLUSTER_STACK_SIZE=$(CLUSTER_STACK_SIZE) CLUSTER_SLAVE_STACK_SIZE=$(CLUSTER_SLAVE_STACK_SIZE)
109 |
110 | # all depends on the generated model kernels
111 | all:: BUILD_MODEL_SSD/SSD_tin_can_bottleKernels.c
109 |
110 | clean::
111 | rm -rf BUILD*
112 |
113 | clean_models:
114 | rm -rf BUILD_MODEL*
115 |
116 | #$(info APP_SRCS... $(APP_SRCS))
117 | #$(info APP_CFLAGS... $(APP_CFLAGS))
118 | include $(RULES_DIR)/pmsis_rules.mk
119 |
120 |
--------------------------------------------------------------------------------
/gap8_app/README.md:
--------------------------------------------------------------------------------
1 | # Single shot detector (SSD) CNN
2 |
3 | In this repo, there are the training, evaluation, and deployment scripts used for the deployment of an SSD-based object detector.
4 |
5 | Objects our CNN can detect: Tin cans and bottles
6 |
7 | ## Setup
8 |
9 | There are two steps to this setup. The first is to set up the required Python environment, which can be done using the tensorflow1_15.yml file:
10 | > conda env create -f tensorflow1_15.yml
11 |
12 | then you need to install the gap_sdk, which can be installed following the instruction of the official [repository](https://github.com/GreenWaves-Technologies/gap_sdk/tree/release-v4.7.0).
13 |
14 | Gap-sdk version used in this work: 4.7
15 |
16 |
17 |
18 | ## Deployment on the nano-drone
19 |
20 | This section shows the commands necessary for the deployment of a quantized neural network on the GAP8 SoC, which is on the AI-Deck of the nano drone.
21 |
22 | ### How to run the CNN on GAP8
23 |
24 | You need to source the correct configuration; this can be done with the following command:
25 |
26 | `source path/to/gap_sdk/configs/ai_deck.sh`
27 |
28 | Once this is done, you need to export the GAPY_OPENOCD_CABLE, which can be done with the command
29 |
30 | `export GAPY_OPENOCD_CABLE=path/to/gap_sdk/utils/gap8-openocd/tcl/interface/ftdi/olimex-arm-usb-ocd-h.cfg`
31 |
32 | The last step of the deployment is to run:
33 | `make clean all run`
34 | which builds and runs the application on the drone.
35 |
36 | ### Visualize camera stream and detections
37 |
38 | To visualize the detections being performed by the drone you need first to connect to the Bitcraze AI-deck example wi-fi network.
39 |
40 | The script requires the gi package, which can be installed following this [GUIDE](https://pygobject.readthedocs.io/en/latest/getting_started.html#ubuntu-getting-started)
41 |
42 | Then you need to run the viewer_custom.py with :
43 | ```python viewer_custom.py ```
44 |
45 |
46 |
47 |
48 |
49 |
50 |
--------------------------------------------------------------------------------
/gap8_app/SSD_tin_can_bottle.c:
--------------------------------------------------------------------------------
1 | /*-----------------------------------------------------------------------------
2 | Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland.
3 | All rights reserved.
4 |
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | See LICENSE.apache.md in the top directory for details.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 | File: SSD_tin_can_bottle.c
19 | Authors:
20 | Lorenzo Lamberti
21 | Luca Bompani
22 | Manuele Rusci
23 | Daniele Palossi
24 | Date: 01.04.2023
25 | -------------------------------------------------------------------------------*/
26 |
27 | #include "SSD_tin_can_bottle.h"
28 | #include "SSD_tin_can_bottleKernels.h"
29 | #include "SSD_tin_can_bottleInfo.h"
30 |
31 |
32 |
33 | #include "/PATH/TO/THE/SDK/gap_sdk/frame_streamer/include/tools/frame_streamer.h"
34 | #include "/PATH/TO/THE/SDK/gap_sdk/frame_streamer/frame_streamer/frame_streamer.c"
35 | //needed as the gap_sdk is missing the compiled versions.
36 | #include "SSD_tin_can_bottle.h"
37 | #include "SSD_tin_can_bottleKernels.h"
38 | #include "SSD_tin_can_bottleInfo.h"
39 | #include "pmsis.h"
40 | #include "bsp/transport.h"
41 | #include "bsp/flash/hyperflash.h"
42 | #include "bsp/bsp.h"
43 | #include "bsp/ram.h"
44 | #include "bsp/buffer.h"
45 | #include "bsp/transport/nina_w10.h"
46 | #include "bsp/camera/himax.h"
47 | #include "bsp/ram/hyperram.h"
48 | #include "gaplib/ImgIO.h"
49 | #include "stdio.h"
50 |
51 | #define __XSTR(__s) __STR(__s)
52 | #define __STR(__s) #__s
53 |
54 | #ifdef SILENT
55 | #define PRINTF(...) ((void) 0)
56 | #else
57 | #define PRINTF printf
58 | #endif
59 |
60 | #define FIX2FP(Val, Precision) ((float) (Val) / (float) (1<<(Precision)))
61 |
62 | #define AT_INPUT_SIZE (AT_INPUT_WIDTH_SSD*AT_INPUT_HEIGHT_SSD*AT_INPUT_COLORS_SSD)
63 | #define MAX_BB (300)
64 | #define CAMERA_WIDTH (324)
65 | #define CAMERA_HEIGHT (244)
66 | #define NUMBER_OF_DETECTION (10)
67 | #define BYTES_DETECTION (10)
68 | #define EXTRA_RECOGNITION (2)
69 | #define TEXT_SIZE (NUMBER_OF_DETECTION*BYTES_DETECTION +EXTRA_RECOGNITION)
70 | #define CAMERA_COLORS (1)
71 | #define CAMERA_SIZE (CAMERA_WIDTH*CAMERA_HEIGHT*CAMERA_COLORS)
72 | #define SCORE_THR 0
73 |
74 | #define LED_ON pi_gpio_pin_write(&gpio_device, 2, 1)
75 |
76 | #define LED_OFF pi_gpio_pin_write(&gpio_device, 2, 0)
77 |
78 | AT_HYPERFLASH_FS_EXT_ADDR_TYPE __PREFIX(_L3_Flash) = 0;
79 |
80 |
81 | L2_MEM static struct pi_device gpio_device;
82 |
83 | //streamers for passing text and images
84 | struct simple_streamer{
85 | int channel;
86 | struct pi_transport_header header;
87 | unsigned int size;
88 | };
89 |
90 | struct simple_streamer text_streamer;
91 | static frame_streamer_t *streamer;// frame streamer
92 | //devices declarations
93 | struct pi_device wifi;
94 | struct pi_device camera;
95 | struct pi_device cluster_dev;
96 | struct pi_device HyperRam;
97 | //signal definitions for callbacks
98 | static pi_task_t cam_task;
99 | static pi_task_t streamer_task;
100 | static pi_task_t detection_task;
101 | L2_MEM struct pi_cluster_task task[1];
102 |
103 | //buffers
104 | static pi_buffer_t buffer;//buffer for image transfer
105 | static uint32_t l3_buff;//l3 memory pointer
106 | L2_MEM static uint8_t Input_1[CAMERA_SIZE];//image storage
107 | L2_MEM signed char outputs[TEXT_SIZE];// neural network output storage for sending throught wifi
108 | L2_MEM short int out_boxes[NUMBER_OF_DETECTION*4]; //each bounding box is composed of 4 coordinates
109 | L2_MEM signed char out_scores[NUMBER_OF_DETECTION];
110 | L2_MEM signed char out_classes[NUMBER_OF_DETECTION];
111 |
112 | //callback function declarations
113 | static void detection_handler();
114 | static void camera_handler();
115 | static void main_handler();
116 |
117 |
118 |
119 |
120 |
121 | static void init_wifi() {
122 | //starting the wifi the wifi value is defined at the beginning of the document and is a global variable
123 | int32_t errors = 0;
124 | struct pi_nina_w10_conf nina_conf;
125 |
126 | pi_nina_w10_conf_init(&nina_conf);
127 |
128 | nina_conf.ssid = "";
129 | nina_conf.passwd = "";
130 | nina_conf.ip_addr = "0.0.0.0";
131 | nina_conf.port = 5555;
132 |
133 | pi_open_from_conf(&wifi, &nina_conf);
134 |
135 | errors = pi_transport_open(&wifi);
136 |
137 | #ifdef VERBOSE
138 | PRINTF("NINA WiFi init:\t\t\t\t%s\n", errors?"Failed":"Ok");
139 | #endif
140 |
141 | if(errors) pmsis_exit(errors);
142 | }
143 |
144 | static void init_streamer() {
145 | //frame streamer init
146 | struct frame_streamer_conf streamer_conf;
147 |
148 | frame_streamer_conf_init(&streamer_conf);
149 |
150 | streamer_conf.transport = &wifi;
151 | streamer_conf.format = FRAME_STREAMER_FORMAT_JPEG;
152 | streamer_conf.width = AT_INPUT_WIDTH_SSD;
153 | streamer_conf.height = AT_INPUT_HEIGHT_SSD;
154 | streamer_conf.depth = 1;
155 | streamer_conf.name = "image_Stream";
156 |
157 | streamer = frame_streamer_open(&streamer_conf);
158 |
159 | pi_buffer_init(&buffer, PI_BUFFER_TYPE_L2, Input_1);
160 | pi_buffer_set_format(&buffer, AT_INPUT_WIDTH_SSD, AT_INPUT_HEIGHT_SSD, 1, PI_BUFFER_FORMAT_GRAY);
161 |
162 | #ifdef VERBOSE
163 | PRINTF("Streamer init:\t\t\t\t%s\n", streamer?"Ok":"Failed");
164 | #endif
165 |
166 | if(streamer == NULL) pmsis_exit(-1);
167 | }
168 |
169 | #ifndef FROM_JTAG
170 | static int open_camera_himax(struct pi_device *device)
171 | {
172 | struct pi_himax_conf cam_conf;
173 |
174 | pi_himax_conf_init(&cam_conf);
175 |
176 | cam_conf.format = PI_CAMERA_QVGA;
177 |
178 | pi_open_from_conf(device, &cam_conf);
179 | if (pi_camera_open(device))return -1;
180 |
181 | uint8_t reg_value, set_value;
182 |
183 |
184 |
185 |
186 | set_value=0;
187 |
188 | pi_camera_reg_set(device, IMG_ORIENTATION, &set_value);
189 | pi_camera_reg_get(device, IMG_ORIENTATION, ®_value);
190 |
191 |
192 | pi_camera_control(device, PI_CAMERA_CMD_AEG_INIT, 0);
193 |
194 | return 0;
195 | }
196 | #endif
197 | int8_t* converter_To_int8(uint8_t* input){
198 | int8_t* Input_2=input;
199 | for(int i=0; ientry = &RunNetwork;
230 | task->stack_size = STACK_SIZE;
231 | task->slave_stack_size = SLAVE_STACK_SIZE;
232 | task->arg = NULL;
233 |
234 |
235 |
236 |
237 |
238 |
239 | #ifdef VERBOSE
240 | PRINTF("Graph constructor was OK\n");
241 | #endif
242 | #ifndef FROM_JTAG
243 | /*cropping image to AT_INPUT_HEIGHT_SSD and AT_INPUT_WIDTH_SSD dimensions*/
244 | int idx=0;
245 |
246 | for(int i =0;i0;--i){
256 | for (int j=0;j
21 | Luca Bompani
22 | Date: 01.04.2023
23 | -------------------------------------------------------------------------------*/
24 |
25 | #ifndef __OCRSSD_H__
26 | #define __OCRSSD_H__
27 |
28 | #define __PREFIX(x) SSD_tin_can_bottle ## x
29 |
30 | #include "Gap.h"
31 |
32 | #ifdef __EMUL__
33 | #include
34 | #include
35 | #include
36 | #include
37 | #include
38 | #include
39 | #endif
40 |
41 | extern AT_HYPERFLASH_FS_EXT_ADDR_TYPE __PREFIX(_L3_Flash);
42 |
43 | #endif
44 |
--------------------------------------------------------------------------------
/gap8_app/common.mk:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020 GreenWaves Technologies
2 | # All rights reserved.
3 |
4 | # This software may be modified and distributed under the terms
5 | # of the BSD license. See the LICENSE.BSD.md file for details.
6 |
7 | MODEL?=0
8 |
9 |
10 | MODEL_PREFIX_SSD=SSD_tin_can_bottle
11 |
12 |
13 | AT_INPUT_WIDTH_SSD=320
14 | AT_INPUT_HEIGHT_SSD=240
15 | AT_INPUT_COLORS_SSD=3
16 |
17 | AT_SIZES_SSD += -DAT_INPUT_HEIGHT_SSD=$(AT_INPUT_HEIGHT_SSD) -DAT_INPUT_WIDTH_SSD=$(AT_INPUT_WIDTH_SSD) -DAT_INPUT_COLORS_SSD=$(AT_INPUT_COLORS_SSD)
18 |
19 |
--------------------------------------------------------------------------------
/gap8_app/common/model_decl.mk:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2017 GreenWaves Technologies
2 | # All rights reserved.
3 |
4 | # This software may be modified and distributed under the terms
5 | # of the BSD license. See the LICENSE.BSD.md file for details.
6 |
7 | MODEL_SUFFIX?=
8 |
9 | MODEL_PYTHON=python3
10 |
11 | MODEL_COMMON ?= common
12 | MODEL_COMMON_INC ?= $(GAP_SDK_HOME)/libs/gap_lib/include
13 | MODEL_COMMON_SRC ?= $(GAP_SDK_HOME)/libs/gap_lib/img_io
14 | MODEL_COMMON_SRC_FILES ?= ImgIO.c
15 | MODEL_COMMON_SRCS = $(realpath $(addprefix $(MODEL_COMMON_SRC)/,$(MODEL_COMMON_SRC_FILES)))
16 | MODEL_BUILD = BUILD_MODEL$(MODEL_SUFFIX)
17 |
18 | MODEL_TFLITE = $(MODEL_BUILD)/$(MODEL_PREFIX).tflite
19 |
20 | TENSORS_DIR = $(MODEL_BUILD)/tensors
21 | MODEL_TENSORS = $(MODEL_BUILD)/$(MODEL_PREFIX)_L3_Flash_Const.dat
22 |
23 | MODEL_STATE = $(MODEL_BUILD)/$(MODEL_PREFIX).json
24 | MODEL_SRC = $(MODEL_PREFIX)Model.c
25 | MODEL_HEADER = $(MODEL_PREFIX)Info.h
26 | MODEL_GEN = $(MODEL_BUILD)/$(MODEL_PREFIX)Kernels
27 | MODEL_GEN_C = $(addsuffix .c, $(MODEL_GEN))
28 | MODEL_GEN_CLEAN = $(MODEL_GEN_C) $(addsuffix .h, $(MODEL_GEN))
29 | MODEL_GEN_EXE = $(MODEL_BUILD)/GenTile
30 |
31 | ifdef MODEL_QUANTIZED
32 | NNTOOL_EXTRA_FLAGS = -q
33 | endif
34 |
35 | MODEL_GENFLAGS_EXTRA =
36 |
37 | EXTRA_GENERATOR_SRC =
38 |
39 | IMAGES = images
40 | RM=rm -f
41 |
42 | NNTOOL=nntool
43 |
44 | MODEL_SIZE_CFLAGS = -DAT_INPUT_HEIGHT=$(AT_INPUT_HEIGHT) -DAT_INPUT_WIDTH=$(AT_INPUT_WIDTH) -DAT_INPUT_COLORS=$(AT_INPUT_COLORS)
45 | include $(RULES_DIR)/at_common_decl.mk
46 |
--------------------------------------------------------------------------------
/gap8_app/common/model_rules.mk:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2017 GreenWaves Technologies
2 | # All rights reserved.
3 |
4 | # This software may be modified and distributed under the terms
5 | # of the BSD license. See the LICENSE.BSD.md file for details.
6 |
7 | # The training of the model is slightly different depending on
8 | # the quantization. This is because in 8 bit mode we used signed
9 | # 8 bit so the input to the model needs to be shifted 1 bit
10 | ifdef TRAIN_7BIT
11 | MODEL_TRAIN_FLAGS = -c
12 | else
13 | MODEL_TRAIN_FLAGS =
14 | endif
15 |
16 | USE_DISP=1
17 |
18 | ifdef USE_DISP
19 | SDL_FLAGS= -lSDL2 -lSDL2_ttf -DAT_DISPLAY
20 | else
21 | SDL_FLAGS=
22 | endif
23 |
24 | ifdef MODEL_L1_MEMORY
25 | MODEL_GEN_EXTRA_FLAGS += --L1 $(MODEL_L1_MEMORY)
26 | endif
27 |
28 | ifdef MODEL_L2_MEMORY
29 | MODEL_GEN_EXTRA_FLAGS += --L2 $(MODEL_L2_MEMORY)
30 | endif
31 |
32 | ifdef MODEL_L3_MEMORY
33 | MODEL_GEN_EXTRA_FLAGS += --L3 $(MODEL_L3_MEMORY)
34 | endif
35 |
36 |
37 | $(MODEL_BUILD):
38 | mkdir $(MODEL_BUILD)
39 |
40 | $(MODEL_TFLITE): $(TRAINED_TFLITE_MODEL) | $(MODEL_BUILD)
41 | cp $< $@
42 |
43 | # Creates an NNTOOL state file by running the commands in the script
44 | # These commands could be run interactively
45 | # The commands:
46 | # Adjust the model to match AutoTiler tensor order
47 | # Fuse nodes together to match fused AutoTiler generators
48 | # Quantize the graph if not already done with tflite quantization
49 | # Save the graph state files
50 |
51 | $(MODEL_STATE): $(MODEL_TFLITE) $(IMAGES) $(NNTOOL_SCRIPT) | $(MODEL_BUILD)
52 | echo $(MODEL_L1_MEMORY)
53 | echo "GENERATING NNTOOL STATE FILE"
54 | $(NNTOOL) -s $(NNTOOL_SCRIPT) $< $(NNTOOL_EXTRA_FLAGS)
55 |
56 | nntool_state: $(MODEL_STATE)
57 |
58 | # Runs NNTOOL with its state file to generate the autotiler model code
59 | $(MODEL_BUILD)/$(MODEL_SRC): $(MODEL_STATE) $(MODEL_TFLITE) | $(MODEL_BUILD)
60 | echo "GENERATING AUTOTILER MODEL"
61 | $(NNTOOL) -g -M $(MODEL_BUILD) -m $(MODEL_SRC) -T $(TENSORS_DIR) -H $(MODEL_HEADER) $(MODEL_GENFLAGS_EXTRA) $<
62 |
63 | nntool_gen: $(MODEL_BUILD)/$(MODEL_SRC)
64 |
65 | # Build the code generator from the model code
66 | $(MODEL_GEN_EXE): $(CNN_GEN) $(MODEL_BUILD)/$(MODEL_SRC) $(EXTRA_GENERATOR_SRC) | $(MODEL_BUILD)
67 | echo "COMPILING AUTOTILER MODEL"
68 | gcc -g -o $(MODEL_GEN_EXE) -I. -I$(TILER_INC) -I$(TILER_EMU_INC) $(CNN_GEN_INCLUDE) $(CNN_LIB_INCLUDE) $? $(TILER_LIB) $(SDL_FLAGS)
69 |
70 | compile_model: $(MODEL_GEN_EXE)
71 |
72 | # Run the code generator to generate GAP graph and kernel code
73 | $(MODEL_GEN_C): $(MODEL_GEN_EXE)
74 | echo "RUNNING AUTOTILER MODEL"
75 | $(MODEL_GEN_EXE) -o $(MODEL_BUILD) -c $(MODEL_BUILD) $(MODEL_GEN_EXTRA_FLAGS)
76 |
77 | # A phony target to simplify including this in the main Makefile
78 | model: $(MODEL_GEN_C)
79 |
80 | clean_model:
81 | $(RM) $(MODEL_GEN_EXE)
82 | $(RM) -rf $(MODEL_BUILD)
83 | $(RM) $(MODEL_BUILD)/*.dat
84 |
85 | clean_at_model:
86 | $(RM) $(MODEL_GEN_C)
87 | $(RM) $(MODEL_BUILD)/*.dat
88 |
89 | clean_train:
90 | $(RM) -rf $(MODEL_TRAIN_BUILD)
91 |
92 | clean_images:
93 | $(RM) -rf $(IMAGES)
94 |
95 | test_images: $(IMAGES)
96 |
97 | .PHONY: model clean_model clean_train test_images clean_images train nntool_gen nntool_state tflite compile_model
98 |
--------------------------------------------------------------------------------
/gap8_app/images/test_1_out.ppm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/gap8_app/images/test_1_out.ppm
--------------------------------------------------------------------------------
/gap8_app/nntool_scripts/nntool_script_ssdlite:
--------------------------------------------------------------------------------
1 | adjust
2 | fusions --scale8
3 | #nodeoption ADD_0_9 PARALLELFEATURES 0
4 | #nodeoption ADD_0_16 PARALLELFEATURES 0
5 | #nodeoption ADD_0_20 PARALLELFEATURES 0
6 | #nodeoption ADD_0_27 PARALLELFEATURES 0
7 | set graph_size_opt 2
8 | set default_input_exec_location "AT_MEM_L3_HRAM"
9 | set default_input_home_location "AT_MEM_L3_HRAM"
10 | set l3_ram_ext_managed true
11 | set graph_reorder_constant_in false
12 | set graph_const_exec_from_flash false
13 | set graph_monitor_cvar_name SSD_Monitor
14 | set graph_produce_operinfos_cvar_name SSD_Op
15 | set graph_produce_node_cvar_name SSD_Nodes
16 | set graph_produce_node_names true
17 | set graph_produce_operinfos true
18 | set graph_monitor_cycles true
19 |
20 | #set graph_trace_exec true
21 |
22 |
23 | save_state
24 |
--------------------------------------------------------------------------------
/gap8_app/ssd.mk:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2020 GreenWaves Technologies
2 | # All rights reserved.
3 |
4 | # This software may be modified and distributed under the terms
5 | # of the BSD license. See the LICENSE.BSD.md file for details.
6 |
7 | ifndef GAP_SDK_HOME
8 | $(error Source sourceme in gap_sdk first)
9 | endif
10 |
11 | include common.mk
12 |
13 | io=host
14 |
15 | MEAS?=0
16 | QUANT_BITS=8
17 | #BUILD_DIR=BUILD
18 | MODEL_SQ8=1
19 |
20 | $(info Building $(TARGET_CHIP_FAMILY) mode with $(QUANT_BITS) bit quantization)
21 |
22 | NNTOOL_SCRIPT=nntool_scripts/nntool_script_ssdlite
23 | TRAINED_TFLITE_MODEL=tflite_model/$(MODEL_PREFIX_SSD).tflite
24 | MODEL_PREFIX=$(MODEL_PREFIX_SSD)
25 | MODEL_SUFFIX=_SSD
26 | IMAGE=$(CURDIR)/tflite_model/test_1_out.ppm
27 | MAIN=$(MODEL_PREFIX_SSD).c
28 |
29 | MODEL_QUANTIZED =1
30 |
31 | include common/model_decl.mk
32 |
33 | MODEL_GENFLAGS_EXTRA+=
34 | NNTOOL_EXTRA_FLAGS += --use_hard_sigmoid
35 |
36 | CLUSTER_STACK_SIZE?=4096
37 | CLUSTER_SLAVE_STACK_SIZE?=1024
38 | ifeq '$(TARGET_CHIP_FAMILY)' 'GAP9'
39 | TOTAL_STACK_SIZE=$(shell expr $(CLUSTER_STACK_SIZE) \+ $(CLUSTER_SLAVE_STACK_SIZE) \* 8)
40 | FREQ_CL?=50
41 | FREQ_FC?=50
42 | MODEL_L1_MEMORY=$(shell expr 110000 \- $(TOTAL_STACK_SIZE))
43 | MODEL_L2_MEMORY=900000
44 | MODEL_L3_MEMORY=8388608
45 | else
46 | TOTAL_STACK_SIZE=$(shell expr $(CLUSTER_STACK_SIZE) \+ $(CLUSTER_SLAVE_STACK_SIZE) \* 7)
47 | ifeq '$(TARGET_CHIP)' 'GAP8_V3'
48 | FREQ_CL?=175
49 | else
50 | FREQ_CL?=50
51 | endif
52 | FREQ_FC?=250
53 | MODEL_L1_MEMORY=$(shell expr 64000 \- $(TOTAL_STACK_SIZE))
54 | MODEL_L2_MEMORY=250000
55 | MODEL_L3_MEMORY=8388608
56 | endif
57 | # hram - HyperBus RAM
58 | # qspiram - Quad SPI RAM
59 | MODEL_L3_EXEC=hram
60 | # hflash - HyperBus Flash
61 | # qpsiflash - Quad SPI Flash
62 | MODEL_L3_CONST=hflash
63 |
64 | APP = tin_can_ssd
65 | APP_SRCS += $(MAIN) $(MODEL_COMMON_SRCS) $(CNN_LIB)
66 |
67 | APP_CFLAGS += -O3
68 | APP_CFLAGS += -I. -I$(MODEL_COMMON_INC) -I$(TILER_EMU_INC) -I$(TILER_INC) $(CNN_LIB_INCLUDE)
69 | APP_SRCS += BUILD_MODEL_SSD/SSD_tin_can_bottleKernels.c
70 | APP_CFLAGS += -IBUILD_MODEL_SSD
71 |
72 | ifeq ($(platform), gvsoc)
73 | APP_CFLAGS += -DHAVE_LCD #-DPERF
74 | else
75 | ifeq ($(MEAS),1)
76 | APP_CFLAGS += -DSILENT -DMEASUREMENTS
77 | else
78 | APP_CFLAGS += -DHAVE_LCD -DHAVE_HIMAX -DSILENT
79 | endif
80 | endif
81 |
82 |
83 | APP_CFLAGS += -DSTACK_SIZE=$(CLUSTER_STACK_SIZE) -DSLAVE_STACK_SIZE=$(CLUSTER_SLAVE_STACK_SIZE)
84 | APP_CFLAGS += -DAT_IMAGE=$(IMAGE) -DFREQ_CL=$(FREQ_CL) -DFREQ_FC=$(FREQ_FC)
85 | APP_CFLAGS += -DAT_INPUT_HEIGHT_SSD=$(AT_INPUT_HEIGHT_SSD) -DAT_INPUT_WIDTH_SSD=$(AT_INPUT_WIDTH_SSD) -DAT_INPUT_COLORS_SSD=$(AT_INPUT_COLORS_SSD)
86 | #APP_CFLAGS += -DPERF
87 |
88 | READFS_FILES=$(abspath $(MODEL_TENSORS))
89 | PLPBRIDGE_FLAGS += -f
90 |
91 | # all depends on the model
92 | all:: model
93 |
94 |
95 | clean:: clean_model
96 |
97 | at_model_disp:: $(MODEL_BUILD) $(MODEL_GEN_EXE)
98 | $(MODEL_GEN_EXE) -o $(MODEL_BUILD) -c $(MODEL_BUILD) $(MODEL_GEN_EXTRA_FLAGS) --debug=Disp
99 |
100 | at_model:: $(MODEL_BUILD) $(MODEL_GEN_EXE)
101 | $(MODEL_GEN_EXE) -o $(MODEL_BUILD) -c $(MODEL_BUILD) $(MODEL_GEN_EXTRA_FLAGS)
102 |
103 | include common/model_rules.mk
104 | #$(info APP_SRCS... $(APP_SRCS))
105 | #$(info APP_CFLAGS... $(APP_CFLAGS))
106 | include $(RULES_DIR)/pmsis_rules.mk
107 |
--------------------------------------------------------------------------------
/gap8_app/tflite_model/SSD_tin_can_bottle.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/gap8_app/tflite_model/SSD_tin_can_bottle.tflite
--------------------------------------------------------------------------------
/gap8_app/tflite_model/output_camera.ppm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/gap8_app/tflite_model/output_camera.ppm
--------------------------------------------------------------------------------
/gap8_app/tflite_model/output_camera2.ppm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/gap8_app/tflite_model/output_camera2.ppm
--------------------------------------------------------------------------------
/gap8_app/tflite_model/test_1_out.ppm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/gap8_app/tflite_model/test_1_out.ppm
--------------------------------------------------------------------------------
/images/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/images/cover.png
--------------------------------------------------------------------------------
/images/dataset_samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/images/dataset_samples.png
--------------------------------------------------------------------------------
/images/exploration_policies.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pulp-platform/pulp-detector/ad62de3b6432f39a217287940ace1447ef68b78d/images/exploration_policies.png
--------------------------------------------------------------------------------
/training/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Tincan and bottle Detection
4 |
5 | In this repo there are the training, evaluation, and deployment scripts used for the deployment of an SSD-based object detector that works on bottles and tin cans.
6 |
7 | ## Setup
8 |
9 | To use this repo you will need to setup the python environment needed which can be done using the tensorflow1_15.yml file
10 | > conda env create -f tensorflow1_15.yml
11 |
12 | ~~ ~~
13 | # How to train a model in Tensorflow Obj Detection API
14 |
15 | Take a look at [Official TensorFlow Object Detection API Tutorial](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/index.html) to set up the environment and to have more insights.
16 |
17 | Look also to the git repo [README](https://github.com/tensorflow/models/tree/master/research/object_detection) file to have more examples and tutorials.
18 |
19 |
20 | ### OpenImagesV4 Dataset
21 | Open Images is a dataset of ~9M images annotated with image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives. It contains a total of 16M bounding boxes for 600 object classes on 1.9M images, making it the largest existing dataset with object location annotations.
22 |
23 | ### Download a subset of OpenImagesV4 Dataset
24 |
25 | There is a [GitHub Repo](https://github.com/EscVM/OIDv4_ToolKit) that allows downloading all the images of this dataset containing a specific class (and only that class!). The annotations of the images will include just labels and boxes for that class too (for example, I downloaded just images of license plates).
26 |
27 | Example of a command for executing OIDv4_ToolKit
28 |
29 | `python3 main.py downloader --classes classes.txt --type_csv all`
30 |
31 | > **--classes** : you specify what classes you want to download (write the corresponding label). if the class name has a space in it , like "polar bear", you must write it with **underscores** "polar_bear". To download multiple classes, you can create a classes.txt file (and give this to the --classes opt) in which each line corresponds to a class name.
32 | >
33 | >**--type_cvs** : you can select "train", "test", "validation", "all". Selecting "all" you will download 3 folders with images divided into train, valid and test sets (so you are downloading all the images available for your class)
34 |
35 | We provide a simple data-augmentation utility which attaches a black strip of varying length to the rightmost side of an image.
36 |
37 |
38 |
39 |
40 | ### TFRecord generation
41 |
42 | There is a [GitHub Repo](https://github.com/zamblauskas/oidv4-toolkit-tfrecord-generator/blob/master/README.md) that gives an easy script for generating the TFRecords of the OIDv4 subset downloaded.
43 |
44 | `python generate-tfrecord.py --classes_file=classes.txt --class_descriptions_file=class-descriptions-boxable.csv --annotations_file=train-annotations-bbox.csv --images_dir=train_images_folder/ --output_file=train.tfrecord`
45 |
46 | **IMPORTANT:** Here the classes.txt file doesn't want underscores instead of white spaces!!! (unlike the dataset downloader OIDv4ToolKit!)
47 | For example, you have to write again "polar bear" instead of "polar_bear".
48 |
49 | ### Label Map
50 |
51 | TensorFlow requires a label map, which maps each of the used labels to an integer value. This label map is used both by the training and detection processes.
52 |
53 | The Label Maps for standard datasets can be found in the tensorflowAPI repository at `models/research/object_detection/data`
54 |
55 | The classes included in the label map should be exactly the ones that you are training on. If you set to train on just 1 class, then leave only that class in the label_map.pbtxt file with `id: 1`.
56 |
57 | ### Configuration
58 |
59 | How to setup the training/config_file.config file.
60 |
61 | You can find all the default config files in `models/research/object_detection/samples/configs` folder. Make sure to set correctly all the paths (search for "PATH_TO_BE_CONFIGURED" to find the fields).
62 |
63 | More details on the essential fields to set can be found [here](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/training.html#configuring-a-training-pipeline) and at the "Useful stuff" paragraph.
64 |
65 | We provide the configuration files for both quantization-aware training and normal training.
66 |
67 |
68 | ### Metrics
69 |
70 | All the metrics available are declared in `models/research/object_detection/eval_util.py`.
71 |
72 | By default, [COCO metrics](http://cocodataset.org/#detection-eval) are used.
73 |
74 | You can look at the [Tensorflow model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) for reference accuracy of the mAP metric over COCO and OpenImages dataset
75 |
76 | ### Training
77 |
78 | Use the `train_eval_model_main.py` script to train your model. It will save checkpoints and tensorflow events that will keep trace of the training process.
79 |
80 |
81 | Example training command:
82 |
83 | `python train_eval_model_main.py --pipeline_config_path=config/ssd_mobilenet_v2_oid_v4.config --model_dir=training/ --alsologtostderr`
84 |
85 | > **--model_dir** : where checkpoints and tensorboard logs will be saved
86 |
87 |
88 | ### Testing
89 | Use the `train_eval_model_main.py` script to test your model.
90 |
91 | Example testing command:
92 | ```python train_eval_model_main.py --pipeline_config_path=configs/ssd_mobilenet_v2_oid_v4_copy.config --checkpoint_dir=training/ --run_once```
93 |
94 | > **--checkpoint_dir** : directory where the checkpoints have been saved; by default the latest one is used.
95 |
96 |
97 | ### FROZEN GRAPH EXPORT
98 |
99 | The checkpoints produced by the training of the neural network need to be exported in a format that can be converted to tflite for later deployment. First of all, you'll need to use the export_tflite_ssd_graph.py Python script, which you can find in the training directory. An example:
100 | `python export_tflite_ssd_graph.py --trained_checkpoint_prefix ./model.ckpt-### --output_directory ./ --pipeline_config_path ./configs/ssd_mobilenet_v2_oid_v4.config`
101 |
102 |
103 | It exports a file called tflite_graph.pb in the indicated directory (here, the current one).
104 |
105 | ## TFLITE CONVERSION
106 |
107 | The conversion of the frozen graph can be done using the tflite_convert command. Usage example:
108 |
109 | `tflite_convert --graph_def_file=tflite_graph.pb --output_file=graph.tflite --inference_type=QUANTIZED_UINT8 --input_arrays=normalized_input_image_tensor --output_arrays=TFLite_Detection_PostProcess,TFLite_Detection_PostProcess:1,TFLite_Detection_PostProcess:2,TFLite_Detection_PostProcess:3 --mean_values=128 --std_dev_values=127.5 --input_shapes=1,240,320,3 --allow_custom_ops --inference_input_type=QUANTIZED_UINT8`
110 |
--------------------------------------------------------------------------------
/training/classes.txt:
--------------------------------------------------------------------------------
1 | Bottle
2 | Tin_can
3 |
--------------------------------------------------------------------------------
/training/configs/oid_v4_label_map_bottle_and_tin_can.pbtxt:
--------------------------------------------------------------------------------
1 | item {
2 | name: "/m/04dr76w"
3 | id: 1
4 | display_name: "Bottle"
5 | }
6 |
7 | item {
8 | name: "/m/02jnhm"
9 | id: 2
10 | display_name: "Tin can"
11 | }
--------------------------------------------------------------------------------
/training/configs/ssd_mobilenet_v2_oid_v4.config:
--------------------------------------------------------------------------------
1 | # SSD with Mobilenet v2 configuration for OpenImages V4 Dataset.
2 | # Users should configure the fine_tune_checkpoint field in the train config as
3 | # well as the label_map_path and input_path fields in the train_input_reader and
4 | # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
5 | # should be configured.
6 |
7 | model {
8 | ssd {
9 | num_classes: 2 # SET HOW MANY CLASSES YOU HAVE
10 | box_coder {
11 | faster_rcnn_box_coder {
12 | y_scale: 10.0
13 | x_scale: 10.0
14 | height_scale: 5.0
15 | width_scale: 5.0
16 | }
17 | }
18 | matcher {
19 | argmax_matcher {
20 | matched_threshold: 0.5
21 | unmatched_threshold: 0.5
22 | ignore_thresholds: false
23 | negatives_lower_than_unmatched: true
24 | force_match_for_each_row: true
25 | }
26 | }
27 | similarity_calculator {
28 | iou_similarity {
29 | }
30 | }
31 | anchor_generator {
32 | ssd_anchor_generator {
33 | num_layers: 6
34 | min_scale: 0.2
35 | max_scale: 0.95
36 | aspect_ratios: 1.0
37 | aspect_ratios: 2.0
38 | aspect_ratios: 0.5
39 | aspect_ratios: 3.0
40 | aspect_ratios: 0.3333
41 | }
42 | }
43 | image_resizer {
44 | fixed_shape_resizer {
45 | height: 240
46 | width: 320
47 | }
48 | }
49 | box_predictor {
50 | convolutional_box_predictor {
51 | min_depth: 0
52 | max_depth: 0
53 | num_layers_before_predictor: 0
54 | use_dropout: false
55 | dropout_keep_probability: 0.8
56 | kernel_size: 1
57 | box_code_size: 4
58 | apply_sigmoid_to_scores: false
59 | conv_hyperparams {
60 | activation: RELU_6,
61 | regularizer {
62 | l2_regularizer {
63 | weight: 0.00004
64 | }
65 | }
66 | initializer {
67 | truncated_normal_initializer {
68 | stddev: 0.03
69 | mean: 0.0
70 | }
71 | }
72 | batch_norm {
73 | train: true,
74 | scale: true,
75 | center: true,
76 | decay: 0.9997,
77 | epsilon: 0.001,
78 | }
79 | }
80 | }
81 | }
82 | feature_extractor {
83 | type: 'ssd_mobilenet_v2' # Set to the name of your chosen pre-trained model
84 | min_depth: 16
85 | depth_multiplier: 1
86 | conv_hyperparams {
87 | activation: RELU_6,
88 | regularizer {
89 | l2_regularizer {
90 | weight: 0.00004
91 | }
92 | }
93 | initializer {
94 | truncated_normal_initializer {
95 | stddev: 0.03
96 | mean: 0.0
97 | }
98 | }
99 | batch_norm {
100 | train: true,
101 | scale: true,
102 | center: true,
103 | decay: 0.9997,
104 | epsilon: 0.001,
105 | }
106 | }
107 | }
108 | loss {
109 | classification_loss {
110 | weighted_sigmoid {
111 | }
112 | }
113 | localization_loss {
114 | weighted_smooth_l1 {
115 | }
116 | }
117 | hard_example_miner {
118 | num_hard_examples: 3000
119 | iou_threshold: 0.99
120 | loss_type: CLASSIFICATION
121 | max_negatives_per_positive: 3
122 | min_negatives_per_image: 3
123 | }
124 | classification_weight: 1.0
125 | localization_weight: 1.0
126 | }
127 | normalize_loss_by_num_matches: true
128 | post_processing {
129 | batch_non_max_suppression {
130 | score_threshold: 1e-5
131 | iou_threshold: 0.5
132 | max_detections_per_class: 100
133 | max_total_detections: 100
134 | }
135 | score_converter: SIGMOID
136 | }
137 | }
138 | }
139 |
140 | train_config: {
141 | batch_size: 24 # Increase/Decrease this value depending on the available memory (Higher values require more memory and vice-versa)
142 | optimizer {
143 | rms_prop_optimizer: {
144 | learning_rate: {
145 | exponential_decay_learning_rate {
146 | initial_learning_rate: 0.0001
147 | decay_steps: 100
148 | decay_factor: 0.95
149 | }
150 | }
151 | momentum_optimizer_value: 0.9
152 | decay: 0.9
153 | epsilon: 1.0
154 | }
155 | }
156 | gradient_clipping_by_norm: 10.0
157 | keep_checkpoint_every_n_hours: 24
158 | fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" # Path to extracted files of pre-trained model ### MODIFY HERE ###
159 | fine_tune_checkpoint_type: "detection" # FIXES ERROR: "ValueError: No variables to save"
160 | from_detection_checkpoint: true # FIXES ERROR: "ValueError: No variables to save" # UPDATE: Doesn't work
161 | load_all_detection_checkpoint_vars: true
162 | #freeze_variables: ".FeatureExtractor."
163 | num_steps: 4500
164 | data_augmentation_options {
165 | random_horizontal_flip {
166 | }
167 | }
168 | data_augmentation_options {
169 | ssd_random_crop {
170 | }
171 | }
172 | data_augmentation_options {
173 | random_rgb_to_gray {
174 | }
175 | }
176 | data_augmentation_options {
177 | random_adjust_brightness {
178 | }
179 | }
180 | }
181 |
182 | train_input_reader: {
183 |
184 |
185 |
186 |
187 | tf_record_input_reader {
188 | input_path:"PATH_TO_BE_CONFIGURED"# Path to training TFRecord file ### MODIFY HERE ###
189 | #input_path: "./labelimg/dataset/data/finetuning-2-train.record"
190 | }
191 | label_map_path: "./configs/oid_v4_label_map_bottle_and_tin_can.pbtxt" # Path to label map file ### MODIFY HERE ###
192 | }
193 |
194 | eval_config: {
195 | # metrics_set: "oid_V2_detection_metrics"
196 | metrics_set: "coco_detection_metrics"
197 | }
198 |
199 | eval_input_reader: {
200 | sample_1_of_n_examples: 10
201 | tf_record_input_reader {
202 | input_path:"PATH_TO_BE_CONFIGURED"# Path to testing TFRecord file ### MODIFY HERE ###
203 | # input_path: "../dataset/open_images_v4_dataset/validation.tfrecord" # Path to testing TFRecord file ### MODIFY HERE ###
204 | # input_path: "../dataset/open_images_v4_dataset/test.tfrecord" # Path to testing TFRecord file ### MODIFY HERE ###
205 | # input_path: "../dataset/open_images_v4_dataset/mytestset_test.record" # Path to my_dataset tfrecord file ### MODIFY HERE ###
206 | }
207 | label_map_path: "./configs/oid_v4_label_map_bottle_and_tin_can.pbtxt" # Path to label map file ### MODIFY HERE ###
208 | shuffle: false
209 | num_readers: 1
210 | }
211 |
212 |
213 |
214 |
--------------------------------------------------------------------------------
/training/configs/ssd_mobilenet_v2_oid_v4_qat.config:
--------------------------------------------------------------------------------
1 | # SSD with Mobilenet v2 configuration for OpenImages V4 Dataset.
2 | # Users should configure the fine_tune_checkpoint field in the train config as
3 | # well as the label_map_path and input_path fields in the train_input_reader and
4 | # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
5 | # should be configured.
6 |
7 | model {
8 | ssd {
9 | num_classes: 2 # SET HOW MANY CLASSES YOU HAVE
10 | box_coder {
11 | faster_rcnn_box_coder {
12 | y_scale: 10.0
13 | x_scale: 10.0
14 | height_scale: 5.0
15 | width_scale: 5.0
16 | }
17 | }
18 | matcher {
19 | argmax_matcher {
20 | matched_threshold: 0.5
21 | unmatched_threshold: 0.5
22 | ignore_thresholds: false
23 | negatives_lower_than_unmatched: true
24 | force_match_for_each_row: true
25 | }
26 | }
27 | similarity_calculator {
28 | iou_similarity {
29 | }
30 | }
31 | anchor_generator {
32 | ssd_anchor_generator {
33 | num_layers: 6
34 | min_scale: 0.2
35 | max_scale: 0.95
36 | aspect_ratios: 1.0
37 | aspect_ratios: 2.0
38 | aspect_ratios: 0.5
39 | aspect_ratios: 3.0
40 | aspect_ratios: 0.3333
41 | }
42 | }
43 | image_resizer {
44 | fixed_shape_resizer {
45 | height: 240
46 | width: 320
47 | }
48 | }
49 | box_predictor {
50 | convolutional_box_predictor {
51 | min_depth: 0
52 | max_depth: 0
53 | num_layers_before_predictor: 0
54 | use_dropout: false
55 | dropout_keep_probability: 0.8
56 | kernel_size: 1
57 | box_code_size: 4
58 | apply_sigmoid_to_scores: false
59 | conv_hyperparams {
60 | activation: RELU_6,
61 | regularizer {
62 | l2_regularizer {
63 | weight: 0.00004
64 | }
65 | }
66 | initializer {
67 | truncated_normal_initializer {
68 | stddev: 0.03
69 | mean: 0.0
70 | }
71 | }
72 | batch_norm {
73 | train: true,
74 | scale: true,
75 | center: true,
76 | decay: 0.9997,
77 | epsilon: 0.001,
78 | }
79 | }
80 | }
81 | }
82 | feature_extractor {
83 | type: 'ssd_mobilenet_v2' # Set to the name of your chosen pre-trained model
84 | min_depth: 16
85 | depth_multiplier: 1
86 | conv_hyperparams {
87 | activation: RELU_6,
88 | regularizer {
89 | l2_regularizer {
90 | weight: 0.00004
91 | }
92 | }
93 | initializer {
94 | truncated_normal_initializer {
95 | stddev: 0.03
96 | mean: 0.0
97 | }
98 | }
99 | batch_norm {
100 | train: true,
101 | scale: true,
102 | center: true,
103 | decay: 0.9997,
104 | epsilon: 0.001,
105 | }
106 | }
107 | }
108 | loss {
109 | classification_loss {
110 | weighted_sigmoid {
111 | }
112 | }
113 | localization_loss {
114 | weighted_smooth_l1 {
115 | }
116 | }
117 | hard_example_miner {
118 | num_hard_examples: 3000
119 | iou_threshold: 0.99
120 | loss_type: CLASSIFICATION
121 | max_negatives_per_positive: 3
122 | min_negatives_per_image: 3
123 | }
124 | classification_weight: 1.0
125 | localization_weight: 1.0
126 | }
127 | normalize_loss_by_num_matches: true
128 | post_processing {
129 | batch_non_max_suppression {
130 | score_threshold: 1e-5
131 | iou_threshold: 0.5
132 | max_detections_per_class: 100
133 | max_total_detections: 100
134 | }
135 | score_converter: SIGMOID
136 | }
137 | }
138 | }
139 |
140 | train_config: {
141 | batch_size: 24 # Increase/Decrease this value depending on the available memory (Higher values require more memory and vice-versa)
142 | optimizer {
143 | rms_prop_optimizer: {
144 | learning_rate: {
145 | exponential_decay_learning_rate {
146 | initial_learning_rate: 0.0001
147 | decay_steps: 100
148 | decay_factor: 0.95
149 | }
150 | }
151 | momentum_optimizer_value: 0.9
152 | decay: 0.9
153 | epsilon: 1.0
154 | }
155 | }
156 | gradient_clipping_by_norm: 10.0
157 | keep_checkpoint_every_n_hours: 24
158 | fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" # Path to extracted files of pre-trained model ### MODIFY HERE ###
159 | fine_tune_checkpoint_type: "detection" # FIXES ERROR: "ValueError: No variables to save"
160 | from_detection_checkpoint: true # FIXES ERROR: "ValueError: No variables to save" # UPDATE: Doesn't work
161 | load_all_detection_checkpoint_vars: true
162 | #freeze_variables: ".FeatureExtractor."
163 | num_steps: 4500
164 | data_augmentation_options {
165 | random_horizontal_flip {
166 | }
167 | }
168 | data_augmentation_options {
169 | ssd_random_crop {
170 | }
171 | }
172 | data_augmentation_options {
173 | random_rgb_to_gray {
174 | }
175 | }
176 | data_augmentation_options {
177 | random_adjust_brightness {
178 | }
179 | }
180 | }
181 |
182 | train_input_reader: {
183 |
184 |
185 |
186 |
187 | tf_record_input_reader {
188 | input_path: "PATH_TO_BE_CONFIGURED" # Path to training TFRecord file ### MODIFY HERE ###
189 | }
190 | label_map_path: "./configs/oid_v4_label_map_bottle_and_tin_can.pbtxt" # Path to label map file ### MODIFY HERE ###
191 | }
192 |
193 | eval_config: {
194 | # metrics_set: "oid_V2_detection_metrics"
195 | metrics_set: "coco_detection_metrics"
196 | }
197 |
198 | eval_input_reader: {
199 | sample_1_of_n_examples: 10
200 | tf_record_input_reader {
201 | input_path:"PATH_TO_BE_CONFIGURED" ### MODIFY HERE ###
202 | # input_path: "../dataset/open_images_v4_dataset/validation.tfrecord" # Path to testing TFRecord file ### MODIFY HERE ###
203 | # input_path: "../dataset/open_images_v4_dataset/test.tfrecord" # Path to testing TFRecord file ### MODIFY HERE ###
204 | # input_path: "../dataset/open_images_v4_dataset/mytestset_test.record" # Path to my_dataset tfrecord file ### MODIFY HERE ###
205 | }
206 | label_map_path: "./configs/oid_v4_label_map_bottle_and_tin_can.pbtxt" # Path to label map file ### MODIFY HERE ###
207 | shuffle: false
208 | num_readers: 1
209 | }
210 |
211 | graph_rewriter {
212 | quantization {
213 | delay: 0 #number of steps before quantization kicks in
214 | weight_bits: 8 #bits for weights
215 |     activation_bits: 8 # bits for activations
216 | symmetric: True # set the quantization to be symmetric (default false)
217 | }
218 | }
219 |
--------------------------------------------------------------------------------
/training/export_tflite_ssd_graph.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the LICENSE.Apache.md for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | r"""Exports an SSD detection model to use with tf-lite.
16 |
17 | Outputs file:
18 | * A tflite compatible frozen graph - $output_directory/tflite_graph.pb
19 |
20 | The exported graph has the following input and output nodes.
21 |
22 | Inputs:
23 | 'normalized_input_image_tensor': a float32 tensor of shape
24 | [1, height, width, 3] containing the normalized input image. Note that the
25 | height and width must be compatible with the height and width configured in
26 | the fixed_shape_image resizer options in the pipeline config proto.
27 |
28 | In floating point Mobilenet model, 'normalized_image_tensor' has values
29 | between [-1,1). This typically means mapping each pixel (linearly)
30 | to a value between [-1, 1]. Input image
31 | values between 0 and 255 are scaled by (1/128.0) and then a value of
32 | -1 is added to them to ensure the range is [-1,1).
33 | In quantized Mobilenet model, 'normalized_image_tensor' has values between [0,
34 | 255].
35 | In general, see the `preprocess` function defined in the feature extractor class
36 | in the object_detection/models directory.
37 |
38 | Outputs:
39 | If add_postprocessing_op is true: frozen graph adds a
40 | TFLite_Detection_PostProcess custom op node has four outputs:
41 | detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
42 | locations
43 | detection_classes: a float32 tensor of shape [1, num_boxes]
44 | with class indices
45 | detection_scores: a float32 tensor of shape [1, num_boxes]
46 | with class scores
47 | num_boxes: a float32 tensor of size 1 containing the number of detected boxes
48 | else:
49 | the graph has two outputs:
50 | 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
51 | containing the encoded box predictions.
52 | 'raw_outputs/class_predictions': a float32 tensor of shape
53 | [1, num_anchors, num_classes] containing the class scores for each anchor
54 | after applying score conversion.
55 |
56 | Example Usage:
57 | --------------
58 | python object_detection/export_tflite_ssd_graph \
59 | --pipeline_config_path path/to/ssd_mobilenet.config \
60 | --trained_checkpoint_prefix path/to/model.ckpt \
61 | --output_directory path/to/exported_model_directory
62 |
63 | The expected output would be in the directory
64 | path/to/exported_model_directory (which is created if it does not exist)
65 | with contents:
66 | - tflite_graph.pbtxt
67 | - tflite_graph.pb
68 | Config overrides (see the `config_override` flag) are text protobufs
69 | (also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
70 | certain fields in the provided pipeline_config_path. These are useful for
71 | making small changes to the inference graph that differ from the training or
72 | eval config.
73 |
74 | Example Usage (in which we change the NMS iou_threshold to be 0.5 and
75 | NMS score_threshold to be 0.0):
76 | python object_detection/export_tflite_ssd_graph \
77 | --pipeline_config_path path/to/ssd_mobilenet.config \
78 | --trained_checkpoint_prefix path/to/model.ckpt \
79 | --output_directory path/to/exported_model_directory
80 | --config_override " \
81 | model{ \
82 | ssd{ \
83 | post_processing { \
84 | batch_non_max_suppression { \
85 | score_threshold: 0.0 \
86 | iou_threshold: 0.5 \
87 | } \
88 | } \
89 | } \
90 | } \
91 | "
92 | """
93 |
94 | import tensorflow as tf
95 | from google.protobuf import text_format
96 | import sys
97 |
98 | # Add Tensorflow Object Detection API "models" directory to import libraries: https://github.com/tensorflow/models/
99 | sys.path.append('./external/tensorflow-api/research/')
100 | sys.path.append('./external/tensorflow-api/research/slim/')
101 |
102 | from object_detection import export_tflite_ssd_graph_lib
103 | from object_detection.protos import pipeline_pb2
104 |
105 | flags = tf.app.flags
106 | flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
107 | flags.DEFINE_string(
108 | 'pipeline_config_path', None,
109 | 'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
110 | 'file.')
111 | flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.')
112 | flags.DEFINE_integer('max_detections', 10,
113 | 'Maximum number of detections (boxes) to show.')
114 | flags.DEFINE_integer('max_classes_per_detection', 1,
115 | 'Maximum number of classes to output per detection box.')
116 | flags.DEFINE_integer(
117 | 'detections_per_class', 100,
118 | 'Number of anchors used per class in Regular Non-Max-Suppression.')
119 | flags.DEFINE_bool('add_postprocessing_op', True,
120 | 'Add TFLite custom op for postprocessing to the graph.')
121 | flags.DEFINE_bool(
122 | 'use_regular_nms', False,
123 | 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.')
124 | flags.DEFINE_string(
125 | 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '
126 | 'text proto to override pipeline_config_path.')
127 |
128 | FLAGS = flags.FLAGS
129 |
130 |
def main(argv):
  """Export the SSD detection graph described by the pipeline config.

  Reads the pipeline config file, layers any `--config_override` text proto
  on top of it, and hands the result to the export library.

  Args:
    argv: Unused positional command-line arguments.
  """
  del argv  # Unused.

  # These flags have no usable defaults, so fail fast when they are missing.
  for flag_name in ('output_directory', 'pipeline_config_path',
                    'trained_checkpoint_prefix'):
    flags.mark_flag_as_required(flag_name)

  # Build the pipeline config: file contents first, CLI overrides on top.
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
    text_format.Merge(config_file.read(), pipeline_config)
  text_format.Merge(FLAGS.config_override, pipeline_config)

  export_tflite_ssd_graph_lib.export_tflite_graph(
      pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory,
      FLAGS.add_postprocessing_op, FLAGS.max_detections,
      FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms)
146 |
147 |
148 | if __name__ == '__main__':
149 | tf.app.run(main)
150 |
--------------------------------------------------------------------------------
/training/graph_rewriter_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the LICENSE.Apache.md for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Functions for quantized training and evaluation."""
16 |
17 | import tensorflow as tf
18 |
19 |
def build(graph_rewriter_config, is_training):
  """Return a function that rewrites the default graph for quantization.

  Args:
    graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
    is_training: True when building the training graph, False for eval.

  Returns:
    A zero-argument callable that, when invoked, inserts fake-quantization
    ops into the current default graph.
  """
  def graph_rewrite_fn():
    """Function to quantize weights and activation of the default graph."""
    quant_config = graph_rewriter_config.quantization
    # tf.contrib.quantize only implements 8-bit fake quantization.
    if quant_config.weight_bits != 8 or quant_config.activation_bits != 8:
      raise ValueError('Only 8bit quantization is supported')

    # Insert quantize ops for weights and activations into the default graph.
    graph = tf.get_default_graph()
    if is_training:
      tf.contrib.quantize.experimental_create_training_graph(
          input_graph=graph,
          quant_delay=quant_config.delay,
          symmetric=quant_config.symmetric)
    else:
      tf.contrib.quantize.experimental_create_eval_graph(
          input_graph=graph,
          symmetric=quant_config.symmetric)

    tf.contrib.layers.summarize_collection('quant_vars')

  return graph_rewrite_fn
48 |
--------------------------------------------------------------------------------
/training/tensorflow1_15.yml:
--------------------------------------------------------------------------------
1 | name: tensorflow1.15
2 | channels:
3 | - conda-forge
4 | - anaconda
5 | - defaults
6 | dependencies:
7 | - _libgcc_mutex=0.1=main
8 | - _tflow_select=2.1.0=gpu
9 | - asn1crypto=1.3.0=py37_0
10 | - astroid=2.3.3=py37_0
11 | - attrs=19.3.0=py_0
12 | - backcall=0.1.0=py37_0
13 | - bleach=3.1.0=py37_0
14 | - bzip2=1.0.8=h7b6447c_0
15 | - c-ares=1.15.0=h516909a_1001
16 | - ca-certificates=2021.10.26=h06a4308_2
17 | - cairo=1.14.12=h8948797_3
18 | - certifi=2021.10.8=py37h06a4308_2
19 | - cffi=1.14.0=py37h2e261b9_0
20 | - chardet=3.0.4=py37_1003
21 | - cmake=3.14.0=h52cb24c_0
22 | - contextlib2=0.6.0.post1=py_0
23 | - cryptography=2.8=py37h1ba5d50_0
24 | - cudatoolkit=10.0.130=0
25 | - cudnn=7.6.5=cuda10.0_0
26 | - cupti=10.0.130=0
27 | - cycler=0.10.0=py37_0
28 | - dbus=1.13.12=h746ee38_0
29 | - decorator=4.4.2=py_0
30 | - defusedxml=0.6.0=py_0
31 | - entrypoints=0.3=py37_0
32 | - expat=2.2.6=he6710b0_0
33 | - ffmpeg=4.0=hcdf2ecd_0
34 | - fontconfig=2.13.0=h9420a91_0
35 | - freeglut=3.0.0=hf484d3e_5
36 | - freetype=2.9.1=h8a8886c_1
37 | - gast=0.2.2=py_0
38 | - glib=2.63.1=h5a9c865_0
39 | - gmp=6.1.2=h6c8ec71_1
40 | - google-pasta=0.2.0=pyh8c360ce_0
41 | - graphite2=1.3.13=h23475e2_0
42 | - gst-plugins-base=1.14.0=hbbd80ab_1
43 | - gstreamer=1.14.0=hb453b48_1
44 | - harfbuzz=1.8.8=hffaf4a1_0
45 | - hdf5=1.10.2=hba1933b_1
46 | - icu=58.2=h9c2bf20_1
47 | - idna=2.9=py_1
48 | - imutils=0.5.3=py37_1
49 | - ipykernel=5.1.4=py37h39e3cac_0
50 | - ipython=7.13.0=py37h5ca1d4c_0
51 | - ipython_genutils=0.2.0=py37_0
52 | - ipywidgets=7.5.1=py_0
53 | - isort=4.3.21=py37_0
54 | - jasper=2.0.14=h07fcdf6_1
55 | - jedi=0.16.0=py37_0
56 | - jinja2=2.11.1=py_0
57 | - jpeg=9b=h024ee3a_2
58 | - jsonschema=3.2.0=py37_0
59 | - jupyter=1.0.0=py37_7
60 | - jupyter_client=6.0.0=py_0
61 | - jupyter_console=6.1.0=py_0
62 | - jupyter_core=4.6.1=py37_0
63 | - keras-applications=1.0.8=py_1
64 | - kiwisolver=1.1.0=py37he6710b0_0
65 | - krb5=1.17.1=h173b8e3_0
66 | - lazy-object-proxy=1.4.3=py37h7b6447c_0
67 | - ld_impl_linux-64=2.33.1=h53a641e_7
68 | - libblas=3.8.0=14_openblas
69 | - libcblas=3.8.0=14_openblas
70 | - libcurl=7.69.1=h20c2e04_0
71 | - libedit=3.1.20181209=hc058e9b_0
72 | - libffi=3.2.1=hd88cf55_4
73 | - libgcc-ng=9.1.0=hdf63c60_0
74 | - libgfortran-ng=7.3.0=hdf63c60_5
75 | - libglu=9.0.0=hf484d3e_1
76 | - liblapack=3.8.0=14_openblas
77 | - libopenblas=0.3.7=h5ec1e0e_6
78 | - libopencv=3.4.2=hb342d67_1
79 | - libopus=1.3=h7b6447c_0
80 | - libpng=1.6.37=hbc83047_0
81 | - libprotobuf=3.11.4=h8b12597_0
82 | - libsodium=1.0.16=h1bed415_0
83 | - libssh2=1.9.0=h1ba5d50_1
84 | - libstdcxx-ng=9.1.0=hdf63c60_0
85 | - libtiff=4.1.0=h2733197_0
86 | - libuuid=1.0.3=h1bed415_2
87 | - libvpx=1.7.0=h439df22_0
88 | - libxcb=1.13=h1bed415_1
89 | - libxml2=2.9.9=hea5a465_1
90 | - libxslt=1.1.33=h7d1a2b0_0
91 | - lxml=4.5.0=py37hefd8a0e_0
92 | - markupsafe=1.1.1=py37h7b6447c_0
93 | - matplotlib=3.1.3=py37_0
94 | - matplotlib-base=3.1.3=py37hef1b27d_0
95 | - mccabe=0.6.1=py37_1
96 | - mistune=0.8.4=py37h7b6447c_0
97 | - nbconvert=5.6.1=py37_0
98 | - nbformat=5.0.4=py_0
99 | - ncurses=6.2=he6710b0_0
100 | - notebook=6.0.3=py37_0
101 | - olefile=0.46=py37_0
102 | - opencv=3.4.2=py37h6fd60c2_1
103 | - openssl=1.1.1m=h7f8727e_0
104 | - pandas=1.0.3=py37h0573a6f_0
105 | - pandoc=2.2.3.2=0
106 | - pandocfilters=1.4.2=py37_1
107 | - parso=0.6.2=py_0
108 | - pcre=8.43=he6710b0_0
109 | - pexpect=4.8.0=py37_0
110 | - pickleshare=0.7.5=py37_0
111 | - pip=20.0.2=py37_1
112 | - pixman=0.38.0=h7b6447c_0
113 | - prometheus_client=0.7.1=py_0
114 | - prompt_toolkit=3.0.3=py_0
115 | - ptyprocess=0.6.0=py37_0
116 | - py-opencv=3.4.2=py37hb342d67_1
117 | - pycparser=2.20=py_0
118 | - pygments=2.6.1=py_0
119 | - pylint=2.4.4=py37_0
120 | - pyopenssl=19.1.0=py37_0
121 | - pyparsing=2.4.6=py_0
122 | - pyqt=5.9.2=py37h05f1152_2
123 | - pyrsistent=0.15.7=py37h7b6447c_0
124 | - pysocks=1.7.1=py37_0
125 | - python=3.7.6=h0371630_2
126 | - python-dateutil=2.8.1=py_0
127 | - python_abi=3.7=1_cp37m
128 | - pytz=2019.3=py_0
129 | - pyzmq=18.1.1=py37he6710b0_0
130 | - qt=5.9.7=h5867ecd_1
131 | - qtconsole=4.7.1=py_0
132 | - qtpy=1.9.0=py_0
133 | - readline=7.0=h7b6447c_5
134 | - requests=2.23.0=py37_0
135 | - rhash=1.3.8=h1ba5d50_0
136 | - scipy=1.4.1=py37h921218d_0
137 | - send2trash=1.5.0=py37_0
138 | - sip=4.19.8=py37hf484d3e_0
139 | - sqlite=3.31.1=h7b6447c_0
140 | - tensorboard=1.15.0=py37_0
141 | - tensorflow-estimator=1.15.1=pyh2649769_0
142 | - terminado=0.8.3=py37_0
143 | - testpath=0.4.4=py_0
144 | - tk=8.6.8=hbc83047_0
145 | - tornado=6.0.4=py37h7b6447c_1
146 | - tqdm=4.43.0=py_0
147 | - traitlets=4.3.3=py37_0
148 | - urllib3=1.25.8=py37_0
149 | - wcwidth=0.1.8=py_0
150 | - webencodings=0.5.1=py37_1
151 | - widgetsnbextension=3.5.1=py37_0
152 | - xz=5.2.4=h14c3975_4
153 | - zeromq=4.3.1=he6710b0_3
154 | - zlib=1.2.11=h7b6447c_3
155 | - zstd=1.3.7=h0b5b093_0
156 | - pip:
157 | - absl-py==1.0.0
158 | - appdirs==1.4.4
159 | - argcomplete==1.10.0
160 | - astor==0.8.1
161 | - audioread==2.1.9
162 | - bfloat16==1.0
163 | - cached-property==1.5.2
164 | - cmd2==1.0.2
165 | - colorama==0.4.4
166 | - cython==0.29.21
167 | - grpcio==1.43.0
168 | - h5py==2.10.0
169 | - imageio==2.13.5
170 | - importlib-metadata==4.10.0
171 | - iniconfig==1.1.1
172 | - iteration-utilities==0.11.0
173 | - joblib==1.1.0
174 | - keras==2.3.1
175 | - keras-preprocessing==1.1.2
176 | - librosa==0.8.1
177 | - llvmlite==0.38.0
178 | - lvis==0.5.3
179 | - markdown==3.3.6
180 | - more-itertools==8.12.0
181 | - netron==5.4.1
182 | - networkx==2.6.3
183 | - numba==0.55.0
184 | - numpy==1.18.5
185 | - onnx==1.8.0
186 | - opencv-python==4.5.4.60
187 | - opt-einsum==3.3.0
188 | - packaging==21.3
189 | - pillow==8.3.2
190 | - pluggy==0.13.1
191 | - pooch==1.5.2
192 | - prettytable==0.7.2
193 | - protobuf==3.19.1
194 | - py==1.11.0
195 | - pycocotools==2.0.3
196 | - pyperclip==1.8.2
197 | - pytest==6.0.1
198 | - python-graphviz==0.16
199 | - pywavelets==1.2.0
200 | - pyyaml==6.0
201 | - resampy==0.2.2
202 | - scikit-image==0.17.2
203 | - scikit-learn==0.21.3
204 | - setuptools==60.5.0
205 | - six==1.16.0
206 | - soundfile==0.10.3.post1
207 | - tensorflow==1.15.5
208 | - tensorflow-gpu==1.15.5
209 | - termcolor==1.1.0
210 | - texttable==1.6.2
211 | - tf-slim==1.1.0
212 | - tifffile==2021.11.2
213 | - toml==0.10.2
214 | - typing==3.7.4.1
215 | - typing-extensions==4.0.1
216 | - werkzeug==2.0.2
217 | - wheel==0.37.1
218 | - wrapt==1.13.3
219 | - xlsxwriter==1.1.5
220 | - xxhash==1.3.0
221 | - zipp==3.7.0
222 | prefix: /home/bomps/anaconda3/envs/tensorflow1.15
223 |
--------------------------------------------------------------------------------
/training/train_eval_model_main.py:
--------------------------------------------------------------------------------
1 | # ==============================================================================
2 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the LICENSE.Apache.md for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 |
17 | # Modified by:
18 | # Lorenzo Lamberti
19 | # Luca Bompani
20 | # Date: 01.04.2023
21 |
22 | # Example commands:
23 | # training: CUDA_VISIBLE_DEVICES=1 python train_eval_model_main.py --model_dir=./training/ --pipeline_config_path=configs/ssd_mobilenet_v2_oid_v4.config --alsologtostderr
24 | # evaluation: CUDA_VISIBLE_DEVICES=1 python train_eval_model_main.py --pipeline_config_path=configs/ssd_mobilenet_v2_oid_v4.config --checkpoint_dir=./trained-inference-graphs/output_inference_graph_320x240.pb --run_once
25 |
26 | """Binary to run train and evaluation on object detection model."""
27 |
28 | from __future__ import absolute_import
29 | from __future__ import division
30 | from __future__ import print_function
31 |
32 |
33 | import os
34 | import sys
35 | # Add Tensorflow Object Detection API "models" directory to import libraries: https://github.com/tensorflow/models/
36 | sys.path.append('./external/tensorflow-api/research/')
37 | sys.path.append('./external/tensorflow-api/research/slim/')
38 | from absl import flags
39 |
40 | # TensorFlow
41 | import tensorflow as tf
42 | # TensorFlow API
43 | from object_detection import model_hparams
44 | from object_detection import model_lib
45 |
# Command-line flags (absl). They configure how the estimator below is built
# and whether the binary runs in train+eval or eval-only mode.
flags.DEFINE_string(
    'model_dir', None, 'Path to output model directory '
    'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                    'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
                     'If training data should be evaluated for this job. Note '
                     'that one can only use this in eval-only mode, and '
                     '`checkpoint_dir` must be supplied.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
                     'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
                     'one of every n train input examples for evaluation, '
                     'where n is provided. This is only used if '
                     '`eval_training_data` is True.')
flags.DEFINE_string(
    'hparams_overrides', None, 'Hyperparameter overrides, '
    'represented as a string containing comma-separated '
    'hparam_name=value pairs.')
flags.DEFINE_string(
    'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
    '`checkpoint_dir` is provided, this binary operates in eval-only mode, '
    'writing resulting metrics to `model_dir`.')
flags.DEFINE_boolean(
    'run_once', False, 'If running in eval-only mode, whether to run just '
    'one round of eval vs running continuously (default).'
)
FLAGS = flags.FLAGS
75 |
76 |
def main(unused_argv):
  """Run training or evaluation of the object-detection model.

  Behavior is selected by the flags: when `--checkpoint_dir` is set the
  binary operates in eval-only mode (a single round if `--run_once`,
  otherwise continuous evaluation); without it the standard
  train-and-evaluate loop is executed.
  """
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

  # Builds the tf.estimator.Estimator plus every input_fn needed below from
  # the pipeline config.
  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
      sample_1_of_n_eval_on_train_examples=(
          FLAGS.sample_1_of_n_eval_on_train_examples))
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  predict_input_fn = train_and_eval_dict['predict_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  if FLAGS.checkpoint_dir:
    # Eval-only mode: pick which split to evaluate.
    if FLAGS.eval_training_data:
      name = 'training_data'
      input_fn = eval_on_train_input_fn
    else:
      name = 'validation_data'
      # The first eval input will be evaluated.
      input_fn = eval_input_fns[0]
    if FLAGS.run_once:
      # Evaluate the latest checkpoint exactly once.
      estimator.evaluate(input_fn,
                         steps=None,
                         checkpoint_path=tf.train.latest_checkpoint(
                             FLAGS.checkpoint_dir))
    else:
      # Poll `checkpoint_dir` and evaluate each new checkpoint as it appears.
      model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
                                train_steps, name)
  else:
    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fns,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_on_train_data=False)

    # Currently only a single Eval Spec is allowed.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
130 |
if __name__ == '__main__':
  # tf.app.run() parses the absl flags defined above and then calls main().
  tf.app.run()
133 |
--------------------------------------------------------------------------------
/training/utils/data-augmentation.py:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------#
2 | # Copyright (C) 2023 University of Bologna, Italy, ETH Zürich, Switzerland. #
3 | # All rights reserved. #
4 | # #
5 | # Licensed under the Apache License, Version 2.0 (the "License"); #
6 | # you may not use this file except in compliance with the License. #
7 | # See LICENSE.apache.md in the top directory for details. #
8 | # You may obtain a copy of the License at #
9 | # #
10 | # http://www.apache.org/licenses/LICENSE-2.0 #
11 | # #
12 | # Unless required by applicable law or agreed to in writing, software #
13 | # distributed under the License is distributed on an "AS IS" BASIS, #
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
15 | # See the License for the specific language governing permissions and #
16 | # limitations under the License. #
17 | # #
# File: data-augmentation.py                                                    #
19 | # Authors: #
20 | # Lorenzo Lamberti #
21 | # Luca Bompani #
22 | # Manuele Rusci #
23 | # Daniele Palossi #
24 | # Date: 01.04.2023 #
25 | #-------------------------------------------------------------------------------#
26 |
# Description:
# this script generates 5 new images for each image of the tin can's training set (of Open Images)
# by translating each image horizontally by a percentage of its width (2%, 4%, 6%, 8%, 10%).
30 |
31 | from PIL import Image
32 | import matplotlib.pyplot as plt
33 | import os
34 | import pandas as pd
35 | from copy import copy
36 |
def roll(image, delta):
    """Shift an image to the left, leaving black pixels on the right.

    Args:
        image: source PIL.Image.
        delta: horizontal shift expressed as a fraction of the image width
            (e.g. 0.02 shifts by 2%). Wrapped modulo the width.

    Returns:
        A new RGB PIL.Image of the same size with the content shifted left
        and the vacated right strip black. If the shift rounds to zero
        pixels, the original ``image`` object is returned unchanged.
    """
    xsize, ysize = image.size

    # Convert the fractional shift to whole pixels; wrap around the width so
    # any delta maps into [0, xsize).
    shift = int(delta * xsize) % xsize
    if shift == 0:
        return image

    # Image.new() starts fully black, so the uncovered right strip stays black.
    new_im = Image.new("RGB", (xsize, ysize))
    kept = image.crop((shift, 0, xsize, ysize))
    new_im.paste(kept, (0, 0, xsize - shift, ysize))
    # NOTE: to wrap the cut left strip around to the right instead of black,
    # paste image.crop((0, 0, shift, ysize)) at (xsize - shift, 0).
    return new_im
54 |
# Open Images bounding-box annotation file, e.g. "train-annotations-bbox.csv".
annotation = pd.read_csv("annotations.csv")
total_row = annotation.shape[0]

print("begin")

# Collect the shifted annotation rows in a plain list and concatenate once at
# the end: DataFrame.append() was removed in pandas 2.0, and growing the frame
# row-by-row is quadratic. The original rows are iterated exactly once and the
# new rows land at the end, matching the previous behavior.
augmented_rows = []
for row_idx, row in annotation.iterrows():
    print(row_idx, total_row)  # progress indicator
    # "/m/02jnhm" is the Open Images label for tin cans; change it to target a
    # different class of interest.
    if row['LabelName'] == "/m/02jnhm":
        for i in range(5):  # one copy per shift step; change 5 for more/fewer
            to_append = copy(row)
            # The image loop below saves the shifted image as
            # "<i><original name>", so the annotation must use that new ID.
            to_append['ImageID'] = str(i) + to_append['ImageID']
            # Coordinates are normalized to [0, 1]; shift the box left by the
            # same fraction the image is rolled.
            to_append['XMin'] -= (i + 1) * 0.02
            to_append['XMax'] -= (i + 1) * 0.02

            if to_append['XMin'] > 0:
                # Box still fully inside the shifted image: keep it as-is.
                augmented_rows.append(to_append)
            elif to_append['XMin'] < 0 and to_append['XMax'] > 0:
                # Box partially pushed off the left edge: clamp to the border.
                to_append['XMin'] = 0
                augmented_rows.append(to_append)
            # Boxes shifted completely out of view are dropped.

if augmented_rows:
    annotation = pd.concat([annotation, pd.DataFrame(augmented_rows)],
                           ignore_index=True)

annotation.sort_values('ImageID', inplace=True)
annotation.to_csv("annotations-da-black-border.csv", index=False)
91 |
92 |
print("middle")

# Generate the shifted copies of every image in ./images, saving each one next
# to the original with the shift index prepended to the file name (matching
# the ImageID prefix written into the augmented annotations).
for image in os.listdir("images"):  # directory with the images to augment
    f = os.path.join("images", image)
    # Skip sub-directories and other non-file entries.
    if os.path.isfile(f):
        # Open each source image once (not once per shift) and close its file
        # handle deterministically via the context manager.
        with Image.open(f) as im:
            for i in range(5):
                shifted = roll(im, (i + 1) * 0.02)
                shifted.save(os.path.join("images", str(i) + image))

print("end of script")
--------------------------------------------------------------------------------