├── .gitignore
├── LICENSE
├── README.md
├── _config.yml
├── _includes
│   └── head-custom.html
├── articles.md
├── assets
│   └── css
│       └── style.scss
├── codes
│   ├── MMDet3d_tutorials
│   │   ├── 2 inference-camera.ipynb
│   │   ├── 2 inference-pcd.ipynb
│   │   ├── 3 KITTI点云数据读取和可视化.ipynb
│   │   ├── 4 inference_demo.ipynb
│   │   ├── 5 train.ipynb
│   │   ├── README.md
│   │   ├── game.json
│   │   ├── game.png
│   │   └── myconfig.py
│   ├── MMEval_tutorials
│   │   ├── 1-MMEval-介绍.ipynb
│   │   ├── 2-MMEval-使用.ipynb
│   │   ├── 3-MMEval-添加评测指标.ipynb
│   │   └── cifar10_dist_eval
│   │       ├── README.md
│   │       ├── cifar10_eval.py
│   │       ├── cifar10_eval_mpi4py.py
│   │       ├── cifar10_eval_torch_dist.py
│   │       └── train_cifar10.ipynb
│   ├── MMSelfSup_tutorials
│   │   ├── anno_files
│   │   │   ├── train.txt
│   │   │   └── val.txt
│   │   ├── img
│   │   │   ├── MAE.png
│   │   │   ├── SimCLR.png
│   │   │   └── mmselfsup_logo.png
│   │   ├── 【1】模型自监督预训练 之 SimCLR.ipynb
│   │   ├── 【2】图片向量可视化 t-SNE.ipynb
│   │   ├── 【3】自监督预训练模型的评估:“分类” 下游任务 之 线性评估.ipynb
│   │   ├── 【4】自监督预训练模型的评估:“分类” 下游任务 之 SVM 评估.ipynb
│   │   ├── 【5】自监督预训练模型的评估:“检测”下游任务.ipynb
│   │   ├── 【6】在 MMDetection 中使用自监督预训练模型.ipynb
│   │   └── 【7】模型自监督预训练 之 MAE.ipynb
│   ├── MMYOLO_tutorials
│   │   ├── [实用类第二期]10分钟换遍主干网络.ipynb
│   │   ├── [实用类第四期]顶会第一步·模块自定义.ipynb
│   │   ├── [工具类第一期]特征图可视化.ipynb
│   │   └── [工具类第二期]10分钟轻松掌握大图推理.ipynb
│   ├── lec2.ipynb
│   ├── lec3.ipynb
│   ├── lec4.ipynb
│   ├── lec5.ipynb
│   ├── lec6.ipynb
│   ├── lec7.ipynb
│   └── lec8.ipynb
├── lecture_sjtu.md
├── mmeval.md
├── mmyolo.md
└── model_diagrams.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Attribution-NonCommercial-ShareAlike 4.0 International
2 |
3 | =======================================================================
4 |
5 | Creative Commons Corporation ("Creative Commons") is not a law firm and
6 | does not provide legal services or legal advice. Distribution of
7 | Creative Commons public licenses does not create a lawyer-client or
8 | other relationship. Creative Commons makes its licenses and related
9 | information available on an "as-is" basis. Creative Commons gives no
10 | warranties regarding its licenses, any material licensed under their
11 | terms and conditions, or any related information. Creative Commons
12 | disclaims all liability for damages resulting from their use to the
13 | fullest extent possible.
14 |
15 | Using Creative Commons Public Licenses
16 |
17 | Creative Commons public licenses provide a standard set of terms and
18 | conditions that creators and other rights holders may use to share
19 | original works of authorship and other material subject to copyright
20 | and certain other rights specified in the public license below. The
21 | following considerations are for informational purposes only, are not
22 | exhaustive, and do not form part of our licenses.
23 |
24 | Considerations for licensors: Our public licenses are
25 | intended for use by those authorized to give the public
26 | permission to use material in ways otherwise restricted by
27 | copyright and certain other rights. Our licenses are
28 | irrevocable. Licensors should read and understand the terms
29 | and conditions of the license they choose before applying it.
30 | Licensors should also secure all rights necessary before
31 | applying our licenses so that the public can reuse the
32 | material as expected. Licensors should clearly mark any
33 | material not subject to the license. This includes other CC-
34 | licensed material, or material used under an exception or
35 | limitation to copyright. More considerations for licensors:
36 | wiki.creativecommons.org/Considerations_for_licensors
37 |
38 | Considerations for the public: By using one of our public
39 | licenses, a licensor grants the public permission to use the
40 | licensed material under specified terms and conditions. If
41 | the licensor's permission is not necessary for any reason--for
42 | example, because of any applicable exception or limitation to
43 | copyright--then that use is not regulated by the license. Our
44 | licenses grant only permissions under copyright and certain
45 | other rights that a licensor has authority to grant. Use of
46 | the licensed material may still be restricted for other
47 | reasons, including because others have copyright or other
48 | rights in the material. A licensor may make special requests,
49 | such as asking that all changes be marked or described.
50 | Although not required by our licenses, you are encouraged to
51 | respect those requests where reasonable. More considerations
52 | for the public:
53 | wiki.creativecommons.org/Considerations_for_licensees
54 |
55 | =======================================================================
56 |
57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
58 | Public License
59 |
60 | By exercising the Licensed Rights (defined below), You accept and agree
61 | to be bound by the terms and conditions of this Creative Commons
62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License
63 | ("Public License"). To the extent this Public License may be
64 | interpreted as a contract, You are granted the Licensed Rights in
65 | consideration of Your acceptance of these terms and conditions, and the
66 | Licensor grants You such rights in consideration of benefits the
67 | Licensor receives from making the Licensed Material available under
68 | these terms and conditions.
69 |
70 |
71 | Section 1 -- Definitions.
72 |
73 | a. Adapted Material means material subject to Copyright and Similar
74 | Rights that is derived from or based upon the Licensed Material
75 | and in which the Licensed Material is translated, altered,
76 | arranged, transformed, or otherwise modified in a manner requiring
77 | permission under the Copyright and Similar Rights held by the
78 | Licensor. For purposes of this Public License, where the Licensed
79 | Material is a musical work, performance, or sound recording,
80 | Adapted Material is always produced where the Licensed Material is
81 | synched in timed relation with a moving image.
82 |
83 | b. Adapter's License means the license You apply to Your Copyright
84 | and Similar Rights in Your contributions to Adapted Material in
85 | accordance with the terms and conditions of this Public License.
86 |
87 | c. BY-NC-SA Compatible License means a license listed at
88 | creativecommons.org/compatiblelicenses, approved by Creative
89 | Commons as essentially the equivalent of this Public License.
90 |
91 | d. Copyright and Similar Rights means copyright and/or similar rights
92 | closely related to copyright including, without limitation,
93 | performance, broadcast, sound recording, and Sui Generis Database
94 | Rights, without regard to how the rights are labeled or
95 | categorized. For purposes of this Public License, the rights
96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar
97 | Rights.
98 |
99 | e. Effective Technological Measures means those measures that, in the
100 | absence of proper authority, may not be circumvented under laws
101 | fulfilling obligations under Article 11 of the WIPO Copyright
102 | Treaty adopted on December 20, 1996, and/or similar international
103 | agreements.
104 |
105 | f. Exceptions and Limitations means fair use, fair dealing, and/or
106 | any other exception or limitation to Copyright and Similar Rights
107 | that applies to Your use of the Licensed Material.
108 |
109 | g. License Elements means the license attributes listed in the name
110 | of a Creative Commons Public License. The License Elements of this
111 | Public License are Attribution, NonCommercial, and ShareAlike.
112 |
113 | h. Licensed Material means the artistic or literary work, database,
114 | or other material to which the Licensor applied this Public
115 | License.
116 |
117 | i. Licensed Rights means the rights granted to You subject to the
118 | terms and conditions of this Public License, which are limited to
119 | all Copyright and Similar Rights that apply to Your use of the
120 | Licensed Material and that the Licensor has authority to license.
121 |
122 | j. Licensor means the individual(s) or entity(ies) granting rights
123 | under this Public License.
124 |
125 | k. NonCommercial means not primarily intended for or directed towards
126 | commercial advantage or monetary compensation. For purposes of
127 | this Public License, the exchange of the Licensed Material for
128 | other material subject to Copyright and Similar Rights by digital
129 | file-sharing or similar means is NonCommercial provided there is
130 | no payment of monetary compensation in connection with the
131 | exchange.
132 |
133 | l. Share means to provide material to the public by any means or
134 | process that requires permission under the Licensed Rights, such
135 | as reproduction, public display, public performance, distribution,
136 | dissemination, communication, or importation, and to make material
137 | available to the public including in ways that members of the
138 | public may access the material from a place and at a time
139 | individually chosen by them.
140 |
141 | m. Sui Generis Database Rights means rights other than copyright
142 | resulting from Directive 96/9/EC of the European Parliament and of
143 | the Council of 11 March 1996 on the legal protection of databases,
144 | as amended and/or succeeded, as well as other essentially
145 | equivalent rights anywhere in the world.
146 |
147 | n. You means the individual or entity exercising the Licensed Rights
148 | under this Public License. Your has a corresponding meaning.
149 |
150 |
151 | Section 2 -- Scope.
152 |
153 | a. License grant.
154 |
155 | 1. Subject to the terms and conditions of this Public License,
156 | the Licensor hereby grants You a worldwide, royalty-free,
157 | non-sublicensable, non-exclusive, irrevocable license to
158 | exercise the Licensed Rights in the Licensed Material to:
159 |
160 | a. reproduce and Share the Licensed Material, in whole or
161 | in part, for NonCommercial purposes only; and
162 |
163 | b. produce, reproduce, and Share Adapted Material for
164 | NonCommercial purposes only.
165 |
166 | 2. Exceptions and Limitations. For the avoidance of doubt, where
167 | Exceptions and Limitations apply to Your use, this Public
168 | License does not apply, and You do not need to comply with
169 | its terms and conditions.
170 |
171 | 3. Term. The term of this Public License is specified in Section
172 | 6(a).
173 |
174 | 4. Media and formats; technical modifications allowed. The
175 | Licensor authorizes You to exercise the Licensed Rights in
176 | all media and formats whether now known or hereafter created,
177 | and to make technical modifications necessary to do so. The
178 | Licensor waives and/or agrees not to assert any right or
179 | authority to forbid You from making technical modifications
180 | necessary to exercise the Licensed Rights, including
181 | technical modifications necessary to circumvent Effective
182 | Technological Measures. For purposes of this Public License,
183 | simply making modifications authorized by this Section 2(a)
184 | (4) never produces Adapted Material.
185 |
186 | 5. Downstream recipients.
187 |
188 | a. Offer from the Licensor -- Licensed Material. Every
189 | recipient of the Licensed Material automatically
190 | receives an offer from the Licensor to exercise the
191 | Licensed Rights under the terms and conditions of this
192 | Public License.
193 |
194 | b. Additional offer from the Licensor -- Adapted Material.
195 | Every recipient of Adapted Material from You
196 | automatically receives an offer from the Licensor to
197 | exercise the Licensed Rights in the Adapted Material
198 | under the conditions of the Adapter's License You apply.
199 |
200 | c. No downstream restrictions. You may not offer or impose
201 | any additional or different terms or conditions on, or
202 | apply any Effective Technological Measures to, the
203 | Licensed Material if doing so restricts exercise of the
204 | Licensed Rights by any recipient of the Licensed
205 | Material.
206 |
207 | 6. No endorsement. Nothing in this Public License constitutes or
208 | may be construed as permission to assert or imply that You
209 | are, or that Your use of the Licensed Material is, connected
210 | with, or sponsored, endorsed, or granted official status by,
211 | the Licensor or others designated to receive attribution as
212 | provided in Section 3(a)(1)(A)(i).
213 |
214 | b. Other rights.
215 |
216 | 1. Moral rights, such as the right of integrity, are not
217 | licensed under this Public License, nor are publicity,
218 | privacy, and/or other similar personality rights; however, to
219 | the extent possible, the Licensor waives and/or agrees not to
220 | assert any such rights held by the Licensor to the limited
221 | extent necessary to allow You to exercise the Licensed
222 | Rights, but not otherwise.
223 |
224 | 2. Patent and trademark rights are not licensed under this
225 | Public License.
226 |
227 | 3. To the extent possible, the Licensor waives any right to
228 | collect royalties from You for the exercise of the Licensed
229 | Rights, whether directly or through a collecting society
230 | under any voluntary or waivable statutory or compulsory
231 | licensing scheme. In all other cases the Licensor expressly
232 | reserves any right to collect such royalties, including when
233 | the Licensed Material is used other than for NonCommercial
234 | purposes.
235 |
236 |
237 | Section 3 -- License Conditions.
238 |
239 | Your exercise of the Licensed Rights is expressly made subject to the
240 | following conditions.
241 |
242 | a. Attribution.
243 |
244 | 1. If You Share the Licensed Material (including in modified
245 | form), You must:
246 |
247 | a. retain the following if it is supplied by the Licensor
248 | with the Licensed Material:
249 |
250 | i. identification of the creator(s) of the Licensed
251 | Material and any others designated to receive
252 | attribution, in any reasonable manner requested by
253 | the Licensor (including by pseudonym if
254 | designated);
255 |
256 | ii. a copyright notice;
257 |
258 | iii. a notice that refers to this Public License;
259 |
260 | iv. a notice that refers to the disclaimer of
261 | warranties;
262 |
263 | v. a URI or hyperlink to the Licensed Material to the
264 | extent reasonably practicable;
265 |
266 | b. indicate if You modified the Licensed Material and
267 | retain an indication of any previous modifications; and
268 |
269 | c. indicate the Licensed Material is licensed under this
270 | Public License, and include the text of, or the URI or
271 | hyperlink to, this Public License.
272 |
273 | 2. You may satisfy the conditions in Section 3(a)(1) in any
274 | reasonable manner based on the medium, means, and context in
275 | which You Share the Licensed Material. For example, it may be
276 | reasonable to satisfy the conditions by providing a URI or
277 | hyperlink to a resource that includes the required
278 | information.
279 | 3. If requested by the Licensor, You must remove any of the
280 | information required by Section 3(a)(1)(A) to the extent
281 | reasonably practicable.
282 |
283 | b. ShareAlike.
284 |
285 | In addition to the conditions in Section 3(a), if You Share
286 | Adapted Material You produce, the following conditions also apply.
287 |
288 | 1. The Adapter's License You apply must be a Creative Commons
289 | license with the same License Elements, this version or
290 | later, or a BY-NC-SA Compatible License.
291 |
292 | 2. You must include the text of, or the URI or hyperlink to, the
293 | Adapter's License You apply. You may satisfy this condition
294 | in any reasonable manner based on the medium, means, and
295 | context in which You Share Adapted Material.
296 |
297 | 3. You may not offer or impose any additional or different terms
298 | or conditions on, or apply any Effective Technological
299 | Measures to, Adapted Material that restrict exercise of the
300 | rights granted under the Adapter's License You apply.
301 |
302 |
303 | Section 4 -- Sui Generis Database Rights.
304 |
305 | Where the Licensed Rights include Sui Generis Database Rights that
306 | apply to Your use of the Licensed Material:
307 |
308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right
309 | to extract, reuse, reproduce, and Share all or a substantial
310 | portion of the contents of the database for NonCommercial purposes
311 | only;
312 |
313 | b. if You include all or a substantial portion of the database
314 | contents in a database in which You have Sui Generis Database
315 | Rights, then the database in which You have Sui Generis Database
316 | Rights (but not its individual contents) is Adapted Material,
317 | including for purposes of Section 3(b); and
318 |
319 | c. You must comply with the conditions in Section 3(a) if You Share
320 | all or a substantial portion of the contents of the database.
321 |
322 | For the avoidance of doubt, this Section 4 supplements and does not
323 | replace Your obligations under this Public License where the Licensed
324 | Rights include other Copyright and Similar Rights.
325 |
326 |
327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability.
328 |
329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
339 |
340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
349 |
350 | c. The disclaimer of warranties and limitation of liability provided
351 | above shall be interpreted in a manner that, to the extent
352 | possible, most closely approximates an absolute disclaimer and
353 | waiver of all liability.
354 |
355 |
356 | Section 6 -- Term and Termination.
357 |
358 | a. This Public License applies for the term of the Copyright and
359 | Similar Rights licensed here. However, if You fail to comply with
360 | this Public License, then Your rights under this Public License
361 | terminate automatically.
362 |
363 | b. Where Your right to use the Licensed Material has terminated under
364 | Section 6(a), it reinstates:
365 |
366 | 1. automatically as of the date the violation is cured, provided
367 | it is cured within 30 days of Your discovery of the
368 | violation; or
369 |
370 | 2. upon express reinstatement by the Licensor.
371 |
372 | For the avoidance of doubt, this Section 6(b) does not affect any
373 | right the Licensor may have to seek remedies for Your violations
374 | of this Public License.
375 |
376 | c. For the avoidance of doubt, the Licensor may also offer the
377 | Licensed Material under separate terms or conditions or stop
378 | distributing the Licensed Material at any time; however, doing so
379 | will not terminate this Public License.
380 |
381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
382 | License.
383 |
384 |
385 | Section 7 -- Other Terms and Conditions.
386 |
387 | a. The Licensor shall not be bound by any additional or different
388 | terms or conditions communicated by You unless expressly agreed.
389 |
390 | b. Any arrangements, understandings, or agreements regarding the
391 | Licensed Material not stated herein are separate from and
392 | independent of the terms and conditions of this Public License.
393 |
394 |
395 | Section 8 -- Interpretation.
396 |
397 | a. For the avoidance of doubt, this Public License does not, and
398 | shall not be interpreted to, reduce, limit, restrict, or impose
399 | conditions on any use of the Licensed Material that could lawfully
400 | be made without permission under this Public License.
401 |
402 | b. To the extent possible, if any provision of this Public License is
403 | deemed unenforceable, it shall be automatically reformed to the
404 | minimum extent necessary to make it enforceable. If the provision
405 | cannot be reformed, it shall be severed from this Public License
406 | without affecting the enforceability of the remaining terms and
407 | conditions.
408 |
409 | c. No term or condition of this Public License will be waived and no
410 | failure to comply consented to unless expressly agreed to by the
411 | Licensor.
412 |
413 | d. Nothing in this Public License constitutes or may be interpreted
414 | as a limitation upon, or waiver of, any privileges and immunities
415 | that apply to the Licensor or You, including from the legal
416 | processes of any jurisdiction or authority.
417 |
418 | =======================================================================
419 |
420 | Creative Commons is not a party to its public
421 | licenses. Notwithstanding, Creative Commons may elect to apply one of
422 | its public licenses to material it publishes and in those instances
423 | will be considered the “Licensor.” The text of the Creative Commons
424 | public licenses is dedicated to the public domain under the CC0 Public
425 | Domain Dedication. Except for the limited purpose of indicating that
426 | material is shared under a Creative Commons public license or as
427 | otherwise permitted by the Creative Commons policies published at
428 | creativecommons.org/policies, Creative Commons does not authorize the
429 | use of the trademark "Creative Commons" or any other trademark or logo
430 | of Creative Commons without its prior written consent including,
431 | without limitation, in connection with any unauthorized modifications
432 | to any of its public licenses or any other arrangements,
433 | understandings, or agreements concerning use of licensed material. For
434 | the avoidance of doubt, this paragraph does not form part of the
435 | public licenses.
436 |
437 | Creative Commons may be contacted at creativecommons.org.
438 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenMMLabCourse
2 |
3 | This repository hosts articles, lectures, and tutorials on computer vision and OpenMMLab, helping learners understand the algorithms and systematically master our toolboxes.
4 |
5 | The OpenMMLab team owns the copyright to all of these articles, videos, and tutorial code. Please contact [openmmlab at gmail.com] about collaboration if you would like to deliver university courses based on our materials.
6 |
7 | ## News
8 |
9 | - 2023.05 We invited Prof. Cewu Lu, professor and doctoral advisor in the Department of Computer Science, SEIEE, Shanghai Jiao Tong University, to teach a course on human pose estimation algorithms, covering 2D/3D pose estimation as well as DensePose and Body Mesh; see the "Open Course" list below for the videos
10 | - 2023.05 The OpenMMLab university-industry elective course at Shanghai Jiao Tong University concluded successfully
11 | - 2023.05 We invited Prof. Hang Zhao from the Institute for Interdisciplinary Information Sciences, Tsinghua University, to teach a course on 3D object detection algorithms; see the "Open Course" list below for the videos
12 | - 2022.10 The [OpenMMLab Practice Open Course](https://mp.weixin.qq.com/s/8yztK5qu9-7cXCF1WK441g), jointly organized by the Student Innovation Center of Shanghai Jiao Tong University, SenseTime, and Shanghai AI Laboratory, concluded successfully; course materials are available at [SJTU × SenseTime "OpenMMLab Practice Open Course"](./lecture_sjtu.md)
13 |
14 | ## "OpenMMLab: A General Vision Framework" Open Course
15 |
16 | |        | Lecture | Lecture Video | Coding Tutorial | Course Code |
17 | | :----: | :----: | :----: | :----: | :----: |
18 | | Lecture 1 | [Overview of Computer Vision and OpenMMLab](https://www.bilibili.com/video/BV1R341117FJ/) | [video](https://www.bilibili.com/video/BV1R341117FJ/) | | |
19 | | Lecture 2 | [Image Classification and MMClassification](https://www.bilibili.com/video/BV1J341127nQ/) | [video](https://www.bilibili.com/video/BV1J341127nQ/) | [video](https://www.bilibili.com/video/BV1J341127nQ?p=7) | [lec2.ipynb](codes/lec2.ipynb) |
20 | | Lecture 3 | [Object Detection and MMDetection (Part 1)](https://www.bilibili.com/video/BV1Vv411A7ZM/) | [video](https://www.bilibili.com/video/BV1Vv411A7ZM/) | [video](https://www.bilibili.com/video/BV1Vv411A7ZM?p=5) | [lec3.ipynb](codes/lec3.ipynb) |
21 | | Lecture 4 | [Object Detection and MMDetection (Part 2)](https://www.bilibili.com/video/BV1bM4y1g7Hf/) | [video](https://www.bilibili.com/video/BV1bM4y1g7Hf/) | [video](https://www.bilibili.com/video/BV1bM4y1g7Hf?p=5) | [lec4.ipynb](codes/lec4.ipynb) |
22 | | Lecture 5 | [Semantic Segmentation and MMSegmentation](https://www.bilibili.com/video/BV1944y1b76p/) | [video](https://www.bilibili.com/video/BV1944y1b76p/) | [video](https://www.bilibili.com/video/BV1944y1b76p?p=5) | [lec5.ipynb](codes/lec5.ipynb) |
23 | | Lecture 6 | [Low-Level Vision and MMEditing (Part 1)](https://www.bilibili.com/video/BV1zq4y1o7ph/) | [video](https://www.bilibili.com/video/BV1zq4y1o7ph/) | [video](https://www.bilibili.com/video/BV1zq4y1o7ph?p=5) | [lec6.ipynb](codes/lec6.ipynb) |
24 | | Lecture 7 | [Low-Level Vision and MMEditing (Part 2)](https://www.bilibili.com/video/BV1cQ4y167KL/) | [video](https://www.bilibili.com/video/BV1cQ4y167KL/) | [video](https://www.bilibili.com/video/BV1cQ4y167KL?p=4) | [lec7.ipynb](codes/lec7.ipynb) |
25 | | Lecture 8 | [Video Understanding and MMAction2](https://www.bilibili.com/video/BV1h34y1D7QH/) | [video](https://www.bilibili.com/video/BV1h34y1D7QH) | [video](https://www.bilibili.com/video/BV1h34y1D7QH?p=5) | [lec8.ipynb](codes/lec8.ipynb) |
26 | | Lecture 9 | [3D Object Detection and MMDetection3D](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=895668) | [video](https://www.bilibili.com/video/BV1Sc411K7L1) | [video](https://www.bilibili.com/video/BV1aG4y197is) | [MMDet3D Tutorials](codes/MMDet3d_tutorials) |
27 | | Lecture 10 | [Generative Adversarial Networks and MMGeneration](https://space.bilibili.com/1900783/channel/collectiondetail?sid=367182) | Coming soon | [video](https://www.bilibili.com/video/BV1bY4y147kz/) | [MMGeneration Tutorials](https://github.com/TommyZihao/MMGeneration_Tutorials) |
28 | | Lecture 11 | [Optical Character Recognition and MMOCR](https://space.bilibili.com/1900783/channel/collectiondetail?sid=292930) | [video](https://www.bilibili.com/video/BV1Ly4y1c7yw/) | [video](https://www.bilibili.com/video/BV1Ua411x7dB/) | [MMOCR tutorials](https://github.com/TommyZihao/MMOCR_tutorials) |
29 | | Lecture 12 | [Human Keypoint Detection and MMPose](https://space.bilibili.com/1900783/channel/collectiondetail?sid=552719) | [video](https://www.bilibili.com/video/BV1kk4y1L7Xb/) | [video](https://www.bilibili.com/video/BV16B4y1h7JS/) | [MMPose Tutorials](https://github.com/TommyZihao/MMPose_Tutorials) |
30 | | Lecture 13 | [Video Object Perception and MMTracking](https://space.bilibili.com/1900783/channel/collectiondetail?sid=356479) | Coming soon | [video](https://www.bilibili.com/video/BV1za411Y7Zm/) | [MMTracking Tutorials](https://github.com/TommyZihao/MMTracking_Tutorials) |
31 | | Lecture 14 | Optical Flow Estimation and MMFlow | Coming soon | | |
32 | | Lecture 15 | [Self-Supervised Learning and MMSelfSup](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287) | [video](https://www.bilibili.com/video/BV1iu4y1e7tV/) | [video](https://www.bilibili.com/video/BV1hg411r7iK) | [MMSelfSup tutorials](codes/MMSelfSup_tutorials) |
33 | | Lecture 16 | Model Compression and MMRazor | Coming soon | | |
34 | | Lecture 17 | [Model Deployment and MMDeploy](https://www.bilibili.com/video/BV1Tx4y1F768) | [video](https://www.bilibili.com/video/BV1Tx4y1F768) | [video](https://www.bilibili.com/video/BV1yX4y1X7jp) | [MMDeploy Tutorials](https://github.com/TommyZihao/MMDeploy_Tutorials) |
35 |
36 | ## MMYOLO Video Series
37 |
38 | The MMYOLO video series consists of tutorial videos recorded and shared by MMYOLO developers and community members; we hope they help you learn and use MMYOLO.
39 | See [MMYOLO Video Series](mmyolo.md) for details.
40 |
41 | ## MMEval Video Series
42 |
43 | MMEval is a cross-framework algorithm evaluation library; the MMEval video series introduces and explains MMEval and related topics.
44 | See [MMEval Video Series](mmeval.md) for details.
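As a quick taste of the library, here is a minimal sketch of computing classification accuracy with MMEval. The `Accuracy` metric and NumPy-array inputs shown here are assumptions based on MMEval's documented examples; treat the videos and official docs as authoritative.

```python
# Minimal sketch (assumed API; see the MMEval docs for authoritative usage).
# MMEval metrics are framework-agnostic and accept NumPy arrays as well as
# framework-native tensors such as torch.Tensor.
import numpy as np
from mmeval import Accuracy  # assumed import path

accuracy = Accuracy(topk=(1,))     # top-1 classification accuracy
preds = np.asarray([0, 2, 1, 3])   # predicted class indices
labels = np.asarray([0, 1, 2, 3])  # ground-truth class indices
print(accuracy(preds, labels))     # e.g. {'top1': 0.5}
```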
45 |
46 | ## Technical Articles
47 |
48 | - [1. Infrastructure & MMCV](./articles.md/#1-基础架构--mmcv)
49 | - [2. Object Detection & MMDetection](./articles.md/#2-目标检测--mmdetection)
50 | - [3. 3D Object Detection & MMDetection3D](./articles.md/#3-3d目标检测--mmdetection3d)
51 | - [4. Rotated Object Detection & MMRotate](./articles.md/#4-旋转框检测--mmrotate)
52 | - [5. Image Classification & MMClassification](./articles.md/#5-图像分类--mmclassification)
53 | - [6. Semantic Segmentation & MMSegmentation](./articles.md/#6-语义分割--mmsegmentation)
54 | - [7. Generative Models & MMGeneration](./articles.md/#7-生成模型--mmgeneration)
55 | - [8. Low-Level Vision & MMEditing](./articles.md/#8-底层视觉--mmediting)
56 | - [9. Pose Estimation & MMPose](./articles.md/#9-姿态估计--mmpose)
57 | - [10. Parametric Human Models & MMHuman3D](./articles.md/#10-人体参数化模型--mmhuman3d)
58 | - [11. Action Understanding & MMAction2](./articles.md/#11-行为理解--mmaction2)
59 | - [12. Video Tracking & MMTracking](./articles.md/#12-视频追踪--mmtracking)
60 | - [13. Optical Character Recognition & MMOCR](./articles.md/#13-光学字符识别--mmocr)
61 | - [14. Optical Flow Estimation & MMFlow](./articles.md/#14-光流估计--mmflow)
62 | - [15. Few-Shot Learning & MMFewShot](./articles.md/#15-少样本学习--mmfewshot)
63 | - [16. Self-Supervised Learning & MMSelfSup](./articles.md/#16-自监督学习--mmselfsup)
64 | - [17. Model Compression & MMRazor](./articles.md/#17-模型压缩--mmrazor)
65 | - [18. Model Deployment & MMDeploy](./articles.md/#18-模型部署--mmdeploy)
66 | - [19. Python & PyTorch Internals Explained](./articles.md/#19-python--pytorch-底层机制解读)
67 |
68 |
69 |
70 | ## Project-Based Tutorials
71 |
72 | | Project | Coding Tutorial |
73 | | :----: | :----: |
74 | | [Turn Selfie Portraits into Trendy Anime-Style Art](https://www.bilibili.com/video/BV1XL4y1g7in/) | [video](https://www.bilibili.com/video/BV1XL4y1g7in/) |
75 | | [CycleGAN: Turn Photos into Van Gogh and Monet Oil Paintings](https://www.bilibili.com/video/BV1wv4y1T71F/) | [video](https://www.bilibili.com/video/BV1wv4y1T71F/) |
76 | | [Single-Object Tracking: Bees and Billiard Balls](https://www.bilibili.com/video/BV1s44y1g75J) | [MMTracking_Tutorials](https://github.com/TommyZihao/MMTracking_Tutorials) |
77 | | [Multi-Object Tracking: People Counting and Trajectory Tracking](https://www.bilibili.com/video/BV1J3411M7KQ) | [MMTracking_Tutorials](https://github.com/TommyZihao/MMTracking_Tutorials) |
78 | | Image Classification: MMClassification Capstone Project | [MMClassification_Tutorials](https://github.com/TommyZihao/MMClassification_Tutorials) |
79 | | Object Detection: MMDetection Capstone Project | [MMDetection_Tutorials](https://github.com/TommyZihao/MMDetection_Tutorials) |
80 |
81 |
82 |
83 | ## Campus Lectures
84 |
85 | |       | Lecture | Lecture Video |
86 | | :---: | :----: | :----: |
87 | | 1 | [OpenMMLab @ East China University of Science and Technology - Overview of Computer Vision and OpenMMLab Applications](https://www.bilibili.com/video/BV1Gb4y1B7D4/) | [video](https://www.bilibili.com/video/BV1Gb4y1B7D4/) |
88 | | 2 | [OpenMMLab @ Shanghai Jiao Tong University - A Survey of Image Classification and Object Detection Algorithms](https://www.bilibili.com/video/BV1ou411k7fD/) | [video](https://www.bilibili.com/video/BV1ou411k7fD/) |
89 | | 3 | [OpenMMLab @ Shanghai Jiao Tong University - Hands-On Walkthrough of MMDetection](https://www.bilibili.com/video/BV1NL4y1c7ki/) | [video](https://www.bilibili.com/video/BV1NL4y1c7ki/) |
90 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-slate
2 | show_downloads: false
3 | google_analytics:
4 | plugins:
5 | - jekyll-readme-index
--------------------------------------------------------------------------------
/_includes/head-custom.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | {% include head-custom-google-analytics.html %}
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/assets/css/style.scss:
--------------------------------------------------------------------------------
1 | ---
2 | ---
3 |
4 | @import "{{ site.theme }}";
5 |
6 | td {
7 | vertical-align: middle !important;
8 | }
9 |
10 | .inner {
11 | max-width: 960px !important;
12 | }
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/2 inference-camera.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# 使用预训练模型、基于单目图像检测场景中的物体"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "## 使用 Python API"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "预先下载一个配置文件和预训练模型,保存到 checkpoints 文件夹\n",
22 | "\n",
23 | "这里给出的是SMKOE和FCOS3D各一个例子模型"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "!mim download mmdet3d --config smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d --dest checkpoints\n",
33 | "!curl -sLO https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth\n",
34 | "!mv fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth checkpoints/"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "准备一幅包含汽车、行人的街景图象,为了方便我们使用 `mmdet3d/demo/data` 下提供的一个 nuscenes 数据集中的图像文件"
42 | ]
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "metadata": {},
47 | "source": [
48 | "如果希望尝试更多数据,我们还 KITTI 数据集中裁剪了一个子集用于展示,下载解压后,点云文件储存在 `data/kitti/training/velodyne/` 和 `data/kitti/testing/velodyne/` 目录下"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "# !curl -sL -o kitti_tiny_3D.zip \"https://onedrive.live.com/download?resid=CB1C03091115D5EA%21119&authkey=!AO57a1ru2Tz2jHQ\"\n",
58 | "# !unzip -d data/kitti kitti_tiny_3D.zip"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "初始化模型并执行推理"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {
72 | "pycharm": {
73 | "is_executing": false
74 | }
75 | },
76 | "outputs": [],
77 | "source": [
78 | "from mmdet3d.apis import init_model\n",
79 | "\n",
80 | "# config_file = 'configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py'\n",
81 | "# checkpoint_file = 'checkpoints/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth'\n",
82 | "\n",
83 | "config_file = 'configs/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d.py'\n",
84 | "checkpoint_file = 'checkpoints/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth'\n",
85 | "\n",
86 | "model = init_model(config_file, checkpoint_file, device='cuda:0')"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {
93 | "pycharm": {
94 | "is_executing": false
95 | }
96 | },
97 | "outputs": [],
98 | "source": [
99 | "# test a single sample\n",
100 | "\n",
101 | "from mmdet3d.apis import inference_mono_3d_detector\n",
102 | "image = 'demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg'\n",
103 | "ann = 'demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d.coco.json'\n",
104 | "result, data = inference_mono_3d_detector(model, image, ann)"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {
111 | "pycharm": {
112 | "is_executing": false
113 | }
114 | },
115 | "outputs": [],
116 | "source": [
117 | "# show the results\n",
118 | "\n",
119 | "from mmdet3d.apis import show_result_meshlab\n",
120 | "out_dir = './'\n",
121 | "show_result_meshlab(data, result, out_dir, show=True, score_thr=0.1, task='mono-det')"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {},
127 | "source": [
128 | "## 使用 demo 程序"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "metadata": {},
134 | "source": [
135 | "Demo 程序命令行参数\n",
136 | "\n",
137 | "```\n",
138 | "python demo/mono_det_demo.py ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--out-dir ${OUT_DIR}] [--show]\n",
139 | "```"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "!python demo/mono_det_demo.py \\\n",
149 | " data/kitti/testing/image_2/000002.png \\\n",
150 | " data/kitti/kitti_infos_test_mono3d.coco.json \\\n",
151 | " configs/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d.py \\\n",
152 | " checkpoints/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth \\\n",
153 | " --show"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "!python demo/mono_det_demo.py \\\n",
163 | " demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg \\\n",
164 | " demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525_mono3d.coco.json \\\n",
165 | " configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py \\\n",
166 | " .\\checkpoints\\fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth \\\n",
167 | " --show"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "metadata": {},
173 | "source": [
174 | "### 推理自己的图像"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 1,
180 | "metadata": {
181 | "pycharm": {
182 | "is_executing": false
183 | }
184 | },
185 | "outputs": [
186 | {
187 | "name": "stderr",
188 | "output_type": "stream",
189 | "text": [
190 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torchvision\\io\\image.py:11: UserWarning: Failed to load image Python extension: Could not find module 'C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\Lib\\site-packages\\torchvision\\image.pyd' (or one of its dependencies). Try using the full path with constructor syntax.\n",
191 | " warn(f\"Failed to load image Python extension: {e}\")\n"
192 | ]
193 | },
194 | {
195 | "name": "stdout",
196 | "output_type": "stream",
197 | "text": [
198 | "load checkpoint from local path: checkpoints/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth\n"
199 | ]
200 | }
201 | ],
202 | "source": [
203 | "from mmdet3d.apis import init_model\n",
204 | "\n",
205 | "config_file = 'configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py'\n",
206 | "checkpoint_file = 'checkpoints/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_20210715_235813-4bed5239.pth'\n",
207 | "\n",
208 | "# config_file = 'configs/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d.py'\n",
209 | "# checkpoint_file = 'checkpoints/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth'\n",
210 | "\n",
211 | "model = init_model(config_file, checkpoint_file, device='cuda:0')"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {
218 | "pycharm": {
219 | "is_executing": false
220 | }
221 | },
222 | "outputs": [],
223 | "source": [
224 | "# test a single sample\n",
225 | "\n",
226 | "from mmdet3d.apis import inference_mono_3d_detector\n",
227 | "image = 'game.png'\n",
228 | "ann = 'game.json'\n",
229 | "result, data = inference_mono_3d_detector(model, image, ann)"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {},
236 | "outputs": [],
237 | "source": [
238 | "from mmdet3d.apis import show_result_meshlab\n",
239 | "out_dir = './'\n",
240 | "show_result_meshlab(data, result, out_dir, show=True, score_thr=0.15, task='mono-det')"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "### 在视频上推理"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": 2,
253 | "metadata": {},
254 | "outputs": [
255 | {
256 | "name": "stderr",
257 | "output_type": "stream",
258 | "text": [
259 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torch\\functional.py:445: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ..\\aten\\src\\ATen\\native\\TensorShape.cpp:2157.)\n",
260 | " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n"
261 | ]
262 | }
263 | ],
264 | "source": [
265 | "import mmcv \n",
266 | "from mmdet3d.apis import inference_mono_3d_detector, show_result_meshlab\n",
267 | "\n",
268 | "video = mmcv.VideoReader('game.mp4')\n",
269 | "ann_tmpl = 'game.json'\n",
270 | "tmp_out = 'tmp'\n",
271 | "frames = []\n",
272 | "\n",
273 | "# iterate over all frames\n",
274 | "for i, frame in enumerate(video):\n",
275 | " imfn = f'game/{i:04d}.jpg'\n",
276 | " mmcv.imwrite(frame, imfn)\n",
277 | " \n",
278 | " annfn = f'game/{i:04d}.json'\n",
279 | " ann = mmcv.load(ann_tmpl)\n",
280 | " ann['images'][0]['file_name'] = imfn\n",
281 | " mmcv.dump(ann, annfn)\n",
282 | " \n",
283 | " result, data = inference_mono_3d_detector(model, imfn, annfn)\n",
284 | " show_result_meshlab(data, result, tmp_out, show=False, score_thr=0.15, task='mono-det')\n",
285 | " \n",
286 | " frames.append(f'{tmp_out}/{i:04d}/{i:04d}_pred.png')"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": 5,
292 | "metadata": {},
293 | "outputs": [],
294 | "source": [
295 | "import cv2\n",
296 | "\n",
297 | "vwriter = cv2.VideoWriter('game-out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (1920,1080))\n",
298 | "\n",
299 | "for frame in frames:\n",
300 | " img = cv2.imread(frame)\n",
301 | " vwriter.write(img)"
302 | ]
303 | },
304 | {
305 | "cell_type": "code",
306 | "execution_count": null,
307 | "metadata": {},
308 | "outputs": [],
309 | "source": []
310 | }
311 | ],
312 | "metadata": {
313 | "kernelspec": {
314 | "display_name": "Python 3 (ipykernel)",
315 | "language": "python",
316 | "name": "python3"
317 | },
318 | "language_info": {
319 | "codemirror_mode": {
320 | "name": "ipython",
321 | "version": 3
322 | },
323 | "file_extension": ".py",
324 | "mimetype": "text/x-python",
325 | "name": "python",
326 | "nbconvert_exporter": "python",
327 | "pygments_lexer": "ipython3",
328 | "version": "3.8.12"
329 | },
330 | "pycharm": {
331 | "stem_cell": {
332 | "cell_type": "raw",
333 | "metadata": {
334 | "collapsed": false
335 | },
336 | "source": []
337 | }
338 | }
339 | },
340 | "nbformat": 4,
341 | "nbformat_minor": 4
342 | }
343 |
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/2 inference-pcd.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# 使用预训练模型、基于点云检测场景中的物体"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "## 使用 Python API"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "预先下载一个配置文件和预训练模型,保存到 checkpoints 文件夹,这里SECOND和PointPillars各给出了一个例子模型"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "# !mim download mmdet3d --config hv_second_secfpn_6x8_80e_kitti-3d-3class --dest checkpoints\n",
31 | "!mim download mmdet3d --config hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class --dest checkpoints"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {},
37 | "source": [
38 | "准备一个点云文件,为了方便我们使用 `mmdet3d/demo/data` 下提供的一个 KITTI 数据集中的点云文件"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "如果希望尝试更多数据,我们还 KITTI 数据集中裁剪了一个子集用于展示,下载解压后,点云文件储存在 `data/kitti/training/velodyne/` 和 `data/kitti/testing/velodyne/` 目录下"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {
52 | "tags": []
53 | },
54 | "outputs": [],
55 | "source": [
56 | "# !curl -L -o kitti_tiny_3D.zip \"https://onedrive.live.com/download?resid=CB1C03091115D5EA%21119&authkey=!AO57a1ru2Tz2jHQ\"\n",
57 | "# !unzip -d data/kitti kitti_tiny_3D.zip"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "metadata": {},
63 | "source": [
64 | "初始化模型并执行推理"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 1,
70 | "metadata": {
71 | "pycharm": {
72 | "is_executing": false
73 | }
74 | },
75 | "outputs": [
76 | {
77 | "name": "stderr",
78 | "output_type": "stream",
79 | "text": [
80 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torchvision\\io\\image.py:11: UserWarning: Failed to load image Python extension: Could not find module 'C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\Lib\\site-packages\\torchvision\\image.pyd' (or one of its dependencies). Try using the full path with constructor syntax.\n",
81 | " warn(f\"Failed to load image Python extension: {e}\")\n"
82 | ]
83 | },
84 | {
85 | "name": "stdout",
86 | "output_type": "stream",
87 | "text": [
88 | "load checkpoint from local path: checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth\n"
89 | ]
90 | },
91 | {
92 | "name": "stderr",
93 | "output_type": "stream",
94 | "text": [
95 | "C:\\Users\\wangruohui\\Desktop\\mmdetection3d\\mmdet3d\\models\\dense_heads\\anchor3d_head.py:84: UserWarning: dir_offset and dir_limit_offset will be depressed and be incorporated into box coder in the future\n",
96 | " warnings.warn(\n"
97 | ]
98 | }
99 | ],
100 | "source": [
101 | "from mmdet3d.apis import init_model, inference_detector, show_result_meshlab\n",
102 | "\n",
103 | "# SECOND 模型\n",
104 | "# config_file = 'configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py'\n",
105 | "# checkpoint_file = 'checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth'\n",
106 | "\n",
107 | "# PointPillars 模型\n",
108 | "config_file = 'configs/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py'\n",
109 | "checkpoint_file = 'checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth'\n",
110 | "\n",
111 | "model = init_model(config_file, checkpoint_file, device='cuda:0')"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": 2,
117 | "metadata": {
118 | "pycharm": {
119 | "is_executing": false
120 | }
121 | },
122 | "outputs": [
123 | {
124 | "name": "stderr",
125 | "output_type": "stream",
126 | "text": [
127 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torch\\functional.py:445: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ..\\aten\\src\\ATen\\native\\TensorShape.cpp:2157.)\n",
128 | " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n"
129 | ]
130 | }
131 | ],
132 | "source": [
133 | "# 找一个点云数据,这里用 KITTI 数据集中的一个点云文件\n",
134 | "pcd = 'demo/data/kitti/kitti_000008.bin'\n",
135 | "result, data = inference_detector(model, pcd)"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": 3,
141 | "metadata": {
142 | "pycharm": {
143 | "is_executing": false
144 | }
145 | },
146 | "outputs": [
147 | {
148 | "name": "stdout",
149 | "output_type": "stream",
150 | "text": [
151 | "Jupyter environment detected. Enabling Open3D WebVisualizer.\n",
152 | "[Open3D INFO] WebRTC GUI backend enabled.\n",
153 | "[Open3D INFO] WebRTCWindowSystem: HTTP handshake server disabled.\n",
154 | "[Open3D WARNING] invalid color in PaintUniformColor, clipping to [0, 1]\n"
155 | ]
156 | },
157 | {
158 | "data": {
159 | "text/plain": [
160 | "('./', 'kitti_000008')"
161 | ]
162 | },
163 | "execution_count": 3,
164 | "metadata": {},
165 | "output_type": "execute_result"
166 | }
167 | ],
168 | "source": [
169 | "# 可视化检测结果\n",
170 | "out_dir = './'\n",
171 | "show_result_meshlab(data, result, out_dir, show=True)"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {},
177 | "source": [
178 | "## 使用 demo 程序"
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "Demo 程序命令行参数\n",
186 | "```\n",
187 | "python demo/pcd_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show]\n",
188 | "```\n",
189 | "\n",
190 | "参考文档:https://mmdetection3d.readthedocs.io/en/latest/demo.html"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": null,
196 | "metadata": {},
197 | "outputs": [
198 | {
199 | "name": "stdout",
200 | "output_type": "stream",
201 | "text": [
202 | "load checkpoint from local path: checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth\n"
203 | ]
204 | },
205 | {
206 | "name": "stderr",
207 | "output_type": "stream",
208 | "text": [
209 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torchvision\\io\\image.py:11: UserWarning: Failed to load image Python extension: Could not find module 'C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\Lib\\site-packages\\torchvision\\image.pyd' (or one of its dependencies). Try using the full path with constructor syntax.\n",
210 | " warn(f\"Failed to load image Python extension: {e}\")\n",
211 | "c:\\wangruohui\\桌面\\mmdetection3d\\mmdet3d\\models\\dense_heads\\anchor3d_head.py:84: UserWarning: dir_offset and dir_limit_offset will be depressed and be incorporated into box coder in the future\n",
212 | " warnings.warn(\n",
213 | "C:\\Users\\wangruohui\\Miniconda3\\envs\\mmdet3d\\lib\\site-packages\\torch\\functional.py:445: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ..\\aten\\src\\ATen\\native\\TensorShape.cpp:2157.)\n",
214 | " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n"
215 | ]
216 | }
217 | ],
218 | "source": [
219 | "!python demo/pcd_demo.py \\\n",
220 | " demo/data/kitti/kitti_000008.bin \\\n",
221 | " configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \\\n",
222 | " checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth \\\n",
223 | " --show"
224 | ]
225 | },
226 | {
227 | "cell_type": "markdown",
228 | "metadata": {},
229 | "source": [
230 | "Windows 上如果希望在 powershell 里面执行命令,多行换行符替换为 Backtick \\`"
231 | ]
232 | },
233 | {
234 | "cell_type": "raw",
235 | "metadata": {},
236 | "source": [
237 | " python .\\demo\\pcd_demo.py `\n",
238 | " .\\data\\kitti\\testing\\velodyne\\000002.bin `\n",
239 | " configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py `\n",
240 | " checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth `\n",
241 | " --show"
242 | ]
243 | }
244 | ],
245 | "metadata": {
246 | "kernelspec": {
247 | "display_name": "Python 3 (ipykernel)",
248 | "language": "python",
249 | "name": "python3"
250 | },
251 | "language_info": {
252 | "codemirror_mode": {
253 | "name": "ipython",
254 | "version": 3
255 | },
256 | "file_extension": ".py",
257 | "mimetype": "text/x-python",
258 | "name": "python",
259 | "nbconvert_exporter": "python",
260 | "pygments_lexer": "ipython3",
261 | "version": "3.8.12"
262 | },
263 | "pycharm": {
264 | "stem_cell": {
265 | "cell_type": "raw",
266 | "metadata": {
267 | "collapsed": false
268 | },
269 | "source": []
270 | }
271 | }
272 | },
273 | "nbformat": 4,
274 | "nbformat_minor": 4
275 | }
276 |
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/4 inference_demo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# 使用 Python API 进行推理"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [
15 | {
16 | "name": "stdout",
17 | "output_type": "stream",
18 | "text": [
19 | "processing hv_second_secfpn_6x8_80e_kitti-3d-3class...\n",
20 | "downloading -------------------------------- 20.4/20.4 MiB 2.3 MB/s eta 0:00:00\n",
21 | "Successfully downloaded hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth to C:\\Users\\WRH\\Desktop\\mmdetection3d\\checkpoints\n",
22 | "Successfully dumped hv_second_secfpn_6x8_80e_kitti-3d-3class.py to C:\\Users\\WRH\\Desktop\\mmdetection3d\\checkpoints\n"
23 | ]
24 | },
25 | {
26 | "name": "stderr",
27 | "output_type": "stream",
28 | "text": [
29 | "C:\\Users\\WRH\\miniconda3\\envs\\mm38\\lib\\site-packages\\_distutils_hack\\__init__.py:30: UserWarning: Setuptools is replacing distutils.\n",
30 | " warnings.warn(\"Setuptools is replacing distutils.\")\n"
31 | ]
32 | }
33 | ],
34 | "source": [
35 | "#使用 MIM 下载预训练模型\n",
36 | "!mim download mmdet3d --config hv_second_secfpn_6x8_80e_kitti-3d-3class --dest checkpoints"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 2,
42 | "metadata": {
43 | "pycharm": {
44 | "is_executing": false
45 | }
46 | },
47 | "outputs": [],
48 | "source": [
49 | "from mmdet3d.apis import init_model, inference_detector, show_result_meshlab"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 3,
55 | "metadata": {
56 | "pycharm": {
57 | "is_executing": false
58 | }
59 | },
60 | "outputs": [],
61 | "source": [
62 | "config_file = 'configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py'\n",
63 | "# download the checkpoint from model zoo and put it in `checkpoints/`\n",
64 | "checkpoint_file = 'checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth'"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 4,
70 | "metadata": {
71 | "pycharm": {
72 | "is_executing": false
73 | }
74 | },
75 | "outputs": [
76 | {
77 | "name": "stderr",
78 | "output_type": "stream",
79 | "text": [
80 | "C:\\Users\\WRH\\Desktop\\mmdetection3d\\mmdet3d\\models\\dense_heads\\anchor3d_head.py:84: UserWarning: dir_offset and dir_limit_offset will be depressed and be incorporated into box coder in the future\n",
81 | " warnings.warn(\n"
82 | ]
83 | },
84 | {
85 | "name": "stdout",
86 | "output_type": "stream",
87 | "text": [
88 | "load checkpoint from local path: checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth\n"
89 | ]
90 | }
91 | ],
92 | "source": [
93 | "# build the model from a config file and a checkpoint file\n",
94 | "model = init_model(config_file, checkpoint_file, device='cuda:0')"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 5,
100 | "metadata": {
101 | "pycharm": {
102 | "is_executing": false
103 | }
104 | },
105 | "outputs": [],
106 | "source": [
107 | "# test a single sample\n",
108 | "pcd = 'data/kitti/testing/velodyne_reduced/000008.bin'\n",
109 | "result, data = inference_detector(model, pcd)"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": 6,
115 | "metadata": {
116 | "pycharm": {
117 | "is_executing": false
118 | }
119 | },
120 | "outputs": [
121 | {
122 | "name": "stdout",
123 | "output_type": "stream",
124 | "text": [
125 | "Jupyter environment detected. Enabling Open3D WebVisualizer.\n",
126 | "[Open3D INFO] WebRTC GUI backend enabled.\n",
127 | "[Open3D INFO] WebRTCWindowSystem: HTTP handshake server disabled.\n"
128 | ]
129 | },
130 | {
131 | "data": {
132 | "text/plain": [
133 | "('./', '000008')"
134 | ]
135 | },
136 | "execution_count": 6,
137 | "metadata": {},
138 | "output_type": "execute_result"
139 | }
140 | ],
141 | "source": [
142 | "# show the results\n",
143 | "out_dir = './'\n",
144 | "show_result_meshlab(data, result, out_dir, show=True)"
145 | ]
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "# 使用 demo 程序进行推理"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "metadata": {},
157 | "source": [
158 | "点云\n",
159 | "\n",
160 | "```\n",
161 | "python demo/pcd_demo.py ${PCD_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--score-thr ${SCORE_THR}] [--out-dir ${OUT_DIR}] [--show]\n",
162 | "```\n",
163 | "\n",
164 | "单目视觉\n",
165 | "\n",
166 | "```\n",
167 | "python demo/mono_det_demo.py ${IMAGE_FILE} ${ANNOTATION_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${GPU_ID}] [--out-dir ${OUT_DIR}] [--show]\n",
168 | "```"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": 7,
174 | "metadata": {},
175 | "outputs": [
176 | {
177 | "name": "stdout",
178 | "output_type": "stream",
179 | "text": [
180 | "load checkpoint from local path: checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth"
181 | ]
182 | },
183 | {
184 | "name": "stderr",
185 | "output_type": "stream",
186 | "text": [
187 | "c:\\users\\wrh\\desktop\\mmdetection3d\\mmdet3d\\models\\dense_heads\\anchor3d_head.py:84: UserWarning: dir_offset and dir_limit_offset will be depressed and be incorporated into box coder in the future\n",
188 | " warnings.warn(\n"
189 | ]
190 | },
191 | {
192 | "name": "stdout",
193 | "output_type": "stream",
194 | "text": [
195 | "\n"
196 | ]
197 | }
198 | ],
199 | "source": [
200 | "# 与上面程序效果相同\n",
201 | "!python demo/pcd_demo.py \\\n",
202 | " data/kitti/testing/velodyne/000002.bin \\\n",
203 | " configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \\\n",
204 | " checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20210831_022017-ae782e87.pth \\\n",
205 | " --show"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": 8,
211 | "metadata": {},
212 | "outputs": [
213 | {
214 | "name": "stderr",
215 | "output_type": "stream",
216 | "text": [
217 | "'wget' 不是内部或外部命令,也不是可运行的程序\n",
218 | "或批处理文件。\n",
219 | "'mv' 不是内部或外部命令,也不是可运行的程序\n",
220 | "或批处理文件。\n"
221 | ]
222 | }
223 | ],
224 | "source": [
225 | "# 从 https://github.com/open-mmlab/mmdetection3d/blob/master/configs/fcos3d/README.md 下载 FCOS3D 预训练模型到 checkpoints 文件夹下\n",
226 | "# Windows 需要在 powershell 执行\n",
227 | "!wget https://download.openmmlab.com/mmdetection3d/v0.1.0_models/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth\n",
228 | "!mv fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth checkpoints"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": null,
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "!python demo/mono_det_demo.py \\\n",
238 | " .\\data\\kitti\\testing\\image_2\\000002.png \\\n",
239 | " .\\data\\kitti\\kitti_infos_test_mono3d.coco.json \\\n",
240 | " configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py \\\n",
241 | " .\\checkpoints\\fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune_20210717_095645-8d806dc2.pth \\\n",
242 | " --show"
243 | ]
244 | }
245 | ],
246 | "metadata": {
247 | "kernelspec": {
248 | "display_name": "Python 3 (ipykernel)",
249 | "language": "python",
250 | "name": "python3"
251 | },
252 | "language_info": {
253 | "codemirror_mode": {
254 | "name": "ipython",
255 | "version": 3
256 | },
257 | "file_extension": ".py",
258 | "mimetype": "text/x-python",
259 | "name": "python",
260 | "nbconvert_exporter": "python",
261 | "pygments_lexer": "ipython3",
262 | "version": "3.8.12"
263 | },
264 | "pycharm": {
265 | "stem_cell": {
266 | "cell_type": "raw",
267 | "metadata": {
268 | "collapsed": false
269 | },
270 | "source": []
271 | }
272 | }
273 | },
274 | "nbformat": 4,
275 | "nbformat_minor": 4
276 | }
277 |
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/README.md:
--------------------------------------------------------------------------------
1 | # MMDet3D Tutorials
2 |
3 | This directory contains the code used in the [MMDetection3D tutorial](https://www.bilibili.com/video/BV1aG4y197is)
4 |
5 | The KITTI_tiny_3D dataset used in the videos can be [downloaded from OneDrive](https://onedrive.live.com/download?resid=CB1C03091115D5EA%21119&authkey=!AO57a1ru2Tz2jHQ)
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/game.json:
--------------------------------------------------------------------------------
1 | {
2 | "images": [
3 | {
4 | "file_name": "game.png",
5 | "id": 0,
6 | "cam_intrinsic": [
7 | [
8 | 1000,
9 | 0.0,
10 | 683.0
11 | ],
12 | [
13 | 0.0,
14 | 1000,
15 | 384.0
16 | ],
17 | [
18 | 0.0,
19 | 0.0,
20 | 1.0
21 | ]
22 | ],
23 | "width": 1366,
24 | "height": 768
25 | }
26 | ]
27 | }
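
The `cam_intrinsic` above is a standard 3×3 pinhole intrinsic matrix: focal lengths fx = fy = 1000 and principal point (683, 384), i.e. the center of the 1366×768 frame. As a minimal sketch (illustrative only, not part of this repo), such a matrix projects a camera-space point to pixel coordinates like so:

```python
import numpy as np

# Intrinsic matrix from game.json: [fx 0 cx; 0 fy cy; 0 0 1]
K = np.array([[1000.0, 0.0, 683.0],
              [0.0, 1000.0, 384.0],
              [0.0, 0.0, 1.0]])

point_cam = np.array([2.0, 1.0, 10.0])  # (X, Y, Z) in camera coordinates, Z > 0
u, v, w = K @ point_cam
print(u / w, v / w)  # pixel coordinates: 883.0 484.0
```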
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/game.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/911c1928fad57ec2374ce53925e4e85777db02d6/codes/MMDet3d_tutorials/game.png
--------------------------------------------------------------------------------
/codes/MMDet3d_tutorials/myconfig.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 |     'configs/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py'
3 | ]
4 |
5 | data = dict(
6 | samples_per_gpu=4,
7 | workers_per_gpu=1,
8 | persistent_workers=True,
9 | # train=dict(dataset=dict(ann_file='data/kitti/kitti_infos_val.pkl'), )
10 | # test=dict(
11 | # split='testing',
12 | # ann_file='data/kitti/kitti_infos_test.pkl',
13 | # )
14 | )
15 |
16 | optimizer = dict(
17 | type='AdamW', lr=0.0001, betas=(0.95, 0.99), weight_decay=0.01)
18 | lr_config = None
19 | momentum_config = None
20 |
21 | runner = dict(max_epochs=5)
22 | checkpoint_config = dict(interval=5)
23 | evaluation = dict(interval=5)
24 | log_config = dict(interval=5)
25 |
26 | load_from = 'checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20220301_150306-37dc2420.pth'
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/1-MMEval-介绍.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "08bbcd73",
6 | "metadata": {},
7 | "source": [
8 | "# 1. MMEval 简介\n",
9 | "\n",
10 | "MMEval 是一个统一的跨框架算法评测库,提供高效准确的分布式评测以及多种机器学习框架后端支持,具有以下特点:\n",
11 | "\n",
12 | "- 提供丰富的计算机视觉各细分方向评测指标(自然语言处理方向的评测指标正在支持中)\n",
13 | "\n",
14 | "- 支持多种分布式通信库,实现高效准确的分布式评测。\n",
15 | "\n",
16 | "- 支持多种机器学习框架,根据输入自动分发对应实现。\n",
17 | "\n",
18 | "GitHub 主页:https://github.com/open-mmlab/mmeval \n",
19 | "MMEval 中文文档:https://mmeval.readthedocs.io/zh_CN/latest/\n",
20 | "\n",
21 | "更多相关资料:\n",
22 | "- MMEval 发布介绍文章:https://zhuanlan.zhihu.com/p/579074667\n",
23 | "- MMEval 与其它开源算法评测库的区别与定位:https://www.zhihu.com/question/565197712/answer/2749256174"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "id": "715453c6",
29 | "metadata": {},
30 | "source": [
31 | "# 2. MMEval 安装\n",
32 | "\n",
33 | "MMEval 依赖 Python 3.6+,可以通过 pip 来安装 MMEval:\n",
34 | "```bash\n",
35 | "pip install mmeval\n",
36 | "```\n",
37 | "\n",
38 | "如果要安装 MMEval 中所有评测指标都需要的依赖,可以通过以下命令安装所有的额外依赖:\n",
39 | "```bash\n",
40 | "pip install \"mmeval[all]\"\n",
41 | "```"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 1,
47 | "id": "fccf14cb",
48 | "metadata": {},
49 | "outputs": [
50 | {
51 | "name": "stdout",
52 | "output_type": "stream",
53 | "text": [
54 | "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\r\n",
55 | "Requirement already satisfied: mmeval in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (0.1.0)\r\n",
56 | "Requirement already satisfied: numpy in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from mmeval) (1.23.3)\r\n",
57 | "Requirement already satisfied: pyyaml in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from mmeval) (6.0)\r\n",
58 | "Requirement already satisfied: plum-dispatch in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from mmeval) (1.7.2)\r\n"
59 | ]
60 | }
61 | ],
62 | "source": [
63 | "!pip install mmeval"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "id": "b798ffe9",
69 | "metadata": {},
70 | "source": [
71 | "# 3. MMEval 简单使用"
72 | ]
73 | },
74 | {
75 | "cell_type": "markdown",
76 | "id": "6bb08279",
77 | "metadata": {},
78 | "source": [
79 | "MMEval 中的评测指标提供两种使用方式,以 Accuracy 为例:"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 2,
85 | "id": "cc16d992",
86 | "metadata": {},
87 | "outputs": [
88 | {
89 | "name": "stderr",
90 | "output_type": "stream",
91 | "text": [
92 | "2022-11-16 21:54:21.713877: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n"
93 | ]
94 | }
95 | ],
96 | "source": [
97 | "from mmeval import Accuracy\n",
98 | "import numpy as np\n",
99 | "\n",
100 | "accuracy = Accuracy()"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "34044934",
106 | "metadata": {},
107 | "source": [
108 | "第一种是直接调用实例化的 Accuracy 对象,计算评测指标:"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": 3,
114 | "id": "6c6cd1a0",
115 | "metadata": {},
116 | "outputs": [
117 | {
118 | "data": {
119 | "text/plain": [
120 | "{'top1': 0.5}"
121 | ]
122 | },
123 | "execution_count": 3,
124 | "metadata": {},
125 | "output_type": "execute_result"
126 | }
127 | ],
128 | "source": [
129 | "preds = np.asarray([0, 2, 1, 3])\n",
130 | "labels = np.asarray([0, 1, 2, 3])\n",
131 | "accuracy(preds, labels)\n",
132 | "# {'top1': 0.5}"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "id": "55526b9a",
138 | "metadata": {},
139 | "source": [
140 | "第二种是累积多个批次的数据后,计算评测指标:"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": 4,
146 | "id": "4459d4f0",
147 | "metadata": {},
148 | "outputs": [
149 | {
150 | "data": {
151 | "text/plain": [
152 | "{'top1': 0.218}"
153 | ]
154 | },
155 | "execution_count": 4,
156 | "metadata": {},
157 | "output_type": "execute_result"
158 | }
159 | ],
160 | "source": [
161 | "for i in range(10):\n",
162 | " labels = np.random.randint(0, 4, size=(100, ))\n",
163 | " predicts = np.random.randint(0, 4, size=(100, ))\n",
164 | " accuracy.add(predicts, labels)\n",
165 | "\n",
166 | "accuracy.compute()\n",
167 | "# {'top1': ...}"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "id": "6a07db5a",
173 | "metadata": {},
174 | "source": [
175 | "## 4. 多框架支持"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "id": "2c3a477b",
181 | "metadata": {},
182 | "source": [
183 | "MMEval 中的一些评测指标支持接收不同机器学习框架对应的数据类型(如 Tensor)进行计算,以 Accuracy 为例"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": 5,
189 | "id": "b9f4839d",
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "from mmeval import Accuracy\n",
194 | "\n",
195 | "accuracy = Accuracy()"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "id": "7d5db419",
201 | "metadata": {},
202 | "source": [
203 | "使用 NumPy 数组进行计算:"
204 | ]
205 | },
206 | {
207 | "cell_type": "code",
208 | "execution_count": 6,
209 | "id": "ba51045f",
210 | "metadata": {},
211 | "outputs": [
212 | {
213 | "data": {
214 | "text/plain": [
215 | "{'top1': 0.5}"
216 | ]
217 | },
218 | "execution_count": 6,
219 | "metadata": {},
220 | "output_type": "execute_result"
221 | }
222 | ],
223 | "source": [
224 | "import numpy as np\n",
225 | "\n",
226 | "labels = np.asarray([0, 1, 2, 3])\n",
227 | "preds = np.asarray([0, 2, 1, 3])\n",
228 | "accuracy(preds, labels)"
229 | ]
230 | },
231 | {
232 | "cell_type": "markdown",
233 | "id": "4f80e3d8",
234 | "metadata": {},
235 | "source": [
236 | "使用 torch.Tensor 进行计算:"
237 | ]
238 | },
239 | {
240 | "cell_type": "code",
241 | "execution_count": 7,
242 | "id": "a2092b76",
243 | "metadata": {},
244 | "outputs": [
245 | {
246 | "data": {
247 | "text/plain": [
248 | "{'top1': 0.5}"
249 | ]
250 | },
251 | "execution_count": 7,
252 | "metadata": {},
253 | "output_type": "execute_result"
254 | }
255 | ],
256 | "source": [
257 | "import torch\n",
258 | "\n",
259 | "labels = torch.Tensor([0, 1, 2, 3])\n",
260 | "preds = torch.Tensor([0, 2, 1, 3])\n",
261 | "accuracy(preds, labels)"
262 | ]
263 | },
264 | {
265 | "cell_type": "markdown",
266 | "id": "c7223456",
267 | "metadata": {},
268 | "source": [
269 | "使用 tensorflow.Tensor 进行计算:"
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": 8,
275 | "id": "d54f474f",
276 | "metadata": {},
277 | "outputs": [
278 | {
279 | "name": "stderr",
280 | "output_type": "stream",
281 | "text": [
282 | "2022-11-16 21:55:40.462162: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set\n",
283 | "2022-11-16 21:55:40.462301: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
284 | "2022-11-16 21:55:40.623373: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \n",
285 | "pciBusID: 0000:26:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
286 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
287 | "2022-11-16 21:55:40.625176: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 1 with properties: \n",
288 | "pciBusID: 0000:2c:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
289 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
290 | "2022-11-16 21:55:40.626921: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 2 with properties: \n",
291 | "pciBusID: 0000:65:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
292 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
293 | "2022-11-16 21:55:40.628668: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 3 with properties: \n",
294 | "pciBusID: 0000:6a:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
295 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
296 | "2022-11-16 21:55:40.630422: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 4 with properties: \n",
297 | "pciBusID: 0000:a3:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
298 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
299 | "2022-11-16 21:55:40.632176: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 5 with properties: \n",
300 | "pciBusID: 0000:a8:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
301 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
302 | "2022-11-16 21:55:40.633915: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 6 with properties: \n",
303 | "pciBusID: 0000:e1:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
304 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
305 | "2022-11-16 21:55:40.635657: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 7 with properties: \n",
306 | "pciBusID: 0000:e7:00.0 name: NVIDIA A100-SXM4-80GB computeCapability: 8.0\n",
307 | "coreClock: 1.41GHz coreCount: 108 deviceMemorySize: 79.21GiB deviceMemoryBandwidth: 1.85TiB/s\n",
308 | "2022-11-16 21:55:40.635681: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
309 | "2022-11-16 21:55:40.635709: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11\n",
310 | "2022-11-16 21:55:40.635726: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11\n",
311 | "2022-11-16 21:55:40.635744: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
312 | "2022-11-16 21:55:40.635763: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
313 | "2022-11-16 21:55:40.635877: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcusolver.so.10'; dlerror: libcusolver.so.10: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages/cv2/../../lib64:/usr/local/cuda-11.2/lib64:\n",
314 | "2022-11-16 21:55:40.635903: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11\n",
315 | "2022-11-16 21:55:40.635922: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8\n",
316 | "2022-11-16 21:55:40.635930: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1757] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\n",
317 | "Skipping registering GPU devices...\n",
318 | "2022-11-16 21:55:40.655135: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set\n",
319 | "2022-11-16 21:55:40.655229: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
320 | "2022-11-16 21:55:40.655235: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267] \n"
321 | ]
322 | },
323 | {
324 | "data": {
325 | "text/plain": [
326 | "{'top1': 0.5}"
327 | ]
328 | },
329 | "execution_count": 8,
330 | "metadata": {},
331 | "output_type": "execute_result"
332 | }
333 | ],
334 | "source": [
335 | "import tensorflow as tf\n",
336 | "\n",
337 | "labels = tf.convert_to_tensor([0, 1, 2, 3])\n",
338 | "preds = tf.convert_to_tensor([0, 2, 1, 3])\n",
339 | "accuracy(preds, labels)"
340 | ]
341 | },
342 | {
343 | "cell_type": "markdown",
344 | "id": "5ff33ed0",
345 | "metadata": {},
346 | "source": [
347 | "MMEval 的评测指标提供了一些特定机器学习框架的指标计算实现,具体可以在[支持矩阵](https://mmeval.readthedocs.io/zh_CN/latest/get_started/support_matrix.html)中查看"
348 | ]
349 | },
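
The pip output earlier shows that MMEval depends on plum-dispatch, which provides this input-type-based dispatch generically. As a rough sketch of the idea only (hypothetical code, not MMEval's actual implementation), dispatching a metric to a framework-specific implementation can look like this:

```python
import numpy as np

def _accuracy_numpy(preds, labels):
    # NumPy implementation of top-1 accuracy
    return {'top1': float((preds == labels).mean())}

def _accuracy_torch(preds, labels):
    # PyTorch implementation of top-1 accuracy
    return {'top1': (preds == labels).float().mean().item()}

def accuracy(preds, labels):
    # Pick the implementation that matches the input type
    if isinstance(preds, np.ndarray):
        return _accuracy_numpy(preds, labels)
    try:
        import torch
        if isinstance(preds, torch.Tensor):
            return _accuracy_torch(preds, labels)
    except ImportError:
        pass
    raise TypeError(f'unsupported input type: {type(preds)}')

print(accuracy(np.asarray([0, 2, 1, 3]), np.asarray([0, 1, 2, 3])))  # {'top1': 0.5}
```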
350 | {
351 | "cell_type": "code",
352 | "execution_count": null,
353 | "id": "d679ac84",
354 | "metadata": {},
355 | "outputs": [],
356 | "source": []
357 | }
358 | ],
359 | "metadata": {
360 | "kernelspec": {
361 | "display_name": "Python 3.6.8 64-bit",
362 | "language": "python",
363 | "name": "python3"
364 | },
365 | "language_info": {
366 | "codemirror_mode": {
367 | "name": "ipython",
368 | "version": 3
369 | },
370 | "file_extension": ".py",
371 | "mimetype": "text/x-python",
372 | "name": "python",
373 | "nbconvert_exporter": "python",
374 | "pygments_lexer": "ipython3",
375 | "version": "3.6.8"
376 | },
377 | "vscode": {
378 | "interpreter": {
379 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
380 | }
381 | }
382 | },
383 | "nbformat": 4,
384 | "nbformat_minor": 5
385 | }
386 |
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/2-MMEval-使用.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "f03f4492",
6 | "metadata": {},
7 | "source": [
8 | "本节将以 CIFAR-10 数据集的评测为例,分别介绍如何使用 MMEval 结合 torch.distributed 和 MPI4Py 进行分布式评测,相关代码可以在 [mmeval/examples/cifar10_dist_eval](https://github.com/open-mmlab/mmeval/tree/main/examples/cifar10_dist_eval) 中找到。"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 1,
14 | "id": "9b4c32f3",
15 | "metadata": {
16 | "scrolled": false
17 | },
18 | "outputs": [
19 | {
20 | "name": "stdout",
21 | "output_type": "stream",
22 | "text": [
23 | "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n",
24 | "Requirement already satisfied: torch in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (1.12.1)\n",
25 | "Requirement already satisfied: torchvision in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (0.13.1)\n",
26 | "Requirement already satisfied: tqdm in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (4.64.1)\n",
27 | "Requirement already satisfied: typing_extensions in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from torch) (4.4.0)\n",
28 | "Requirement already satisfied: numpy in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from torchvision) (1.23.3)\n",
29 | "Requirement already satisfied: requests in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from torchvision) (2.28.1)\n",
30 | "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from torchvision) (9.2.0)\n",
31 | "Requirement already satisfied: certifi>=2017.4.17 in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from requests->torchvision) (2022.9.24)\n",
32 | "Requirement already satisfied: charset-normalizer<3,>=2 in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from requests->torchvision) (2.0.4)\n",
33 | "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from requests->torchvision) (1.26.12)\n",
34 | "Requirement already satisfied: idna<4,>=2.5 in /nvme/data/xiaoyancong/miniconda3/envs/mmeval/lib/python3.8/site-packages (from requests->torchvision) (3.4)\n"
35 | ]
36 | }
37 | ],
38 | "source": [
39 | "!pip install torch torchvision tqdm"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 2,
45 | "id": "9ed6cb08",
46 | "metadata": {},
47 | "outputs": [
48 | {
49 | "name": "stderr",
50 | "output_type": "stream",
51 | "text": [
52 | "2022-11-16 22:05:56.763074: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n"
53 | ]
54 | }
55 | ],
56 | "source": [
57 | "import torch\n",
58 | "import torchvision as tv\n",
59 | "import tqdm\n",
60 | "from torch.utils.data import DataLoader\n",
61 | "\n",
62 | "from mmeval import Accuracy"
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "id": "7f696518",
68 | "metadata": {},
69 | "source": [
70 | "## 1. 单进程评测"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "id": "e4598813",
76 | "metadata": {},
77 | "source": [
78 | "首先我们需要加载 CIFAR-10 测试数据,我们可以使用 TorchVison 提供的数据集类。"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 3,
84 | "id": "dfcb3567",
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "def get_eval_dataloader():\n",
89 | " dataset = tv.datasets.CIFAR10(\n",
90 | " root='./',\n",
91 | " train=False,\n",
92 | " download=True,\n",
93 | " transform=tv.transforms.ToTensor())\n",
94 | " return DataLoader(dataset, batch_size=1)"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "id": "5b548525",
100 | "metadata": {},
101 | "source": [
102 | "其次,我们需要准备待评测的模型,这里我们使用 TorchVision 中的 resnet18。"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 4,
108 | "id": "54fc3c57",
109 | "metadata": {},
110 | "outputs": [],
111 | "source": [
112 | "def get_model(pretrained_model_fpath=None):\n",
113 | " model = tv.models.resnet18(num_classes=10)\n",
114 | " if pretrained_model_fpath is not None:\n",
115 | " model.load_state_dict(torch.load(pretrained_model_fpath))\n",
116 | " return model.eval()"
117 | ]
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "id": "f1a22235",
122 | "metadata": {},
123 | "source": [
124 | "有了待评测的数据集与模型,就可以使用 mmeval.Accuracy 指标对模型预测结果进行评测。"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": 5,
130 | "id": "d6b2097e",
131 | "metadata": {},
132 | "outputs": [
133 | {
134 | "name": "stdout",
135 | "output_type": "stream",
136 | "text": [
137 | "Files already downloaded and verified\n"
138 | ]
139 | },
140 | {
141 | "name": "stderr",
142 | "output_type": "stream",
143 | "text": [
144 | "100%|███████████████████████████████████████████████████████████████████████████| 10000/10000 [00:33<00:00, 302.07it/s]\n"
145 | ]
146 | },
147 | {
148 | "name": "stdout",
149 | "output_type": "stream",
150 | "text": [
151 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n"
152 | ]
153 | }
154 | ],
155 | "source": [
156 | "eval_dataloader = get_eval_dataloader()\n",
157 | "model = get_model('./cifar10_resnet18.pth').cuda()\n",
158 | "# 实例化 `Accuracy`,计算 top1 与 top3 准确率\n",
159 | "accuracy = Accuracy(topk=(1, 3))\n",
160 | "\n",
161 | "with torch.no_grad():\n",
162 | " for images, labels in tqdm.tqdm(eval_dataloader):\n",
163 | " predicted_score = model(images.cuda()).cpu()\n",
164 | " # 累计批次数据,中间结果将保存在 `accuracy._results` 中\n",
165 | " accuracy.add(predictions=predicted_score, labels=labels)\n",
166 | "\n",
167 | "# 调用 `accuracy.compute` 进行指标计算\n",
168 | "print(accuracy.compute())\n",
169 | "# 调用 `accuracy.reset` 清除保存在 `accuracy._results` 中的中间结果\n",
170 | "accuracy.reset()"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "id": "5f98e385",
176 | "metadata": {},
177 | "source": [
178 | "## 2. 使用 torch.distributed 进行分布式评测"
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "id": "2e819510",
184 | "metadata": {},
185 | "source": [
186 | "在 MMEval 中为 torch.distributed 实现了两个分布式通信后端,分别是 TorchCPUDist 和 TorchCUDADist。\n",
187 | "\n",
188 | "为 MMEval 设置分布式通信后端的方式有两种:"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 6,
194 | "id": "ec104453",
195 | "metadata": {},
196 | "outputs": [],
197 | "source": [
198 | "from mmeval.core import set_default_dist_backend\n",
199 | "from mmeval import Accuracy\n",
200 | "\n",
201 | "# 1. 设置全局默认分布式通信后端\n",
202 | "set_default_dist_backend('torch_cpu')\n",
203 | "\n",
204 | "# 2. 初始化评测指标时候通过 `dist_backend` 传参\n",
205 | "accuracy = Accuracy(dist_backend='torch_cpu')"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "a499999a",
211 | "metadata": {},
212 | "source": [
213 | "结合上述单进程评测的代码,再加入数据集切片和分布式初始化,即可实现分布式评测。"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": 7,
219 | "id": "94e921fc",
220 | "metadata": {},
221 | "outputs": [
222 | {
223 | "name": "stdout",
224 | "output_type": "stream",
225 | "text": [
226 | "import torch\r\n",
227 | "import torchvision as tv\r\n",
228 | "import tqdm\r\n",
229 | "from torch.utils.data import DataLoader, DistributedSampler\r\n",
230 | "\r\n",
231 | "from mmeval import Accuracy\r\n",
232 | "\r\n",
233 | "\r\n",
234 | "def get_eval_dataloader(rank=0, num_replicas=1):\r\n",
235 | " dataset = tv.datasets.CIFAR10(\r\n",
236 | " root='./',\r\n",
237 | " train=False,\r\n",
238 | " download=True,\r\n",
239 | " transform=tv.transforms.ToTensor())\r\n",
240 | " dist_sampler = DistributedSampler(\r\n",
241 | " dataset, num_replicas=num_replicas, rank=rank)\r\n",
242 | " data_loader = DataLoader(dataset, batch_size=1, sampler=dist_sampler)\r\n",
243 | " return data_loader, len(dataset)\r\n",
244 | "\r\n",
245 | "\r\n",
246 | "def get_model(pretrained_model_fpath=None):\r\n",
247 | " model = tv.models.resnet18(num_classes=10)\r\n",
248 | " if pretrained_model_fpath is not None:\r\n",
249 | " model.load_state_dict(torch.load(pretrained_model_fpath))\r\n",
250 | " return model.eval()\r\n",
251 | "\r\n",
252 | "\r\n",
253 | "def eval_fn(rank, process_num):\r\n",
254 | " torch.distributed.init_process_group(\r\n",
255 | " backend='gloo',\r\n",
256 | " init_method='tcp://127.0.0.1:2345',\r\n",
257 | " world_size=process_num,\r\n",
258 | " rank=rank)\r\n",
259 | " torch.cuda.set_device(rank)\r\n",
260 | "\r\n",
261 | " eval_dataloader, total_num_samples = get_eval_dataloader(rank, process_num)\r\n",
262 | " model = get_model('./cifar10_resnet18.pth').cuda()\r\n",
263 | " accuracy = Accuracy(topk=(1, 3), dist_backend='torch_cpu')\r\n",
264 | "\r\n",
265 | " with torch.no_grad():\r\n",
266 | " for images, labels in tqdm.tqdm(eval_dataloader, disable=(rank != 0)):\r\n",
267 | " predicted_score = model(images.cuda()).cpu()\r\n",
268 | " accuracy.add(predictions=predicted_score, labels=labels)\r\n",
269 | "\r\n",
270 | " print(accuracy.compute(size=total_num_samples))\r\n",
271 | " accuracy.reset()\r\n",
272 | "\r\n",
273 | "\r\n",
274 | "if __name__ == '__main__':\r\n",
275 | " process_num = 3\r\n",
276 | " torch.multiprocessing.spawn(\r\n",
277 | " eval_fn, nprocs=process_num, args=(process_num, ))\r\n"
278 | ]
279 | }
280 | ],
281 | "source": [
282 | "!cat cifar10_dist_eval/cifar10_eval_torch_dist.py"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": 8,
288 | "id": "2701a60b",
289 | "metadata": {},
290 | "outputs": [
291 | {
292 | "name": "stdout",
293 | "output_type": "stream",
294 | "text": [
295 | "2022-11-16 22:10:49.905219: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
296 | "2022-11-16 22:10:53.194848: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
297 | "2022-11-16 22:10:53.194848: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
298 | "2022-11-16 22:10:53.207048: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
299 | "Files already downloaded and verified\n",
300 | "Files already downloaded and verified\n",
301 | "Files already downloaded and verified\n",
302 | "100%|██████████████████████████████████████| 3334/3334 [00:11<00:00, 283.87it/s]\n",
303 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n",
304 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n",
305 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n"
306 | ]
307 | }
308 | ],
309 | "source": [
310 | "!python cifar10_dist_eval/cifar10_eval_torch_dist.py"
311 | ]
312 | },
313 | {
314 | "cell_type": "markdown",
315 | "id": "68763bff",
316 | "metadata": {},
317 | "source": [
318 | "## 3. 使用 MPI4Py 进行分布式评测"
319 | ]
320 | },
321 | {
322 | "cell_type": "markdown",
323 | "id": "6b26bc33",
324 | "metadata": {},
325 | "source": [
326 | "MMEval 将分布式通信功能抽象解耦了,因此虽然上述例子使用的是 PyTorch 模型和数据加载,我们仍然可以使用除 torch.distributed 以外的分布式通信后端来实现分布式评测。下面将展示如何使用 MPI4Py 作为分布式通信后端来进行分布式评测。"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "id": "4ede318a",
332 | "metadata": {},
333 | "source": [
334 | "首先需要安装 MPI4Py 以及 openmpi,建议使用 conda 进行安装:"
335 | ]
336 | },
337 | {
338 | "cell_type": "code",
339 | "execution_count": 9,
340 | "id": "9398f9bf",
341 | "metadata": {},
342 | "outputs": [
343 | {
344 | "name": "stdout",
345 | "output_type": "stream",
346 | "text": [
347 | "Collecting package metadata (current_repodata.json): done\n",
348 | "Solving environment: done\n",
349 | "\n",
350 | "# All requested packages already installed.\n",
351 | "\n"
352 | ]
353 | }
354 | ],
355 | "source": [
356 | "!conda install -y openmpi mpi4py"
357 | ]
358 | },
359 | {
360 | "cell_type": "code",
361 | "execution_count": 10,
362 | "id": "9a649abf",
363 | "metadata": {},
364 | "outputs": [
365 | {
366 | "name": "stdout",
367 | "output_type": "stream",
368 | "text": [
369 | "import torch\r\n",
370 | "import torchvision as tv\r\n",
371 | "import tqdm\r\n",
372 | "from mpi4py import MPI\r\n",
373 | "from torch.utils.data import DataLoader, DistributedSampler\r\n",
374 | "\r\n",
375 | "from mmeval import Accuracy\r\n",
376 | "\r\n",
377 | "\r\n",
378 | "def get_eval_dataloader(rank=0, num_replicas=1):\r\n",
379 | " dataset = tv.datasets.CIFAR10(\r\n",
380 | " root='./',\r\n",
381 | " train=False,\r\n",
382 | " download=True,\r\n",
383 | " transform=tv.transforms.ToTensor())\r\n",
384 | " dist_sampler = DistributedSampler(\r\n",
385 | " dataset, num_replicas=num_replicas, rank=rank)\r\n",
386 | " data_loader = DataLoader(dataset, batch_size=1, sampler=dist_sampler)\r\n",
387 | " return data_loader, len(dataset)\r\n",
388 | "\r\n",
389 | "\r\n",
390 | "def get_model(pretrained_model_fpath=None):\r\n",
391 | " model = tv.models.resnet18(num_classes=10)\r\n",
392 | " if pretrained_model_fpath is not None:\r\n",
393 | " model.load_state_dict(torch.load(pretrained_model_fpath))\r\n",
394 | " return model.eval()\r\n",
395 | "\r\n",
396 | "\r\n",
397 | "def eval_fn(rank, process_num):\r\n",
398 | " torch.cuda.set_device(rank)\r\n",
399 | " eval_dataloader, total_num_samples = get_eval_dataloader(rank, process_num)\r\n",
400 | " model = get_model('./cifar10_resnet18.pth').cuda()\r\n",
401 | " accuracy = Accuracy(topk=(1, 3), dist_backend='mpi4py')\r\n",
402 | "\r\n",
403 | " with torch.no_grad():\r\n",
404 | " for images, labels in tqdm.tqdm(eval_dataloader, disable=(rank != 0)):\r\n",
405 | " predicted_score = model(images.cuda()).cpu()\r\n",
406 | " accuracy.add(predictions=predicted_score, labels=labels)\r\n",
407 | "\r\n",
408 | " print(accuracy.compute(size=total_num_samples))\r\n",
409 | " accuracy.reset()\r\n",
410 | "\r\n",
411 | "\r\n",
412 | "if __name__ == '__main__':\r\n",
413 | " comm = MPI.COMM_WORLD\r\n",
414 | " eval_fn(comm.Get_rank(), comm.Get_size())\r\n"
415 | ]
416 | }
417 | ],
418 | "source": [
419 | "!cat cifar10_dist_eval/cifar10_eval_mpi4py.py"
420 | ]
421 | },
422 | {
423 | "cell_type": "markdown",
424 | "id": "6f585b4c",
425 | "metadata": {},
426 | "source": [
427 | "使用 mpirun 作为分布式评测启动方式:"
428 | ]
429 | },
430 | {
431 | "cell_type": "code",
432 | "execution_count": 1,
433 | "id": "843293fa",
434 | "metadata": {},
435 | "outputs": [
436 | {
437 | "name": "stdout",
438 | "output_type": "stream",
439 | "text": [
440 | "2022-11-16 22:12:59.873751: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
441 | "2022-11-16 22:12:59.873752: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
442 | "2022-11-16 22:12:59.874402: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
443 | "Files already downloaded and verified\n",
444 | "Files already downloaded and verified\n",
445 | "Files already downloaded and verified\n",
446 | "100%|██████████| 3334/3334 [00:11<00:00, 282.08it/s]{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n",
447 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n",
448 | "{'top1': 0.7458999752998352, 'top3': 0.8931000232696533}\n",
449 | "\n"
450 | ]
451 | }
452 | ],
453 | "source": [
454 | "# 使用 mpirun 启动 3 个进程\n",
455 | "!mpirun -np 3 python cifar10_dist_eval/cifar10_eval_mpi4py.py"
456 | ]
457 | },
458 | {
459 | "cell_type": "code",
460 | "execution_count": null,
461 | "id": "d5ea95e7",
462 | "metadata": {},
463 | "outputs": [],
464 | "source": []
465 | }
466 | ],
467 | "metadata": {
468 | "kernelspec": {
469 | "display_name": "Python 3 (ipykernel)",
470 | "language": "python",
471 | "name": "python3"
472 | },
473 | "language_info": {
474 | "codemirror_mode": {
475 | "name": "ipython",
476 | "version": 3
477 | },
478 | "file_extension": ".py",
479 | "mimetype": "text/x-python",
480 | "name": "python",
481 | "nbconvert_exporter": "python",
482 | "pygments_lexer": "ipython3",
483 | "version": "3.8.13"
484 | }
485 | },
486 | "nbformat": 4,
487 | "nbformat_minor": 5
488 | }
489 |
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/cifar10_dist_eval/README.md:
--------------------------------------------------------------------------------
1 | # CIFAR-10 Evaluation Example
2 |
3 | ## Single process evaluation
4 |
5 | ```bash
6 | python cifar10_eval.py
7 | ```
8 |
9 | ## Multiple processes evaluation with torch.distributed
10 |
11 | ```bash
12 | python cifar10_eval_torch_dist.py
13 | ```
14 |
15 | ## Multiple processes evaluation with MPI4Py
16 |
17 | ```bash
18 | mpirun -np 3 python3 cifar10_eval_mpi4py.py
19 | ```
20 |
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/cifar10_dist_eval/cifar10_eval.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision as tv
3 | import tqdm
4 | from torch.utils.data import DataLoader
5 |
6 | from mmeval import Accuracy
7 |
8 |
9 | def get_eval_dataloader():
10 | dataset = tv.datasets.CIFAR10(
11 | root='./',
12 | train=False,
13 | download=True,
14 | transform=tv.transforms.ToTensor())
15 | return DataLoader(dataset, batch_size=1)
16 |
17 |
18 | def get_model(pretrained_model_fpath=None):
19 | model = tv.models.resnet18(num_classes=10)
20 | if pretrained_model_fpath is not None:
21 | model.load_state_dict(torch.load(pretrained_model_fpath))
22 | return model.eval()
23 |
24 |
25 | eval_dataloader = get_eval_dataloader()
26 | model = get_model()
27 | accuracy = Accuracy(topk=(1, 3))
28 |
29 | with torch.no_grad():
30 | for images, labels in tqdm.tqdm(eval_dataloader):
31 | predicted_score = model(images)
32 | accuracy.add(predictions=predicted_score, labels=labels)
33 |
34 | print(accuracy.compute())
35 | accuracy.reset()
36 |
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/cifar10_dist_eval/cifar10_eval_mpi4py.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision as tv
3 | import tqdm
4 | from mpi4py import MPI
5 | from torch.utils.data import DataLoader, DistributedSampler
6 |
7 | from mmeval import Accuracy
8 |
9 |
10 | def get_eval_dataloader(rank=0, num_replicas=1):
11 | dataset = tv.datasets.CIFAR10(
12 | root='./',
13 | train=False,
14 | download=True,
15 | transform=tv.transforms.ToTensor())
16 | dist_sampler = DistributedSampler(
17 | dataset, num_replicas=num_replicas, rank=rank)
18 | data_loader = DataLoader(dataset, batch_size=1, sampler=dist_sampler)
19 | return data_loader, len(dataset)
20 |
21 |
22 | def get_model(pretrained_model_fpath=None):
23 | model = tv.models.resnet18(num_classes=10)
24 | if pretrained_model_fpath is not None:
25 | model.load_state_dict(torch.load(pretrained_model_fpath))
26 | return model.eval()
27 |
28 |
29 | def eval_fn(rank, process_num):
30 | torch.cuda.set_device(rank)
31 | eval_dataloader, total_num_samples = get_eval_dataloader(rank, process_num)
32 | model = get_model('./cifar10_resnet18.pth').cuda()
33 | accuracy = Accuracy(topk=(1, 3), dist_backend='mpi4py')
34 |
35 | with torch.no_grad():
36 | for images, labels in tqdm.tqdm(eval_dataloader, disable=(rank != 0)):
37 | predicted_score = model(images.cuda()).cpu()
38 | accuracy.add(predictions=predicted_score, labels=labels)
39 |
40 | print(accuracy.compute(size=total_num_samples))
41 | accuracy.reset()
42 |
43 |
44 | if __name__ == '__main__':
45 | comm = MPI.COMM_WORLD
46 | eval_fn(comm.Get_rank(), comm.Get_size())
47 |
--------------------------------------------------------------------------------
/codes/MMEval_tutorials/cifar10_dist_eval/cifar10_eval_torch_dist.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision as tv
3 | import tqdm
4 | from torch.utils.data import DataLoader, DistributedSampler
5 |
6 | from mmeval import Accuracy
7 |
8 |
9 | def get_eval_dataloader(rank=0, num_replicas=1):
10 | dataset = tv.datasets.CIFAR10(
11 | root='./',
12 | train=False,
13 | download=True,
14 | transform=tv.transforms.ToTensor())
15 | dist_sampler = DistributedSampler(
16 | dataset, num_replicas=num_replicas, rank=rank)
17 | data_loader = DataLoader(dataset, batch_size=1, sampler=dist_sampler)
18 | return data_loader, len(dataset)
19 |
20 |
21 | def get_model(pretrained_model_fpath=None):
22 | model = tv.models.resnet18(num_classes=10)
23 | if pretrained_model_fpath is not None:
24 | model.load_state_dict(torch.load(pretrained_model_fpath))
25 | return model.eval()
26 |
27 |
28 | def eval_fn(rank, process_num):
29 | torch.distributed.init_process_group(
30 | backend='gloo',
31 | init_method='tcp://127.0.0.1:2345',
32 | world_size=process_num,
33 | rank=rank)
34 | torch.cuda.set_device(rank)
35 |
36 | eval_dataloader, total_num_samples = get_eval_dataloader(rank, process_num)
37 | model = get_model('./cifar10_resnet18.pth').cuda()
38 | accuracy = Accuracy(topk=(1, 3), dist_backend='torch_cpu')
39 |
40 | with torch.no_grad():
41 | for images, labels in tqdm.tqdm(eval_dataloader, disable=(rank != 0)):
42 | predicted_score = model(images.cuda()).cpu()
43 | accuracy.add(predictions=predicted_score, labels=labels)
44 |
45 | print(accuracy.compute(size=total_num_samples))
46 | accuracy.reset()
47 |
48 |
49 | if __name__ == '__main__':
50 | process_num = 3
51 | torch.multiprocessing.spawn(
52 | eval_fn, nprocs=process_num, args=(process_num, ))
53 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/img/MAE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/911c1928fad57ec2374ce53925e4e85777db02d6/codes/MMSelfSup_tutorials/img/MAE.png
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/img/SimCLR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/911c1928fad57ec2374ce53925e4e85777db02d6/codes/MMSelfSup_tutorials/img/SimCLR.png
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/img/mmselfsup_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/911c1928fad57ec2374ce53925e4e85777db02d6/codes/MMSelfSup_tutorials/img/mmselfsup_logo.png
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【1】模型自监督预训练 之 SimCLR.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | ""
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 模型自监督预训练 之 SimCLR\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 自监督预训练方法介绍:SimCLR"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "**论文地址**:https://arxiv.org/pdf/2002.05709.pdf\n",
48 | "\n",
49 | "**SimCLR 基本思想**:对一张图片做两次不同的数据增强操作,增强后的两张图片互为彼此的正样本,同一个 batch 里其他图片的增强结果为这两张增强图片的负样本。SimCLR 要求编码器最大化当前图像与其正样本表示的相似度,最小化当前图像与其负样本表示的相似度。"
50 | ]
51 | },
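
To make the contrastive objective above concrete, here is a minimal, self-contained sketch of the NT-Xent (InfoNCE) loss that SimCLR optimizes; this is an illustrative toy with hypothetical names, not MMSelfSup's implementation:

```python
import torch
import torch.nn.functional as F

def nt_xent_loss(z1, z2, temperature=0.5):
    """z1, z2: (N, D) projections of two augmented views of the same N images."""
    z = F.normalize(torch.cat([z1, z2], dim=0), dim=1)  # (2N, D) unit vectors
    sim = z @ z.t() / temperature                       # (2N, 2N) scaled cosine similarities
    sim.fill_diagonal_(float('-inf'))                   # a view is never its own negative
    n = z1.size(0)
    # Row i's positive is the other augmented view of the same image
    targets = torch.cat([torch.arange(n) + n, torch.arange(n)])
    return F.cross_entropy(sim, targets)

loss = nt_xent_loss(torch.randn(8, 128), torch.randn(8, 128))  # toy batch of 8
```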
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
56 | "
"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "## 1. 环境配置"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "# Check nvcc version\n",
80 | "!nvcc -V"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "# Check GCC version\n",
90 | "!gcc --version"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "# Check PyTorch installation\n",
100 | "import torch, torchvision\n",
101 | "print(torch.__version__)\n",
102 | "print(torch.cuda.is_available())"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {},
108 | "source": [
109 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "!pip install openmim"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "!mim install mmcv"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "### 1.3 安装 MMSelfSup"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": null,
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "%cd /content"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
153 | "%cd /content/mmselfsup"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "# Install MMSelfSup from source\n",
163 | "!pip install -e . "
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "### 1.4 检查安装是否正确"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "import mmselfsup\n",
180 | "print(mmselfsup.__version__)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "## 2. 准备数据集"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "### 2.0 数据集介绍"
195 | ]
196 | },
197 | {
198 | "cell_type": "markdown",
199 | "metadata": {},
200 | "source": [
201 | "本教程将在 `Tiny ImageNet` 数据集上训练自监督模型 SimCLR。\n",
202 | "\n",
203 | "Tiny ImageNet 数据集是 ImageNet 的一个子集。\n",
204 | "\n",
205 | "该数据集包含 200 个类别,每个类别有 500 张训练图片、50 张验证图片和 50 张测试图片,共 120,000 张图像。每张图片均为 64×64 彩色图片。\n",
206 | "\n",
207 | "数据集官方下载地址:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
208 | ]
209 | },
210 | {
211 | "cell_type": "markdown",
212 | "metadata": {},
213 | "source": [
214 | "### 2.1 下载数据集"
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "metadata": {},
220 | "source": [
221 | "使用 GNU [Wget](https://www.gnu.org/software/wget/) 工具从斯坦福官方网站下载:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "metadata": {},
228 | "outputs": [],
229 | "source": [
230 | "%cd /content/mmselfsup"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": [
239 | "!mkdir data\n",
240 | "%cd data\n",
241 | "!wget http://cs231n.stanford.edu/tiny-imagenet-200.zip"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": [
248 | "### 2.2 解压数据集"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {},
255 | "outputs": [],
256 | "source": [
257 | "!unzip -q tiny-imagenet-200.zip"
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": null,
263 | "metadata": {},
264 | "outputs": [],
265 | "source": [
266 | "!rm -rf tiny-imagenet-200.zip"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "### 2.3 查看数据集目录"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "# Check data directory\n",
283 | "!apt-get install tree\n",
284 | "!tree -d /content/mmselfsup/data"
285 | ]
286 | },
287 | {
288 | "cell_type": "markdown",
289 | "metadata": {},
290 | "source": [
291 | "### 2.4 准备标注文件"
292 | ]
293 | },
294 | {
295 | "cell_type": "markdown",
296 | "metadata": {},
297 | "source": [
298 | "为了减少大家重写 `加载数据集` 代码的负担,我们整理好了标注文件,复制到数据集根目录 `mmselfsup/data/tiny-imagenet-200` 下即可。"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": null,
304 | "metadata": {},
305 | "outputs": [],
306 | "source": [
307 | "%cd /content/mmselfsup/data"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "metadata": {},
314 | "outputs": [],
315 | "source": [
316 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/train.txt -P tiny-imagenet-200\n",
317 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/val.txt -P tiny-imagenet-200"
318 | ]
319 | },
320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "## 3. 写模型自监督预训练的配置文件"
325 | ]
326 | },
327 | {
328 | "cell_type": "markdown",
329 | "metadata": {},
330 | "source": [
331 | "1. 新建一个名为 `simclr_resnet50_1xb32-coslr-1e_tinyin200.py` 的配置文件。(配置文件命名要求 & 含义可参考[这里](https://mmsegmentation.readthedocs.io/zh_CN/latest/tutorials/config.html#id3))\n",
332 | "\n",
333 | "\n",
334 | "\n",
335 | "2. `simclr_resnet50_1xb32-coslr-1e_tinyin200.py` 训练配置文件的内容:\n",
336 | " 1. 继承 [simclr_resnet50_8xb32-coslr-200e_in1k.py](https://github.com/open-mmlab/mmselfsup/blob/master/configs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py) 配置文件\n",
337 | " 2. 根据需求修改参数 samples_per_gpu(单个 GPU 的 Batch size)和 workers_per_gpu (单个 GPU 分配的数据加载线程数)\n",
338 | " 3. 修改数据集路径和数据标注文件路径\n",
339 | " 4. 根据 batch size 调整学习率(调整原则请参考:[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id2))\n",
340 | " 5. 修改训练的总轮数 epoch"
341 | ]
342 | },
343 | {
344 | "cell_type": "code",
345 | "execution_count": null,
346 | "metadata": {},
347 | "outputs": [],
348 | "source": [
349 | "%cd /content/mmselfsup"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": null,
355 | "metadata": {},
356 | "outputs": [],
357 | "source": [
358 | "%%writefile /content/mmselfsup/configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py\n",
359 | "\n",
360 | "_base_ = 'simclr_resnet50_8xb32-coslr-200e_in1k.py'\n",
361 | "\n",
362 | "# dataset\n",
363 | "data = dict(\n",
364 | " samples_per_gpu=32, \n",
365 | " workers_per_gpu=2,\n",
366 | " train=dict(\n",
367 | " data_source=dict(\n",
368 | " data_prefix='data/tiny-imagenet-200/train',\n",
369 | " ann_file='data/tiny-imagenet-200/train.txt',\n",
370 | " )\n",
371 | " )\n",
372 | ")\n",
373 | "\n",
374 | "# optimizer\n",
375 | "optimizer = dict(\n",
376 | " lr=0.3 * ((32 * 1) / (32 * 8)),\n",
377 | ")\n",
378 | "\n",
379 | "runner = dict(max_epochs=1)"
380 | ]
381 | },
382 | {
383 | "cell_type": "markdown",
384 | "metadata": {},
385 | "source": [
386 | "## 4. 模型自监督预训练"
387 | ]
388 | },
389 | {
390 | "cell_type": "markdown",
391 | "metadata": {},
392 | "source": [
393 | "我们推荐使用分布式训练工具 [tools/dist_train.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/dist_train.sh) 来启动训练任务(即使您只用一张 GPU 进行训练)。\n",
394 | "因为一些自监督预训练算法需要用多张 GPU 进行训练,为此 MMSelfSup 支持了多卡训练可能会用到的模块,如 `SyncBN` 等。如果算法在训练的过程中使用到了这些模块,但不使用分布式训练,就会报错。\n",
395 | "\n",
396 | "```shell\n",
397 | "bash tools/dist_train.sh ${CONFIG_FILE} ${GPUS} --work-dir ${YOUR_WORK_DIR} [optional arguments]\n",
398 | "```\n",
399 | "\n",
400 | "参数:\n",
401 | "+ CONFIG_FILE:自监督训练的配置文件所在路径\n",
402 | "\n",
403 | "+ GPUS:进行训练时所使用的 GPU 数量\n",
404 | "\n",
405 | "+ work-dir:训练过程中产生模型和日志等文件的保存路径\n",
406 | "\n",
407 | "其他可选参数 `optional arguments` 可参考[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id3)。"
408 | ]
409 | },
410 | {
411 | "cell_type": "code",
412 | "execution_count": null,
413 | "metadata": {},
414 | "outputs": [],
415 | "source": [
416 | "%cd /content/mmselfsup"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": null,
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "!bash tools/dist_train.sh \\\n",
426 | "configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py \\\n",
427 | "1 \\\n",
428 | "--work_dir work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/ "
429 | ]
430 | }
431 | ],
432 | "metadata": {
433 | "kernelspec": {
434 | "display_name": "Python 3",
435 | "language": "python",
436 | "name": "python3"
437 | },
438 | "language_info": {
439 | "codemirror_mode": {
440 | "name": "ipython",
441 | "version": 3
442 | },
443 | "file_extension": ".py",
444 | "mimetype": "text/x-python",
445 | "name": "python",
446 | "nbconvert_exporter": "python",
447 | "pygments_lexer": "ipython3",
448 | "version": "3.7.0"
449 | }
450 | },
451 | "nbformat": 4,
452 | "nbformat_minor": 2
453 | }
454 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【2】图片向量可视化 t-SNE.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 图片向量可视化 t-SNE\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 任务介绍"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "**t-SNE** 是一种数据降维与可视化的方法。当我们想对高维数据进行分类时,可以先对其使用 t-SNE 进行可视化,看其是否具备可分性。具体来说,就是将数据通过 t-SNE 投影到二维或者三维空间中,如果在低维空间中同类数据间隔小,异类之间间隔大,那么说明数据是可分的;但如果在低维空间中不具有可分性,可能是数据本身不可分,也有可能是因为其不能投影到低维空间中。\n",
48 | "\n",
49 | "我们经常使用 t-SNE 可视化来展示自监督预训练的学习效果。如果模型能在自监督预训练过程中学习到比较好的特征提取能力,那么数据经过该模型提取到的高维特征就应该具有可分性。\n",
50 | "\n",
51 | "本教程将演示:使用 t-SNE 可视化模型自监督预训练的学习效果\n",
52 | "\n",
53 | "代码详细请参考官方[文档](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#t-sne)。"
54 | ]
55 | },
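{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch of the idea (independent of MMSelfSup, assuming scikit-learn is available; `feats` and `labels` below are random placeholders standing in for real backbone features and class labels):\n",
"\n",
"```python\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from sklearn.manifold import TSNE\n",
"\n",
"feats = np.random.randn(500, 2048)      # placeholder (N, C) backbone features\n",
"labels = np.random.randint(0, 10, 500)  # placeholder class labels\n",
"\n",
"# Project to 2-D; tight same-color clusters that stay apart suggest separable features\n",
"emb = TSNE(n_components=2, perplexity=30, init='pca').fit_transform(feats)\n",
"plt.scatter(emb[:, 0], emb[:, 1], c=labels, cmap='tab10', s=5)\n",
"plt.show()\n",
"```"
]
},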
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "## 1. 环境配置"
61 | ]
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "metadata": {},
66 | "source": [
67 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "# Check nvcc version\n",
77 | "!nvcc -V"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "# Check GCC version\n",
87 | "!gcc --version"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "# Check PyTorch installation\n",
97 | "import torch, torchvision\n",
98 | "print(torch.__version__)\n",
99 | "print(torch.cuda.is_available())"
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "!pip install openmim"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "!mim install mmcv"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {},
130 | "source": [
131 | "### 1.3 安装 MMSelfSup"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "%cd /content"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
150 | "%cd /content/mmselfsup"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "# Install MMSelfSup from source\n",
160 | "!pip install -e . "
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "metadata": {},
166 | "source": [
167 | "### 1.4 检查安装是否正确"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": null,
173 | "metadata": {},
174 | "outputs": [],
175 | "source": [
176 | "import mmselfsup\n",
177 | "print(mmselfsup.__version__)"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "## 2. 准备数据集"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "### 2.0 数据集介绍"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "metadata": {},
197 | "source": [
198 | "本教程将使用自监督预训练模型提取 `Tiny ImageNet` 数据集里的图片特征,并进行 t-SNE 可视化。\n",
199 | "\n",
200 | "Tiny ImageNet 数据集是 ImageNet 的一个子集。\n",
201 | "\n",
202 | "该数据集包含 200 个类别,每个类别有 500 张训练图片、50 张验证图片和 50 张测试图片,共 120,000 张图像。每张图片均为 64×64 彩色图片。\n",
203 | "\n",
204 | "数据集官方下载地址:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "### 2.1 下载数据集"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "使用 GNU [Wget](https://www.gnu.org/software/wget/) 工具从斯坦福官方网站下载:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": null,
224 | "metadata": {},
225 | "outputs": [],
226 | "source": [
227 | "%cd /content/mmselfsup"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": null,
233 | "metadata": {},
234 | "outputs": [],
235 | "source": [
236 | "!mkdir data\n",
237 | "%cd data\n",
238 | "!wget http://cs231n.stanford.edu/tiny-imagenet-200.zip"
239 | ]
240 | },
241 | {
242 | "cell_type": "markdown",
243 | "metadata": {},
244 | "source": [
245 | "### 2.2 解压数据集"
246 | ]
247 | },
248 | {
249 | "cell_type": "code",
250 | "execution_count": null,
251 | "metadata": {},
252 | "outputs": [],
253 | "source": [
254 | "!unzip -q tiny-imagenet-200.zip"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": null,
260 | "metadata": {},
261 | "outputs": [],
262 | "source": [
263 | "!rm -rf tiny-imagenet-200.zip"
264 | ]
265 | },
266 | {
267 | "cell_type": "markdown",
268 | "metadata": {},
269 | "source": [
270 | "### 2.3 查看数据集目录"
271 | ]
272 | },
273 | {
274 | "cell_type": "code",
275 | "execution_count": null,
276 | "metadata": {},
277 | "outputs": [],
278 | "source": [
279 | "# Check data directory\n",
280 | "!apt-get install tree\n",
281 | "!tree -d /content/mmselfsup/data"
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "metadata": {},
287 | "source": [
288 | "### 2.4 准备标注文件"
289 | ]
290 | },
291 | {
292 | "cell_type": "markdown",
293 | "metadata": {},
294 | "source": [
295 | "为了减少大家重写 `加载数据集` 代码的负担,我们整理好了标注文件,复制到数据集根目录 `mmselfsup/data/tiny-imagenet-200` 下即可。"
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "execution_count": null,
301 | "metadata": {},
302 | "outputs": [],
303 | "source": [
304 | "%cd /content/mmselfsup/data"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": null,
310 | "metadata": {},
311 | "outputs": [],
312 | "source": [
313 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/train.txt -P tiny-imagenet-200\n",
314 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/val.txt -P tiny-imagenet-200"
315 | ]
316 | },
317 | {
318 | "cell_type": "markdown",
319 | "metadata": {},
320 | "source": [
321 | "## 3. 准备自监督预训练模型"
322 | ]
323 | },
324 | {
325 | "cell_type": "markdown",
326 | "metadata": {},
327 | "source": [
328 | "目前,MMSelfSup 里 t-SNE 可视化的模型对象既可以是 `自监督预训练模型提取 backbone 部分的权重文件`,也可以是 `自监督预训练过程中直接保存下来的名为 epoch_*.pth 的 checkpoint 文件`。所以,我们这里需要准备好两种模型文件,方便后面对这两种文件进行 t-SNE 可视化的代码演示。\n",
329 | "\n",
330 | "**注意**:目前,MMSelfSup 只支持以 `ResNet-50` 为 backbone 的自监督预训练模型的 t-SNE 可视化。"
331 | ]
332 | },
333 | {
334 | "cell_type": "markdown",
335 | "metadata": {},
336 | "source": [
337 | "### 3.1 准备自监督预训练模型提取 backbone 部分的权重文件"
338 | ]
339 | },
340 | {
341 | "cell_type": "markdown",
342 | "metadata": {},
343 | "source": [
344 | "**注意:MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 中的模型文件都已经提取过 backbone 权值,不需要再次提取!**我们直接使用即可。\n",
345 | "\n",
346 | "在模型库中找到在 SimCLR 的预训练模型文件 `simclr_resnet50_8xb32-coslr-200e_in1k`,下载放在 `checkpoints` 文件夹里 "
347 | ]
348 | },
349 | {
350 | "cell_type": "code",
351 | "execution_count": null,
352 | "metadata": {},
353 | "outputs": [],
354 | "source": [
355 | "%cd /content/mmselfsup\n",
356 | "!mkdir checkpoints\n",
357 | "!wget https://download.openmmlab.com/mmselfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth -P checkpoints"
358 | ]
359 | },
360 | {
361 | "cell_type": "markdown",
362 | "metadata": {},
363 | "source": [
364 | "### 3.2 准备自监督预训练过程中直接保存下来的 checkpoint 文件"
365 | ]
366 | },
367 | {
368 | "cell_type": "markdown",
369 | "metadata": {},
370 | "source": [
371 | "我们使用第一个教程 `模型自监督预训练 之 SimCLR` 中训练保存下来的 `epoch_1.pth` 文件进行演示,该文件可以从 [这里](https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth) 下载,存放在文件夹 `mmselfsup/work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200` 里。"
372 | ]
373 | },
374 | {
375 | "cell_type": "code",
376 | "execution_count": null,
377 | "metadata": {},
378 | "outputs": [],
379 | "source": [
380 | "%cd /content/mmselfsup\n",
381 | "!mkdir -p work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200\n",
382 | "!wget https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth -P work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200"
383 | ]
384 | },
385 | {
386 | "cell_type": "markdown",
387 | "metadata": {},
388 | "source": [
389 | "同时,准备好第一个教程 `模型自监督预训练 之 SimCLR` 中的自监督预训练配置文件 `simclr_resnet50_1xb32-coslr-1e_tinyin200.py`。"
390 | ]
391 | },
392 | {
393 | "cell_type": "code",
394 | "execution_count": null,
395 | "metadata": {},
396 | "outputs": [],
397 | "source": [
398 | "%cd /content/mmselfsup"
399 | ]
400 | },
401 | {
402 | "cell_type": "code",
403 | "execution_count": null,
404 | "metadata": {},
405 | "outputs": [],
406 | "source": [
407 | "%%writefile /content/mmselfsup/configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py\n",
408 | "\n",
409 | "_base_ = 'simclr_resnet50_8xb32-coslr-200e_in1k.py'\n",
410 | "\n",
411 | "# dataset\n",
412 | "data = dict(\n",
413 | " samples_per_gpu=32, \n",
414 | " workers_per_gpu=2,\n",
415 | " train=dict(\n",
416 | " data_source=dict(\n",
417 | " data_prefix='data/tiny-imagenet-200/train',\n",
418 | " ann_file='data/tiny-imagenet-200/train.txt',\n",
419 | " )\n",
420 | " )\n",
421 | ")\n",
422 | "\n",
423 | "# optimizer\n",
424 | "optimizer = dict(\n",
425 | " lr=0.3 * ((32 * 1) / (32 * 8)),\n",
426 | ")\n",
427 | "\n",
428 | "runner = dict(max_epochs=1)"
429 | ]
430 | },
431 | {
432 | "cell_type": "markdown",
433 | "metadata": {},
434 | "source": [
435 | "## 4. 写 t-SNE 可视化的数据配置文件"
436 | ]
437 | },
438 | {
439 | "cell_type": "markdown",
440 | "metadata": {},
441 | "source": [
442 | "1. 新建一个名为 `tsne_tinyin200.py` 的配置文件。\n",
443 | "\n",
444 | "\n",
445 | "\n",
446 | "2. `tsne_tinyin200.py` 数据配置文件的内容如下:\n",
447 | " 1. 继承 [tsne_imagenet.py](https://github.com/open-mmlab/mmselfsup/blob/master/configs/benchmarks/classification/tsne_imagenet.py) 配置文件\n",
448 | " 2. 修改数据集路径和数据标注文件路径"
449 | ]
450 | },
451 | {
452 | "cell_type": "code",
453 | "execution_count": null,
454 | "metadata": {},
455 | "outputs": [],
456 | "source": [
457 | "%cd /content/mmselfsup"
458 | ]
459 | },
460 | {
461 | "cell_type": "code",
462 | "execution_count": null,
463 | "metadata": {},
464 | "outputs": [],
465 | "source": [
466 | "%%writefile /content/mmselfsup/configs/benchmarks/classification/tsne_tinyin200.py\n",
467 | "_base_ = 'tsne_imagenet.py'\n",
468 | "\n",
469 | "data = dict(\n",
470 | " extract=dict(\n",
471 | " data_source=dict(\n",
472 | " data_prefix='data/tiny-imagenet-200/val',\n",
473 | " ann_file='data/tiny-imagenet-200/val.txt',\n",
474 | " )\n",
475 | " )\n",
476 | ")"
477 | ]
478 | },
479 | {
480 | "cell_type": "markdown",
481 | "metadata": {},
482 | "source": [
483 | "## 5. 使用 t-SNE 可视化自监督预训练模型提取的图片特征"
484 | ]
485 | },
486 | {
487 | "cell_type": "markdown",
488 | "metadata": {},
489 | "source": [
490 | "下面,我们分别演示对 `自监督预训练模型提取 backbone 部分的权重文件` 和 `自监督预训练过程中直接保存下来的名为 epoch_*.pth 的 checkpoint 文件` 两种文件进行 t-SNE 可视化。"
491 | ]
492 | },
493 | {
494 | "cell_type": "markdown",
495 | "metadata": {},
496 | "source": [
497 | "### 5.1 对自监督预训练模型的 backbone 权重文件提取图片特征并保存 t-SNE 可视化结果"
498 | ]
499 | },
500 | {
501 | "cell_type": "markdown",
502 | "metadata": {},
503 | "source": [
504 | "使用 [visualize_tsne.py](https://github.com/open-mmlab/mmselfsup/blob/master/tools/analysis_tools/visualize_tsne.py) 脚本来提取图片特征并保存 t-SNE 可视化结果。\n",
505 | "\n",
506 | "```shell\n",
507 | "python tools/analysis_tools/visualize_tsne.py ${CONFIG_FILE} --dataset_config ${DATASET_CONFIG} --cfg-options ${CFG_OPTION} --work-dir ${WORK_DIR} [optional arguments]\n",
508 | "```\n",
509 | "\n",
510 | "参数:\n",
511 | "+ CONFIG_FILE:自监督训练的配置文件所在路径\n",
512 | "+ dataset_config:数据配置文件所在路径\n",
513 | "+ cfg-options:配置文件的可选项。如果要对“自监督预训练模型的 backbone 权重文件”进行 t-SNE 可视化,就在该参数里进行设置。\n",
514 | "+ work-dir:保存可视化结果的路径\n",
515 | "\n",
516 | "其他可选参数 optional arguments 可参考 [visualize_tsne.py](https://github.com/open-mmlab/mmselfsup/blob/master/tools/analysis_tools/visualize_tsne.py)."
517 | ]
518 | },
519 | {
520 | "cell_type": "code",
521 | "execution_count": null,
522 | "metadata": {},
523 | "outputs": [],
524 | "source": [
525 | "%cd /content/mmselfsup"
526 | ]
527 | },
528 | {
529 | "cell_type": "code",
530 | "execution_count": null,
531 | "metadata": {},
532 | "outputs": [],
533 | "source": [
534 | "!python tools/analysis_tools/visualize_tsne.py \\\n",
535 | "configs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py \\\n",
536 | "--dataset_config configs/benchmarks/classification/tsne_tinyin200.py \\\n",
537 | "--cfg-options model.backbone.init_cfg.type=Pretrained \\\n",
538 | "model.backbone.init_cfg.checkpoint=checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth \\\n",
539 | "--work-dir work_dirs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k/tsne_simclr"
540 | ]
541 | },
542 | {
543 | "cell_type": "markdown",
544 | "metadata": {},
545 | "source": [
546 | "### 5.2 对自监督预训练过程中直接保存下来的 checkpoint 文件提取图片特征并保存 t-SNE 可视化结果"
547 | ]
548 | },
549 | {
550 | "cell_type": "markdown",
551 | "metadata": {},
552 | "source": [
553 | "使用 [visualize_tsne.py](https://github.com/open-mmlab/mmselfsup/blob/master/tools/analysis_tools/visualize_tsne.py) 脚本来提取图片特征并保存 t-SNE 可视化结果。\n",
554 | "\n",
555 | "```shell\n",
556 | "python tools/analysis_tools/visualize_tsne.py ${CONFIG_FILE} --dataset_config ${DATASET_CONFIG} --checkpoint ${CKPT_PATH} --work-dir ${WORK_DIR} [optional arguments]\n",
557 | "```\n",
558 | "\n",
559 | "参数:\n",
560 | "+ CONFIG_FILE:自监督训练的配置文件所在路径\n",
561 | "+ dataset_config:数据配置文件所在路径\n",
562 | "+ checkpoint:自监督预训练过程中保存下来(名为 `epoch_*.pth`)的模型文件路径\n",
563 | "+ work-dir:保存可视化结果的路径\n",
564 | "\n",
565 | "其他可选参数 optional arguments 可参考 [visualize_tsne.py](https://github.com/open-mmlab/mmselfsup/blob/master/tools/analysis_tools/visualize_tsne.py)."
566 | ]
567 | },
568 | {
569 | "cell_type": "code",
570 | "execution_count": null,
571 | "metadata": {},
572 | "outputs": [],
573 | "source": [
574 | "%cd /content/mmselfsup"
575 | ]
576 | },
577 | {
578 | "cell_type": "code",
579 | "execution_count": null,
580 | "metadata": {},
581 | "outputs": [],
582 | "source": [
583 | "!python tools/analysis_tools/visualize_tsne.py \\\n",
584 | "configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py \\\n",
585 | "--dataset_config configs/benchmarks/classification/tsne_tinyin200.py \\\n",
586 | "--checkpoint work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/epoch_1.pth \\\n",
587 | "--work-dir work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/tsne_simclr"
588 | ]
589 | },
590 | {
591 | "cell_type": "markdown",
592 | "metadata": {},
593 | "source": [
594 | "### 5.3 对权值随机初始化的模型提取图片特征并保存 t-SNE 可视化结果"
595 | ]
596 | },
597 | {
598 | "cell_type": "markdown",
599 | "metadata": {},
600 | "source": [
601 | "同时,我们可以和 `同一个 backbone 但是权值随机初始化` 的模型进行对比,对比二者提取图片特征的 t-SNE 可视化效果。\n",
602 | "\n",
603 | "做法:既不设置 checkpoint 参数,也不设置 cfg-options 参数。"
604 | ]
605 | },
606 | {
607 | "cell_type": "code",
608 | "execution_count": null,
609 | "metadata": {},
610 | "outputs": [],
611 | "source": [
612 | "!python tools/analysis_tools/visualize_tsne.py \\\n",
613 | "configs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py \\\n",
614 | "--dataset_config configs/benchmarks/classification/tsne_tinyin200.py \\\n",
615 | "--work-dir work_dirs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k/tsne_random"
616 | ]
617 | },
618 | {
619 | "cell_type": "markdown",
620 | "metadata": {},
621 | "source": [
622 | "## 6. 显示 t-SNE 可视化图片"
623 | ]
624 | },
625 | {
626 | "cell_type": "markdown",
627 | "metadata": {},
628 | "source": [
629 | "根据上面运行的结果信息,修改 t-SNE 可视化图片的目录路径"
630 | ]
631 | },
632 | {
633 | "cell_type": "code",
634 | "execution_count": null,
635 | "metadata": {},
636 | "outputs": [],
637 | "source": [
638 | "selfsup_tsne_dir = 'work_dirs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k/tsne_simclr/tsne_20220719_131352/saved_pictures/'\n",
639 | "rand_tsne_dir = 'work_dirs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k/tsne_random/tsne_20220719_131509/saved_pictures/'"
640 | ]
641 | },
642 | {
643 | "cell_type": "code",
644 | "execution_count": null,
645 | "metadata": {},
646 | "outputs": [],
647 | "source": [
648 | "from PIL import Image\n",
649 | "import matplotlib.pyplot as plt\n",
650 | "\n",
651 | "plt.figure(figsize=(20, 50), constrained_layout=True)\n",
652 | "for i in range(1, 6):\n",
653 | " # the full path of images\n",
654 | " selfsup_tsne_feat_i_path = selfsup_tsne_dir + 'feat'+ str(i) + '.png'\n",
655 | " rand_tsne_feat_i_path = rand_tsne_dir + 'feat'+ str(i) + '.png'\n",
656 | " \n",
657 | " # open the images\n",
658 | " selfsup_tsne_feat_i_images = Image.open(selfsup_tsne_feat_i_path)\n",
659 | " rand_tsne_feat_i_images = Image.open(rand_tsne_feat_i_path)\n",
660 | " \n",
661 | " # plot the images\n",
662 | " plt.subplot(5, 2, 2*i-1)\n",
663 | " plt.title('selfsup feat '+ str(i) +' tsne', y=0.9, fontsize=30)\n",
664 | " plt.imshow(selfsup_tsne_feat_i_images)\n",
665 | " plt.axis('off')\n",
666 | " \n",
667 | " plt.subplot(5, 2, 2*i)\n",
668 | " plt.title('random feat '+ str(i) +' tsne', y=0.9, fontsize=30)\n",
669 | " plt.imshow(rand_tsne_feat_i_images)\n",
670 | " plt.axis('off')\n",
671 | "\n",
672 | "plt.show()"
673 | ]
674 | }
675 | ],
676 | "metadata": {
677 | "kernelspec": {
678 | "display_name": "Python 3",
679 | "language": "python",
680 | "name": "python3"
681 | },
682 | "language_info": {
683 | "codemirror_mode": {
684 | "name": "ipython",
685 | "version": 3
686 | },
687 | "file_extension": ".py",
688 | "mimetype": "text/x-python",
689 | "name": "python",
690 | "nbconvert_exporter": "python",
691 | "pygments_lexer": "ipython3",
692 | "version": "3.7.0"
693 | }
694 | },
695 | "nbformat": 4,
696 | "nbformat_minor": 2
697 | }
698 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【3】自监督预训练模型的评估:“分类” 下游任务 之 线性评估.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 自监督预训练模型的评估:“分类” 下游任务 之 线性评估\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 任务介绍"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "在使用 “分类” 下游任务对自监督预训练模型进行评估的方法中,MMSelfSup 目前实现了以下几种方法:(详细请参考官方[文档](https://mmselfsup.readthedocs.io/zh_CN/latest/tutorials/6_benchmarks.html#id2))\n",
48 | "+ VOC SVM / Low-shot SVM\n",
49 | "+ **线性评估**\n",
50 | "+ ImageNet 半监督分类\n",
51 | "+ ImageNet 最邻近分类\n",
52 | "\n",
53 | "\n",
54 | "**注意:下游任务的 backbone 必须和自监督预训练模型的 backbone 保持一致,才能进行自监督预训练权值的迁移!**"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "线性评估是评估自监督预训练模型最通用的基准之一。它的 **`基本流程`** 如下:\n",
62 | "1. 在自监督预训练模型的 backbone 后面接上一层新的线性分类层\n",
63 | "2. 固定自监督预训练模型的 backbone 权值,使用下游任务(分类)的数据集对新的线性分类层进行训练,以此验证自监督预训练模型提取图片特征的效果。"
64 | ]
65 | },
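{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of this recipe in plain PyTorch (an illustration of the idea, not MMSelfSup's implementation; the random batch is a placeholder for real data):\n",
"\n",
"```python\n",
"import torch\n",
"import torchvision\n",
"\n",
"backbone = torchvision.models.resnet50()\n",
"backbone.fc = torch.nn.Identity()    # expose the 2048-d features\n",
"for p in backbone.parameters():\n",
"    p.requires_grad = False          # freeze the pre-trained backbone (step 2)\n",
"backbone.eval()\n",
"\n",
"head = torch.nn.Linear(2048, 200)    # the new linear layer, 200 Tiny ImageNet classes (step 1)\n",
"optimizer = torch.optim.SGD(head.parameters(), lr=0.2, momentum=0.9)\n",
"\n",
"images = torch.randn(8, 3, 64, 64)           # placeholder batch\n",
"targets = torch.randint(0, 200, (8,))\n",
"with torch.no_grad():\n",
"    feats = backbone(images)                 # features from the frozen backbone\n",
"loss = torch.nn.functional.cross_entropy(head(feats), targets)\n",
"loss.backward()                              # only the head receives gradients\n",
"optimizer.step()\n",
"```"
]
},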
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "## 1. 环境配置"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "metadata": {},
76 | "source": [
77 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "# Check nvcc version\n",
87 | "!nvcc -V"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "# Check GCC version\n",
97 | "!gcc --version"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": null,
103 | "metadata": {},
104 | "outputs": [],
105 | "source": [
106 | "# Check PyTorch installation\n",
107 | "import torch, torchvision\n",
108 | "print(torch.__version__)\n",
109 | "print(torch.cuda.is_available())"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "metadata": {},
123 | "outputs": [],
124 | "source": [
125 | "!pip install openmim"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "metadata": {},
132 | "outputs": [],
133 | "source": [
134 | "!mim install mmcv"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "### 1.3 安装 MMSelfSup"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "%cd /content"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
160 | "%cd /content/mmselfsup"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": null,
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "# Install MMSelfSup from source\n",
170 | "!pip install -e . "
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {},
176 | "source": [
177 | "### 1.4 检查安装是否正确"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "import mmselfsup\n",
187 | "print(mmselfsup.__version__)"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "## 2. 准备数据集"
195 | ]
196 | },
197 | {
198 | "cell_type": "markdown",
199 | "metadata": {},
200 | "source": [
201 | "目前,线性评估所支持的数据集包括 **ImageNet**、**Places205** 和 **iNaturalist18**。"
202 | ]
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "metadata": {},
207 | "source": [
208 | "### 2.0 数据集介绍"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "本教程将在 `Tiny ImageNet` 数据集上对自监督预训练好的模型进行线性评估。\n",
216 | "\n",
217 | "Tiny ImageNet 数据集是 ImageNet 的一个子集。\n",
218 | "\n",
219 | "该数据集包含 200 个类别,每个类别有 500 张训练图片、50 张验证图片和 50 张测试图片,共 120,000 张图像。每张图片均为 64×64 彩色图片。\n",
220 | "\n",
221 | "数据集官方下载地址:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "### 2.1 下载数据集"
229 | ]
230 | },
231 | {
232 | "cell_type": "markdown",
233 | "metadata": {},
234 | "source": [
235 | "使用 GNU [Wget](https://www.gnu.org/software/wget/) 工具从斯坦福官方网站下载:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
236 | ]
237 | },
238 | {
239 | "cell_type": "code",
240 | "execution_count": null,
241 | "metadata": {},
242 | "outputs": [],
243 | "source": [
244 | "%cd /content/mmselfsup"
245 | ]
246 | },
247 | {
248 | "cell_type": "code",
249 | "execution_count": null,
250 | "metadata": {},
251 | "outputs": [],
252 | "source": [
253 | "!mkdir data\n",
254 | "%cd data\n",
255 | "!wget http://cs231n.stanford.edu/tiny-imagenet-200.zip"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "### 2.2 解压数据集"
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "execution_count": null,
268 | "metadata": {},
269 | "outputs": [],
270 | "source": [
271 | "!unzip -q tiny-imagenet-200.zip"
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": null,
277 | "metadata": {},
278 | "outputs": [],
279 | "source": [
280 | "!rm -rf tiny-imagenet-200.zip"
281 | ]
282 | },
283 | {
284 | "cell_type": "markdown",
285 | "metadata": {},
286 | "source": [
287 | "### 2.3 查看数据集目录"
288 | ]
289 | },
290 | {
291 | "cell_type": "code",
292 | "execution_count": null,
293 | "metadata": {},
294 | "outputs": [],
295 | "source": [
296 | "# Check data directory\n",
297 | "!apt-get install tree\n",
298 | "!tree -d /content/mmselfsup/data"
299 | ]
300 | },
301 | {
302 | "cell_type": "markdown",
303 | "metadata": {},
304 | "source": [
305 | "### 2.4 准备标注文件"
306 | ]
307 | },
308 | {
309 | "cell_type": "markdown",
310 | "metadata": {},
311 | "source": [
312 | "为了减少大家重写 `加载数据集` 代码的负担,我们整理好了标注文件,复制到数据集根目录 `mmselfsup/data/tiny-imagenet-200` 下即可。"
313 | ]
314 | },
315 | {
316 | "cell_type": "code",
317 | "execution_count": null,
318 | "metadata": {},
319 | "outputs": [],
320 | "source": [
321 | "%cd /content/mmselfsup/data"
322 | ]
323 | },
324 | {
325 | "cell_type": "code",
326 | "execution_count": null,
327 | "metadata": {},
328 | "outputs": [],
329 | "source": [
330 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/train.txt -P tiny-imagenet-200\n",
331 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/val.txt -P tiny-imagenet-200"
332 | ]
333 | },
334 | {
335 | "cell_type": "markdown",
336 | "metadata": {},
337 | "source": [
338 | "## 3. 准备自监督预训练模型的 backbone 权值文件"
339 | ]
340 | },
341 | {
342 | "cell_type": "markdown",
343 | "metadata": {},
344 | "source": [
345 | "### 3.1 针对自监督预训练过程中保存的 checkpoint 文件"
346 | ]
347 | },
348 | {
349 | "cell_type": "markdown",
350 | "metadata": {},
351 | "source": [
352 | "我们使用第一个教程 `模型自监督预训练 之 SimCLR` 中训练保存下来的 `epoch_1.pth` 文件进行演示,该文件可以从 [这里](https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth) 下载,存放在文件夹 `mmselfsup/work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200` 里。"
353 | ]
354 | },
355 | {
356 | "cell_type": "code",
357 | "execution_count": null,
358 | "metadata": {},
359 | "outputs": [],
360 | "source": [
361 | "%cd /content/mmselfsup\n",
362 | "!mkdir -p work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200\n",
363 | "!wget https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth -P work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200"
364 | ]
365 | },
366 | {
367 | "cell_type": "markdown",
368 | "metadata": {},
369 | "source": [
370 | "可以使用命令 `tools/model_converters/extract_backbone_weights.py` 来提取自监督预训练模型的 backbone 权值,代码如下:\n",
371 | "\n",
372 | "```python\n",
373 | "python tools/model_converters/extract_backbone_weights.py {CHECKPOINT} {MODEL_FILE}\n",
374 | "```\n",
375 | "\n",
376 | "参数:\n",
377 | "- CHECKPOINT:自监督预训练过程中保存下来(名为 `epoch_*.pth`)的模型文件路径\n",
378 | "- MODEL_FILE:输出 backbone 权重文件的保存路径。"
379 | ]
380 | },
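{
"cell_type": "markdown",
"metadata": {},
"source": [
"Conceptually, the extraction keeps only the `backbone.*` entries of the checkpoint's state dict and strips the prefix. A hedged sketch of that idea (illustrative only, not the script's actual code):\n",
"\n",
"```python\n",
"import torch\n",
"\n",
"ckpt = torch.load('work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/epoch_1.pth', map_location='cpu')\n",
"state = ckpt['state_dict']\n",
"# keep only the backbone weights, dropping the 'backbone.' prefix from each key\n",
"backbone_state = {k[len('backbone.'):]: v for k, v in state.items() if k.startswith('backbone.')}\n",
"torch.save({'state_dict': backbone_state}, 'backbone.pth')\n",
"```"
]
},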
381 | {
382 | "cell_type": "code",
383 | "execution_count": null,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "%cd /content/mmselfsup"
388 | ]
389 | },
390 | {
391 | "cell_type": "code",
392 | "execution_count": null,
393 | "metadata": {},
394 | "outputs": [],
395 | "source": [
396 | "!python ./tools/model_converters/extract_backbone_weights.py \\\n",
397 | "work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/epoch_1.pth \\\n",
398 | "work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/backbone.pth"
399 | ]
400 | },
401 | {
402 | "cell_type": "markdown",
403 | "metadata": {},
404 | "source": [
405 | "### 3.2 针对 MMSelfSup 模型库里的模型文件"
406 | ]
407 | },
408 | {
409 | "cell_type": "markdown",
410 | "metadata": {},
411 | "source": [
412 | "**注意:MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 中的模型文件都已经提取过 backbone 权值,不需要再次提取!**我们直接使用即可。\n",
413 | "\n",
414 | "在模型库中找到在 SimCLR 的预训练模型文件 `simclr_resnet50_8xb32-coslr-200e_in1k`,下载放在 `checkpoints` 文件夹里 "
415 | ]
416 | },
417 | {
418 | "cell_type": "code",
419 | "execution_count": null,
420 | "metadata": {},
421 | "outputs": [],
422 | "source": [
423 | "%cd /content/mmselfsup\n",
424 | "!mkdir checkpoints\n",
425 | "!wget https://download.openmmlab.com/mmselfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth -P checkpoints"
426 | ]
427 | },
428 | {
429 | "cell_type": "markdown",
430 | "metadata": {},
431 | "source": [
432 | "## 4. 自监督预训练模型的线性评估(Linear Evaluation)"
433 | ]
434 | },
435 | {
436 | "cell_type": "markdown",
437 | "metadata": {},
438 | "source": [
439 | "### 4.1 写自监督预训练模型的线性评估配置文件"
440 | ]
441 | },
442 | {
443 | "cell_type": "markdown",
444 | "metadata": {},
445 | "source": [
446 | "1. 新建一个名为 `resnet50_linear-1xb512-coslr-5e_tinyin200.py` 的配置文件。(配置文件命名要求 & 含义可参考[这里](https://mmsegmentation.readthedocs.io/zh_CN/latest/tutorials/config.html#id3))\n",
447 | "\n",
448 | "\n",
449 | "\n",
450 | "2. `resnet50_linear-1xb512-coslr-5e_tinyin200.py` 训练配置文件的内容:\n",
451 | " 1. 继承 [resnet50_linear-8xb512-coslr-90e_in1k.py](https://github.com/open-mmlab/mmselfsup/blob/master/configs/benchmarks/classification/imagenet/resnet50_linear-8xb512-coslr-90e_in1k.py) 配置文件\n",
452 | " 2. 根据需求修改参数 samples_per_gpu(单个 GPU 的 Batch size)和 workers_per_gpu (单个 GPU 分配的数据加载线程数)\n",
453 | " 3. 修改数据集路径和数据标注文件路径\n",
454 | " 4. 根据 batch size 调整学习率(调整原则请参考:[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id2))\n",
455 | " 5. 修改训练的总轮数 epoch"
456 | ]
457 | },
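{
"cell_type": "markdown",
"metadata": {},
"source": [
"For step 4, the scaling rule keeps the ratio of learning rate to total batch size constant: lr = base_lr × (our total batch size / the base config's total batch size). The base config uses 8 GPUs × 512 samples with lr = 1.6; here we use 1 GPU × 512 samples, so lr = 1.6 × (512 × 1) / (512 × 8) = 0.2, which is exactly the expression written in the config below."
]
},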
458 | {
459 | "cell_type": "code",
460 | "execution_count": null,
461 | "metadata": {},
462 | "outputs": [],
463 | "source": [
464 | "%%writefile /content/mmselfsup/configs/benchmarks/classification/imagenet/resnet50_linear-1xb512-coslr-5e_tinyin200.py\n",
465 | "_base_ = 'resnet50_linear-8xb512-coslr-90e_in1k.py'\n",
466 | "\n",
467 | "data = dict(\n",
468 | " samples_per_gpu=512, \n",
469 | " workers_per_gpu=2,\n",
470 | " train=dict(\n",
471 | " data_source=dict(\n",
472 | " data_prefix='data/tiny-imagenet-200/train',\n",
473 | " ann_file='data/tiny-imagenet-200/train.txt')),\n",
474 | " val=dict(\n",
475 | " data_source=dict(\n",
476 | " data_prefix='data/tiny-imagenet-200/val',\n",
477 | " ann_file='data/tiny-imagenet-200/val.txt'))\n",
478 | " )\n",
479 | "\n",
480 | "# optimizer\n",
481 | "optimizer = dict(\n",
482 | " lr=1.6 * ((512 * 1) / (512 * 8)),\n",
483 | ")\n",
484 | "\n",
485 | "# runtime settings\n",
486 | "runner = dict(type='EpochBasedRunner', max_epochs=5)"
487 | ]
488 | },
489 | {
490 | "cell_type": "markdown",
491 | "metadata": {},
492 | "source": [
493 | "### 4.2 开始线性评估"
494 | ]
495 | },
496 | {
497 | "cell_type": "markdown",
498 | "metadata": {},
499 | "source": [
500 | "在运行线性评估时,我们使用 [dist_train_linear.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/benchmarks/classification/dist_train_linear.sh) 脚本来启动训练。\n",
501 | "\n",
502 | "```shell\n",
503 | "GPUS=${GPUS} bash tools/benchmarks/classification/dist_train_linear.sh ${CONFIG} ${PRETRAIN}\n",
504 | "```\n",
505 | "\n",
506 | "参数:\n",
507 | "- GPUS:默认使用的 GPU 数量是 8,如果需要调整,需要加上该环境变量。\n",
508 | "- CONFIG:线性评估所使用的配置文件,位于 `configs/benchmarks/classification/` 对应数据集目录下\n",
509 | "- PRETRAIN:自监督预训练模型的 backbone 权重文件所在的路径"
510 | ]
511 | },
512 | {
513 | "cell_type": "code",
514 | "execution_count": null,
515 | "metadata": {},
516 | "outputs": [],
517 | "source": [
518 | "%cd /content/mmselfsup"
519 | ]
520 | },
521 | {
522 | "cell_type": "code",
523 | "execution_count": null,
524 | "metadata": {},
525 | "outputs": [],
526 | "source": [
527 | "!GPUS=1 bash tools/benchmarks/classification/dist_train_linear.sh \\\n",
528 | "configs/benchmarks/classification/imagenet/resnet50_linear-1xb512-coslr-5e_tinyin200.py \\\n",
529 | "checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth"
530 | ]
531 | }
532 | ],
533 | "metadata": {
534 | "kernelspec": {
535 | "display_name": "Python 3",
536 | "language": "python",
537 | "name": "python3"
538 | },
539 | "language_info": {
540 | "codemirror_mode": {
541 | "name": "ipython",
542 | "version": 3
543 | },
544 | "file_extension": ".py",
545 | "mimetype": "text/x-python",
546 | "name": "python",
547 | "nbconvert_exporter": "python",
548 | "pygments_lexer": "ipython3",
549 | "version": "3.7.0"
550 | }
551 | },
552 | "nbformat": 4,
553 | "nbformat_minor": 2
554 | }
555 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【4】自监督预训练模型的评估:“分类” 下游任务 之 SVM 评估.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 自监督预训练模型的评估:“分类” 下游任务 之 SVM 评估\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 任务介绍"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "在使用 “分类” 下游任务对自监督预训练模型进行评估的方法中,MMSelfSup 目前实现了以下几种方法:(详细请参考官方[文档](https://mmselfsup.readthedocs.io/zh_CN/latest/tutorials/6_benchmarks.html#id2))\n",
48 | "+ **VOC SVM / Low-shot SVM**\n",
49 | "+ 线性评估\n",
50 | "+ ImageNet 半监督分类\n",
51 | "+ ImageNet 最邻近分类"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "SVM 评估也是评估自监督预训练模型的常用基准之一。它的 **`基本流程`** 如下:\n",
59 | "1. 每张图像送入自监督预训练好的模型提取特征\n",
60 | "2. 用所有训练集图片的特征向量来训练支持向量机 SVM \n",
61 | "3. 将测试集图片的特征向送入训练好的 SVM,将其分类性能作为评估从自监督学习方法中学习图像特征质量的手段,与其他自监督模型进行比较"
62 | ]
63 | },
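{
"cell_type": "markdown",
"metadata": {},
"source": [
"A rough sketch of the recipe (using scikit-learn's LinearSVC as a stand-in for the benchmark's SVM code; the random arrays are placeholders for features actually extracted by the frozen model):\n",
"\n",
"```python\n",
"import numpy as np\n",
"from sklearn.svm import LinearSVC\n",
"\n",
"train_feats = np.random.randn(1000, 2048)      # step 1: features from the frozen model\n",
"train_labels = np.random.randint(0, 20, 1000)\n",
"test_feats = np.random.randn(200, 2048)\n",
"test_labels = np.random.randint(0, 20, 200)\n",
"\n",
"svm = LinearSVC(C=1.0).fit(train_feats, train_labels)  # step 2: train the SVM\n",
"print(svm.score(test_feats, test_labels))              # step 3: test-set accuracy\n",
"```"
]
},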
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "## 1. 环境配置"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "# Check nvcc version\n",
85 | "!nvcc -V"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "# Check GCC version\n",
95 | "!gcc --version"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "# Check PyTorch installation\n",
105 | "import torch, torchvision\n",
106 | "print(torch.__version__)\n",
107 | "print(torch.cuda.is_available())"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": null,
120 | "metadata": {},
121 | "outputs": [],
122 | "source": [
123 | "!pip install openmim"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "!mim install mmcv"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "### 1.3 安装 MMSelfSup"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "%cd /content"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
158 | "%cd /content/mmselfsup"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "# Install MMSelfSup from source\n",
168 | "!pip install -e . "
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {},
174 | "source": [
175 | "### 1.4 检查安装是否正确"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "import mmselfsup\n",
185 | "print(mmselfsup.__version__)"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {},
191 | "source": [
192 | "## 2. 准备数据集"
193 | ]
194 | },
195 | {
196 | "cell_type": "markdown",
197 | "metadata": {},
198 | "source": [
199 | "**注意**:目前,MMSelfSup 的 SVM 评估只支持了 `PASCAL VOC 2007` 数据集。"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "### 2.0 数据集介绍"
207 | ]
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "本教程将在 `PASCAL VOC 2007` 数据集上对自监督预训练好的模型进行 SVM 评估。\n",
214 | "\n",
215 | "该数据集包含有 20 个类别。训练集有 5011 张图片,测试集有 4952 张图片。\n",
216 | "\n",
217 | "数据集官方下载地址:http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html"
218 | ]
219 | },
220 | {
221 | "cell_type": "markdown",
222 | "metadata": {},
223 | "source": [
224 | "### 2.1 下载数据集"
225 | ]
226 | },
227 | {
228 | "cell_type": "markdown",
229 | "metadata": {},
230 | "source": [
231 | "使用如下命令,自动将 PASCAL VOC 2007 下载到 `$YOUR_DATA_ROOT` 文件夹中。\n",
232 | "\n",
233 | "```shell\n",
234 | "bash tools/data_converters/prepare_voc07_cls.sh $YOUR_DATA_ROOT\n",
235 | "```"
236 | ]
237 | },
238 | {
239 | "cell_type": "code",
240 | "execution_count": null,
241 | "metadata": {},
242 | "outputs": [],
243 | "source": [
244 | "% cd /content/mmselfsup\n",
245 | "!bash tools/data_converters/prepare_voc07_cls.sh data"
246 | ]
247 | },
248 | {
249 | "cell_type": "code",
250 | "execution_count": null,
251 | "metadata": {},
252 | "outputs": [],
253 | "source": [
254 | "!rm -rf data/VOCtest_06-Nov-2007.tar\n",
255 | "!rm -rf data/VOCtrainval_06-Nov-2007.tar"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "## 3. 准备自监督预训练模型"
263 | ]
264 | },
265 | {
266 | "cell_type": "markdown",
267 | "metadata": {},
268 | "source": [
269 | "目前,MMSelfSup 里 SVM 评估的模型对象既可以是 `自监督预训练模型提取 backbone 部分的权重文件`,也可以是 `自监督预训练过程中直接保存下来的名为 epoch_*.pth 的 checkpoint 文件`。所以,我们这里需要准备好两种模型文件,方便后面对这两种文件进行 SVM 评估的代码演示。"
270 | ]
271 | },
272 | {
273 | "cell_type": "markdown",
274 | "metadata": {},
275 | "source": [
276 | "### 3.1 准备自监督预训练模型提取 backbone 部分的权重文件"
277 | ]
278 | },
279 | {
280 | "cell_type": "markdown",
281 | "metadata": {},
282 | "source": [
283 | "**注意:MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 中的模型文件都已经提取过 backbone 权值,不需要再次提取!**我们直接使用即可。\n",
284 | "\n",
285 | "在模型库中找到在 SimCLR 的预训练模型文件 `simclr_resnet50_8xb32-coslr-200e_in1k`,下载放在 `checkpoints` 文件夹里 "
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": null,
291 | "metadata": {},
292 | "outputs": [],
293 | "source": [
294 | "%cd /content/mmselfsup\n",
295 | "!mkdir checkpoints\n",
296 | "!wget https://download.openmmlab.com/mmselfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth -P checkpoints"
297 | ]
298 | },
299 | {
300 | "cell_type": "markdown",
301 | "metadata": {},
302 | "source": [
303 | "### 3.2 准备自监督预训练过程中直接保存下来的 checkpoint 文件"
304 | ]
305 | },
306 | {
307 | "cell_type": "markdown",
308 | "metadata": {},
309 | "source": [
310 | "我们使用第一个教程 `模型自监督预训练 之 SimCLR` 中训练保存下来的 `epoch_1.pth` 文件进行演示,该文件可以从 [这里](https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth) 下载,存放在文件夹 `mmselfsup/work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200` 里。"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "metadata": {},
317 | "outputs": [],
318 | "source": [
319 | "%cd /content/mmselfsup\n",
320 | "!mkdir -p work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200\n",
321 | "!wget https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth -P work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200"
322 | ]
323 | },
324 | {
325 | "cell_type": "markdown",
326 | "metadata": {},
327 | "source": [
328 | "同时,准备好第一个教程 `模型自监督预训练 之 SimCLR` 中的自监督预训练配置文件 `simclr_resnet50_1xb32-coslr-1e_tinyin200.py`。"
329 | ]
330 | },
331 | {
332 | "cell_type": "code",
333 | "execution_count": null,
334 | "metadata": {},
335 | "outputs": [],
336 | "source": [
337 | "%cd /content/mmselfsup"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": null,
343 | "metadata": {},
344 | "outputs": [],
345 | "source": [
346 | "%%writefile /content/mmselfsup/configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py\n",
347 | "\n",
348 | "_base_ = 'simclr_resnet50_8xb32-coslr-200e_in1k.py'\n",
349 | "\n",
350 | "# dataset\n",
351 | "data = dict(\n",
352 | " samples_per_gpu=32, \n",
353 | " workers_per_gpu=2,\n",
354 | " train=dict(\n",
355 | " data_source=dict(\n",
356 | " data_prefix='data/tiny-imagenet-200/train',\n",
357 | " ann_file='data/tiny-imagenet-200/train.txt',\n",
358 | " )\n",
359 | " )\n",
360 | ")\n",
361 | "\n",
362 | "# optimizer\n",
363 | "optimizer = dict(\n",
364 | " lr=0.3 * ((32 * 1) / (32 * 8)),\n",
365 | ")\n",
366 | "\n",
367 | "runner = dict(max_epochs=1)"
368 | ]
369 | },
370 | {
371 | "cell_type": "markdown",
372 | "metadata": {},
373 | "source": [
374 | "## 4. 自监督预训练模型的 SVM 评估"
375 | ]
376 | },
377 | {
378 | "cell_type": "markdown",
379 | "metadata": {},
380 | "source": [
381 | "下面,我们分别演示对 `自监督预训练模型提取 backbone 部分的权重文件` 和 `自监督预训练过程中直接保存下来的名为 epoch_*.pth 的 checkpoint 文件` 两种文件进行 SVM 评估。\n",
382 | "\n",
383 | "相关代码文件见:[tools/benchmarks/classification/svm_voc07](https://github.com/open-mmlab/mmselfsup/tree/master/tools/benchmarks/classification/svm_voc07)"
384 | ]
385 | },
386 | {
387 | "cell_type": "markdown",
388 | "metadata": {},
389 | "source": [
390 | "### 4.1 对自监督预训练模型的 backbone 权重文件进行 SVM 评估"
391 | ]
392 | },
393 | {
394 | "cell_type": "markdown",
395 | "metadata": {},
396 | "source": [
397 | "在运行自监督预训练模型的 SVM 评估时,我们使用 [dist_test_svm_pretrain.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/benchmarks/classification/svm_voc07/dist_test_svm_pretrain.sh) 脚本来启动自监督预训练模型 backbone 权重文件的评估。\n",
398 | "\n",
399 | "```shell\n",
400 | "bash tools/benchmarks/classification/svm_voc07/dist_test_svm_pretrain.sh ${SELFSUP_CONFIG} ${GPUS} ${PRETRAIN} ${FEATURE_LIST}\n",
401 | "\n",
402 | "```\n",
403 | "\n",
404 | "参数:\n",
405 | "- SELFSUP_CONFIG:自监督预训练的配置文件\n",
406 | "- GPUS:使用 GPU 的数量\n",
407 | "- PRETRAIN:自监督预训练模型的 backbone 权重文件所在的路径。\n",
408 | "- FEATURE_LIST:该参数的值是一个字符串,用于指定评估从 layer1 到 layer5 的特征,默认值为 `\"feat5\"`,表示评估 layer5 的特征。如果想评估 layer3,那么该参数的值是 `\"feat3\"`;如果想评估所有特征,那么该参数的值是 `\"feat1 feat2 feat3 feat4 feat5\"`(注意用空格分隔)。"
409 | ]
410 | },
411 | {
412 | "cell_type": "code",
413 | "execution_count": null,
414 | "metadata": {},
415 | "outputs": [],
416 | "source": [
417 | "%cd /content/mmselfsup"
418 | ]
419 | },
420 | {
421 | "cell_type": "code",
422 | "execution_count": null,
423 | "metadata": {},
424 | "outputs": [],
425 | "source": [
426 | "!bash tools/benchmarks/classification/svm_voc07/dist_test_svm_pretrain.sh \\\n",
427 | "configs/selfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py \\\n",
428 | "1 \\\n",
429 | "checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth \\\n",
430 | "\"feat5\""
431 | ]
432 | },
433 | {
434 | "cell_type": "markdown",
435 | "metadata": {},
436 | "source": [
437 | "### 4.2 对自监督预训练过程中直接保存下来的 checkpoint 文件进行 SVM 评估"
438 | ]
439 | },
440 | {
441 | "cell_type": "markdown",
442 | "metadata": {},
443 | "source": [
444 | "在运行自监督预训练模型的 SVM 评估时,我们使用 [dist_test_svm_epoch.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/benchmarks/classification/svm_voc07/dist_test_svm_epoch.sh) 脚本来启动自监督预训练过程中直接保存下来 checkpoint 文件的评估。\n",
445 | "\n",
446 | "```shell\n",
447 | "GPUS=${GPUS} bash tools/benchmarks/classification/svm_voc07/dist_test_svm_epoch.sh ${SELFSUP_CONFIG} ${EPOCH} ${FEATURE_LIST}\n",
448 | "```\n",
449 | "\n",
450 | "参数:\n",
451 | "- GPUS:默认使用的 GPU 数量是 8,如果需要调整,需要加上该环境变量。\n",
452 | "- SELFSUP_CONFIG:自监督预训练的配置文件\n",
453 | "- EPOCH:想要测试 checkpoint 文件的 epoch 数。例如:该参数的值为 5,意味着测名为 `epoch_5.pth` 的 checkpoint 文件。\n",
454 | "- FEATURE_LIST:该参数的值是一个字符串,用于指定评估从 layer1 到 layer5 的特征,默认值为 `\"feat5\"`,表示评估 layer5 的特征。如果想评估 layer3,那么该参数的值是 `\"feat3\"`;如果想评估所有特征,那么该参数的值是 `\"feat1 feat2 feat3 feat4 feat5\"`(注意用空格分隔)。"
455 | ]
456 | },
457 | {
458 | "cell_type": "code",
459 | "execution_count": null,
460 | "metadata": {},
461 | "outputs": [],
462 | "source": [
463 | "%cd /content/mmselfsup"
464 | ]
465 | },
466 | {
467 | "cell_type": "code",
468 | "execution_count": null,
469 | "metadata": {},
470 | "outputs": [],
471 | "source": [
472 | "!GPUS=1 bash tools/benchmarks/classification/svm_voc07/dist_test_svm_epoch.sh \\\n",
473 | "configs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200.py \\\n",
474 | "1 \\\n",
475 | "\"feat5\""
476 | ]
477 | }
478 | ],
479 | "metadata": {
480 | "kernelspec": {
481 | "display_name": "Python 3",
482 | "language": "python",
483 | "name": "python3"
484 | },
485 | "language_info": {
486 | "codemirror_mode": {
487 | "name": "ipython",
488 | "version": 3
489 | },
490 | "file_extension": ".py",
491 | "mimetype": "text/x-python",
492 | "name": "python",
493 | "nbconvert_exporter": "python",
494 | "pygments_lexer": "ipython3",
495 | "version": "3.7.0"
496 | }
497 | },
498 | "nbformat": 4,
499 | "nbformat_minor": 2
500 | }
501 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【5】自监督预训练模型的评估:“检测”下游任务.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 自监督预训练模型的评估:“检测” 下游任务\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 任务介绍"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "在使用 “检测” 下游任务对自监督预训练模型进行评估中,MMSelfSup 目前支持了以下几种评估配置:(详细请参考官方[文档](https://mmselfsup.readthedocs.io/zh_CN/latest/tutorials/6_benchmarks.html#id5))\n",
48 | "+ 将自监督预训练的权值迁移到 `Faster R-CNN` 模型上,并在 `Pascal VOC 2007 + 2012` 数据集上进行评估\n",
49 | "+ 将自监督预训练的权值迁移到 `Mask R-CNN` 模型上,并在 `COCO2017` 数据集上进行评估\n",
50 | "\n",
51 | "本教程将演示第一种评估配置。\n",
52 | "\n",
53 | "**注意:下游任务的 backbone 必须和自监督预训练模型的 backbone 保持一致,才能进行自监督预训练权值的迁移!**"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "“检测” 下游任务评估的 **`基本流程`** 如下:\n",
61 | "1. 将自监督预训练好的 backbone 权值迁移到下游 “检测” 任务的 backbone 上\n",
62 | "2. 使用下游任务(检测)的数据集对下游任务的模型进行微调,以此验证自监督预训练模型提取图片特征的效果。"
63 | ]
64 | },
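{
"cell_type": "markdown",
"metadata": {},
"source": [
"In MMDet-style configs, step 1 boils down to pointing the backbone's `init_cfg` at the extracted weights (an illustrative fragment only; the benchmark scripts used below do this wiring for you):\n",
"\n",
"```python\n",
"# illustrative config fragment: initialize the detector's backbone from a\n",
"# self-supervised backbone weight file\n",
"model = dict(\n",
"    backbone=dict(\n",
"        init_cfg=dict(\n",
"            type='Pretrained',\n",
"            checkpoint='work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/backbone.pth')))\n",
"```"
]
},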
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "## 1. 环境配置"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "# Check nvcc version\n",
86 | "!nvcc -V"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "# Check GCC version\n",
96 | "!gcc --version"
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "# Check PyTorch installation\n",
106 | "import torch, torchvision\n",
107 | "print(torch.__version__)\n",
108 | "print(torch.cuda.is_available())"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "!pip install openmim"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "!mim install mmcv-full"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "### 1.3 安装 MMSelfSup"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "%cd /content"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
159 | "%cd /content/mmselfsup"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "# Install MMSelfSup from source\n",
169 | "!pip install -e . "
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {},
175 | "source": [
176 | "### 1.4 安装 MMDetection \n",
177 | "在这里,mmdet 是我们这里所需要用到的依赖库,所以直接用 `pip install mmdet` 命令安装即可。\n",
178 | "\n",
179 | "其他方式的安装详情请参考: [MMDetection 文档](https://github.com/open-mmlab/mmdetection/blob/master/docs/en/get_started.md)。"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {},
186 | "outputs": [],
187 | "source": [
188 | "pip install mmdet"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "### 1.5 检查安装是否正确"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "import mmselfsup\n",
205 | "print(mmselfsup.__version__)\n",
206 | "\n",
207 | "import mmdet\n",
208 | "print(mmdet.__version__)"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "## 2. 准备数据集"
216 | ]
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "metadata": {},
221 | "source": [
222 | "### 2.0 数据集介绍"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "metadata": {},
228 | "source": [
229 | "本教程将在 `Pascal VOC 2012` 和 `Pascal VOC 2007` 数据集上对自监督预训练好的模型进行“检测”下游任务的评估。\n",
230 | "\n",
231 | "Pascal VOC 2007 数据集包含 20 个类别,Pascal VOC 2012 在此基础上进行无交集扩展(即类别相同,图片不同)。\n",
232 | "\n",
233 | "在本教程中,我们根据 MoCo 论文里的数据集配置进行训练和测试:使用 VOC 07 和 VOC 12 的训练集和验证集进行微调,在 VOC 07 数据集上进行测试。\n",
234 | "\n",
235 | "数据集官方网址:http://host.robots.ox.ac.uk/pascal/VOC/"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "### 2.1 下载数据集"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "metadata": {},
249 | "outputs": [],
250 | "source": [
251 | "%cd /content/mmselfsup"
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": null,
257 | "metadata": {},
258 | "outputs": [],
259 | "source": [
260 | "!mkdir data\n",
261 | "%cd data\n",
262 | "\n",
263 | "!wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar\n",
264 | "\n",
265 | "!wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar\n",
266 | "!wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "### 2.2 解压数据集"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "!tar -xf VOCtrainval_11-May-2012.tar\n",
283 | "!tar -xf VOCtrainval_06-Nov-2007.tar\n",
284 | "!tar -xf VOCtest_06-Nov-2007.tar"
285 | ]
286 | },
287 | {
288 | "cell_type": "markdown",
289 | "metadata": {},
290 | "source": [
291 | "## 3. 准备自监督预训练模型的 backbone 权值文件"
292 | ]
293 | },
294 | {
295 | "cell_type": "markdown",
296 | "metadata": {},
297 | "source": [
298 | "### 3.1 针对自监督预训练过程中保存的 checkpoint 文件"
299 | ]
300 | },
301 | {
302 | "cell_type": "markdown",
303 | "metadata": {},
304 | "source": [
305 | "我们使用第一个教程 `模型自监督预训练 之 SimCLR` 中训练保存下来的 `epoch_1.pth` 文件进行演示,该文件可以从 [这里](https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth) 下载,存放在文件夹 `mmselfsup/work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200` 里。"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": null,
311 | "metadata": {},
312 | "outputs": [],
313 | "source": [
314 | "%cd /content/mmselfsup\n",
315 | "!mkdir -p work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200\n",
316 | "!wget https://download.openmmlab.com/mmselfsup/tutorial/epoch_1.pth -P work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200"
317 | ]
318 | },
319 | {
320 | "cell_type": "markdown",
321 | "metadata": {},
322 | "source": [
323 | "可以使用命令 `tools/model_converters/extract_backbone_weights.py` 来提取自监督预训练模型的 backbone 权值,代码如下:\n",
324 | "\n",
325 | "```python\n",
326 | "python tools/model_converters/extract_backbone_weights.py {CHECKPOINT} {MODEL_FILE}\n",
327 | "```\n",
328 | "\n",
329 | "参数:\n",
330 | "- CHECKPOINT:自监督预训练过程中保存下来(名为 `epoch_*.pth`)的模型文件路径\n",
331 | "- MODEL_FILE:输出 backbone 权重文件的保存路径。"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": null,
337 | "metadata": {},
338 | "outputs": [],
339 | "source": [
340 | "%cd /content/mmselfsup"
341 | ]
342 | },
343 | {
344 | "cell_type": "code",
345 | "execution_count": null,
346 | "metadata": {},
347 | "outputs": [],
348 | "source": [
349 | "!python ./tools/model_converters/extract_backbone_weights.py \\\n",
350 | "work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/epoch_1.pth \\\n",
351 | "work_dirs/selfsup/simclr/simclr_resnet50_1xb32-coslr-1e_tinyin200/backbone.pth"
352 | ]
353 | },
354 | {
355 | "cell_type": "markdown",
356 | "metadata": {},
357 | "source": [
358 | "### 3.2 针对 MMSelfSup 模型库里的模型文件"
359 | ]
360 | },
361 | {
362 | "cell_type": "markdown",
363 | "metadata": {},
364 | "source": [
365 | "**注意:MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 中的模型文件都已经提取过 backbone 权值,不需要再次提取!**我们直接使用即可。\n",
366 | "\n",
367 | "在模型库中找到在 SimCLR 的预训练模型文件 `simclr_resnet50_8xb32-coslr-200e_in1k`,下载放在 `checkpoints` 文件夹里 "
368 | ]
369 | },
370 | {
371 | "cell_type": "code",
372 | "execution_count": null,
373 | "metadata": {},
374 | "outputs": [],
375 | "source": [
376 | "%cd /content/mmselfsup\n",
377 | "!mkdir checkpoints\n",
378 | "!wget https://download.openmmlab.com/mmselfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth -P checkpoints"
379 | ]
380 | },
381 | {
382 | "cell_type": "markdown",
383 | "metadata": {},
384 | "source": [
385 | "## 4. 自监督预训练模型的 “检测” 下游任务评估"
386 | ]
387 | },
388 | {
389 | "cell_type": "markdown",
390 | "metadata": {},
391 | "source": [
392 | "### 4.1 写自监督预训练模型的 “检测” 评估配置文件"
393 | ]
394 | },
395 | {
396 | "cell_type": "markdown",
397 | "metadata": {},
398 | "source": [
399 | "1. 新建一个名为 `faster_rcnn_r50_c4_mstrain_3k_voc0712.py` 的配置文件。(配置文件命名要求 & 含义可参考[这里](https://mmsegmentation.readthedocs.io/zh_CN/latest/tutorials/config.html#id3))\n",
400 | "\n",
401 | "\n",
402 | "\n",
403 | "2. `faster_rcnn_r50_c4_mstrain_3k_voc0712.py` 训练配置文件的内容:\n",
404 | " 1. 继承 [faster_rcnn_r50_c4_mstrain_24k_voc0712.py](https://github.com/open-mmlab/mmselfsup/blob/master/configs/benchmarks/mmdetection/voc0712/faster_rcnn_r50_c4_mstrain_24k_voc0712.py) 配置文件\n",
405 | " 2. 根据 batch size 调整学习率(调整原则请参考:[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id2))\n",
406 | " 3. 根据需求修改参数:模型训练多少个 iteration 评估一次、模型训练多少个 iteration 保存一次 checkpoint 文件 以及 总共训练多少个 iteration"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": null,
412 | "metadata": {},
413 | "outputs": [],
414 | "source": [
415 | "%%writefile /content/mmselfsup/configs/benchmarks/mmdetection/voc0712/faster_rcnn_r50_c4_mstrain_3k_voc0712.py\n",
416 | "_base_ = 'faster_rcnn_r50_c4_mstrain_24k_voc0712.py'\n",
417 | "\n",
418 | "optimizer = dict(\n",
419 | " lr=0.02 * (1 / 8)\n",
420 | ")\n",
421 | "\n",
422 | "evaluation = dict(interval=1000, metric='mAP')\n",
423 | "\n",
424 | "checkpoint_config = dict(by_epoch=False, interval=1000)\n",
425 | "\n",
426 | "runner = dict(type='IterBasedRunner', max_iters=3000)"
427 | ]
428 | },
429 | {
430 | "cell_type": "markdown",
431 | "metadata": {},
432 | "source": [
433 | "### 4.2 开始 “检测” 下游任务的评估"
434 | ]
435 | },
436 | {
437 | "cell_type": "markdown",
438 | "metadata": {},
439 | "source": [
440 | "在运行 “检测” 下游任务评估时,我们使用 [mim_dist_train_c4.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/benchmarks/mmdetection/mim_dist_train_c4.sh) 脚本来启动训练。\n",
441 | "\n",
442 | "```shell\n",
443 | "bash tools/benchmarks/mmdetection/mim_dist_train_c4.sh ${CONFIG} ${PRETRAIN} ${GPUS}\n",
444 | "```\n",
445 | "\n",
446 | "参数:\n",
447 | "- CONFIG:“检测” 评估所使用的配置文件,位于 configs/benchmarks/mmdetection/ 里对应的数据集目录下\n",
448 | "- PRETRAIN: 自监督预训练模型的 backbone 权重文件所在的路径\n",
449 | "- GPUS: 所使用 GPU 的数量"
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": null,
455 | "metadata": {},
456 | "outputs": [],
457 | "source": [
458 | "%cd /content/mmselfsup"
459 | ]
460 | },
461 | {
462 | "cell_type": "code",
463 | "execution_count": null,
464 | "metadata": {},
465 | "outputs": [],
466 | "source": [
467 | "!bash tools/benchmarks/mmdetection/mim_dist_train_c4.sh \\\n",
468 | "configs/benchmarks/mmdetection/voc0712/faster_rcnn_r50_c4_mstrain_3k_voc0712.py \\\n",
469 | "checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth \\\n",
470 | "1"
471 | ]
472 | }
473 | ],
474 | "metadata": {
475 | "kernelspec": {
476 | "display_name": "Python 3",
477 | "language": "python",
478 | "name": "python3"
479 | },
480 | "language_info": {
481 | "codemirror_mode": {
482 | "name": "ipython",
483 | "version": 3
484 | },
485 | "file_extension": ".py",
486 | "mimetype": "text/x-python",
487 | "name": "python",
488 | "nbconvert_exporter": "python",
489 | "pygments_lexer": "ipython3",
490 | "version": "3.7.0"
491 | }
492 | },
493 | "nbformat": 4,
494 | "nbformat_minor": 2
495 | }
496 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【6】在 MMDetection 中使用自监督预训练模型.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 在 MMDetection 中使用自监督预训练模型\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMDetection Repo**:[https://github.com/open-mmlab/mmdetection](https://github.com/open-mmlab/mmdetection)\n",
24 | "\n",
25 | "**MMDetection 官方文档链接**:[https://mmdetection.readthedocs.io/en/latest](https://mmdetection.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
28 | "\n",
29 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
30 | "\n",
31 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
32 | "\n",
33 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
34 | "\n",
35 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
36 | "\n",
37 | "**作者**:OpenMMLab"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "## 0. 任务介绍"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "我们除了可以直接使用 MMSelfSup 里 `tools/benchmark` 底下的工具对自监督预训练模型进行评估,还可以直接将保存好的模型文件用于 OpenMMLab 的其他库(如 MMDetection、MMSegmentation)中进行训练。\n",
52 | "\n",
53 | "本教程将演示:如何在 MMDetection 中使用自监督预训练模型的权值进行目标检测任务的训练。\n",
54 | "\n",
55 | "**注意:下游任务的 backbone 必须和自监督预训练模型的 backbone 保持一致,才能进行自监督预训练权值的迁移!**"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "## 1. 环境配置"
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "# Check nvcc version\n",
79 | "!nvcc -V"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "# Check GCC version\n",
89 | "!gcc --version"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "# Check PyTorch installation\n",
99 | "import torch, torchvision\n",
100 | "print(torch.__version__)\n",
101 | "print(torch.cuda.is_available())"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "### 1.2 安装 MMDetection 的依赖库:MMCV"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "!pip install -U openmim\n",
118 | "!mim install mmcv-full"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "### 1.3 安装 MMDetection\n",
126 | "其他方式的安装详情请参考: [MMDetection 安装文档](https://mmdetection.readthedocs.io/en/latest/get_started.html#best-practices)。"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "metadata": {},
133 | "outputs": [],
134 | "source": [
135 | "%cd /content"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {},
142 | "outputs": [],
143 | "source": [
144 | "!git clone https://github.com/open-mmlab/mmdetection.git\n",
145 | "%cd mmdetection\n",
146 | "!pip install -v -e ."
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "### 1.5 检查安装是否正确"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "import mmdet\n",
163 | "print(mmdet.__version__)"
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "## 2. 准备数据集"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {},
176 | "source": [
177 | "### 2.0 数据集介绍"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "本教程将在 `COCO2017` 数据集上训练目标检测任务。\n",
185 | "\n",
186 | "COCO 是一个大规模目标检测、图像分割和图像描述数据集。它包含 80 个物体类别,150 万个物体实例,约 33 万张图像(其中超过 20 万张图像有标注)。\n",
187 | "\n",
188 | "数据集官方网址:https://cocodataset.org"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "### 2.1 下载并解压数据集"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "metadata": {},
201 | "source": [
202 | "使用该命令可以下载并解压 COCO 数据集。该命令支持下载 COCO,VOC 和 LVIS 数据集。\n",
203 | "```shell\n",
204 | "python tools/misc/download_dataset.py --dataset-name ${DATASET_NAME} --unzip --delete\n",
205 | "```\n",
206 | "\n",
207 | "参数:\n",
208 | "+ DATASET_NAME:支持数据集的名称 `coco2017`,`voc2007` 和 `lvis`"
209 | ]
210 | },
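{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a single call (roughly equivalent to the manual `wget` steps below) would be:\n",
"\n",
"```shell\n",
"python tools/misc/download_dataset.py --dataset-name coco2017 --unzip --delete\n",
"```"
]
},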
211 | {
212 | "cell_type": "code",
213 | "execution_count": null,
214 | "metadata": {},
215 | "outputs": [],
216 | "source": [
217 | "%cd /content/mmdetection\n",
218 | "!mkdir -p data/coco\n",
219 | "%cd data/coco"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "!wget http://images.cocodataset.org/zips/train2017.zip\n",
229 | "!unzip train2017.zip\n",
230 | "!rm -rf train2017.zip\n",
231 | "\n",
232 | "!wget http://images.cocodataset.org/zips/val2017.zip\n",
233 | "!unzip val2017.zip\n",
234 | "!rm -rf val2017.zip\n",
235 | "\n",
236 | "!wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
237 | "!unzip annotations_trainval2017.zip\n",
238 | "!rm -rf annotations_trainval2017.zip"
239 | ]
240 | },
241 | {
242 | "cell_type": "markdown",
243 | "metadata": {},
244 | "source": [
245 | "## 3. 准备自监督预训练模型的 backbone 权值文件"
246 | ]
247 | },
248 | {
249 | "cell_type": "markdown",
250 | "metadata": {},
251 | "source": [
252 | "### 3.1 针对自监督预训练过程中保存的 checkpoint 文件"
253 | ]
254 | },
255 | {
256 | "cell_type": "markdown",
257 | "metadata": {},
258 | "source": [
259 | "可以参考前面的教程,使用命令 `tools/model_converters/extract_backbone_weights.py` 来提取自监督预训练模型的 backbone 权值,代码如下:\n",
260 | "\n",
261 | "```python\n",
262 | "python tools/model_converters/extract_backbone_weights.py {CHECKPOINT} {MODEL_FILE}\n",
263 | "```\n",
264 | "\n",
265 | "参数:\n",
266 | "- CHECKPOINT:自监督预训练过程中保存下来(名为 `epoch_*.pth`)的模型文件路径\n",
267 | "- MODEL_FILE:输出 backbone 权重文件的保存路径。"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {},
273 | "source": [
274 | "### 3.2 针对 MMSelfSup 模型库里的模型文件"
275 | ]
276 | },
277 | {
278 | "cell_type": "markdown",
279 | "metadata": {},
280 | "source": [
281 | "**注意:MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 中的模型文件都已经提取过 backbone 权值,不需要再次提取!**我们直接使用即可。\n",
282 | "\n",
283 | "在模型库中找到在 SimCLR 的预训练模型文件 `simclr_resnet50_8xb32-coslr-200e_in1k`,下载放在 `checkpoints` 文件夹里 "
284 | ]
285 | },
286 | {
287 | "cell_type": "code",
288 | "execution_count": null,
289 | "metadata": {},
290 | "outputs": [],
291 | "source": [
292 | "%cd /content/mmdetection\n",
293 | "!mkdir checkpoints\n",
294 | "!wget https://download.openmmlab.com/mmselfsup/simclr/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth -P checkpoints"
295 | ]
296 | },
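{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check (a minimal sketch, not part of the original tutorial), we can peek into the downloaded file and confirm that it holds backbone-only weights with ResNet-style parameter names, which is what allows them to transfer into the detector's backbone below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"ckpt = torch.load(\n",
"    'checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth',\n",
"    map_location='cpu')\n",
"# OpenMMLab checkpoints usually wrap the weights in a 'state_dict' entry\n",
"state_dict = ckpt.get('state_dict', ckpt)\n",
"print(len(state_dict), 'parameter tensors')\n",
"print(list(state_dict)[:5])  # expect names like 'conv1.weight', 'bn1.weight', ..."
]
},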
297 | {
298 | "cell_type": "markdown",
299 | "metadata": {},
300 | "source": [
301 | "## 4. 使用自监督预训练模型的权值初始化目标检测的模型"
302 | ]
303 | },
304 | {
305 | "cell_type": "markdown",
306 | "metadata": {},
307 | "source": [
308 | "### 4.1 写目标检测任务的配置文件"
309 | ]
310 | },
311 | {
312 | "cell_type": "markdown",
313 | "metadata": {},
314 | "source": [
315 | "1. 新建一个名为 `faster_rcnn_simclr-pretrained_r50_caffe_fpn_9k_coco.py` 的配置文件。\n",
316 | "\n",
317 | "\n",
318 | "2. `faster_rcnn_simclr-pretrained_r50_caffe_fpn_9k_coco.py` 训练配置文件的内容:\n",
319 | " 1. 继承 [faster_rcnn_r50_caffe_fpn_90k_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py) 配置文件\n",
320 | " 2. 将模型的 checkpoint 参数修改为自监督预训练模型的 backbone 所在路径\n",
321 | " 3. 根据 batch size 调整学习率(调整原则请参考:[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id2))\n",
322 | " 4. 修改总共训练的迭代次数 max_iters、模型训练多少个 iteration 评估一次、模型训练多少个 iteration 保存一次 checkpoint 文件等参数"
323 | ]
324 | },
325 | {
326 | "cell_type": "code",
327 | "execution_count": null,
328 | "metadata": {},
329 | "outputs": [],
330 | "source": [
331 | "%%writefile /content/mmdetection/configs/faster_rcnn/faster_rcnn_simclr-pretrained_r50_caffe_fpn_9k_coco.py\n",
332 | "_base_ = 'faster_rcnn_r50_caffe_fpn_90k_coco.py'\n",
333 | "\n",
334 | "model = dict(\n",
335 | " backbone=dict(\n",
336 | " frozen_stages=-1,\n",
337 | " init_cfg=dict(\n",
338 | " type='Pretrained',\n",
339 | " checkpoint='checkpoints/simclr_resnet50_8xb32-coslr-200e_in1k_20220428-46ef6bb9.pth')\n",
340 | " )\n",
341 | ")\n",
342 | "\n",
343 | "# optimizer\n",
344 | "optimizer = dict(\n",
345 | " lr=0.02 * (1 / 8)\n",
346 | ")\n",
347 | "\n",
348 | "# Runner type\n",
349 | "runner = dict(_delete_=True, type='IterBasedRunner', max_iters=9000)\n",
350 | "\n",
351 | "checkpoint_config = dict(interval=3000)\n",
352 | "\n",
353 | "evaluation = dict(interval=3000)"
354 | ]
355 | },
356 | {
357 | "cell_type": "markdown",
358 | "metadata": {},
359 | "source": [
360 | "### 4.2 开始训练目标检测模型"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "在训练目标检测模型时,我们使用 [tools/train.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py) 训练工具来启动训练。\n",
368 | "\n",
369 | "```shell\n",
370 | "python tools/train.py \\\n",
371 | " ${CONFIG_FILE} \\\n",
372 | " [optional arguments]\n",
373 | "```\n",
374 | "\n",
375 | "参数:\n",
376 | "- CONFIG_FILE:“检测” 评估所使用的配置文件,位于 configs 里对应模型的目录下\n",
377 | "\n",
378 | "详情请参考:[文档](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html#training-on-a-single-gpu)"
379 | ]
380 | },
381 | {
382 | "cell_type": "code",
383 | "execution_count": null,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "%cd /content/mmdetection"
388 | ]
389 | },
390 | {
391 | "cell_type": "code",
392 | "execution_count": null,
393 | "metadata": {},
394 | "outputs": [],
395 | "source": [
396 | "!python tools/train.py \\\n",
397 | "configs/faster_rcnn/faster_rcnn_simclr-pretrained_r50_caffe_fpn_9k_coco.py"
398 | ]
399 | }
400 | ],
401 | "metadata": {
402 | "kernelspec": {
403 | "display_name": "Python 3",
404 | "language": "python",
405 | "name": "python3"
406 | },
407 | "language_info": {
408 | "codemirror_mode": {
409 | "name": "ipython",
410 | "version": 3
411 | },
412 | "file_extension": ".py",
413 | "mimetype": "text/x-python",
414 | "name": "python",
415 | "nbconvert_exporter": "python",
416 | "pygments_lexer": "ipython3",
417 | "version": "3.7.0"
418 | }
419 | },
420 | "nbformat": 4,
421 | "nbformat_minor": 2
422 | }
423 |
--------------------------------------------------------------------------------
/codes/MMSelfSup_tutorials/【7】模型自监督预训练 之 MAE.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# 模型自监督预训练 之 MAE\n",
15 | "\n",
16 | "
"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "**MMSelfSup Repo**:[https://github.com/open-mmlab/mmselfsup](https://github.com/open-mmlab/mmselfsup)\n",
24 | "\n",
25 | "**MMSelfSup 官方文档链接**:[https://mmselfsup.readthedocs.io/en/latest](https://mmselfsup.readthedocs.io/en/latest)\n",
26 | "\n",
27 | "**MMSelfSup 视频教学**:[https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287](https://space.bilibili.com/1293512903/channel/collectiondetail?sid=657287)\n",
28 | "\n",
29 | "**MMSelfSup 代码库介绍 PPT 获取方式**:关注 OpenMMLab 公众号,后台回复:mmselfsup,即可获取课程 PPT\n",
30 | "\n",
31 | "**加入微信社群方式**:关注公众号,选择 “加入我们” -> “微信社区”,即可获取入群二维码。非常期待你的到来呀~\n",
32 | "\n",
33 | "**作者**:OpenMMLab"
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "## 0. 自监督预训练方法介绍:Masked Autoencoders (MAE)"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "**论文地址**:https://arxiv.org/pdf/2111.06377.pdf\n",
48 | "\n",
49 | "**MAE 基本思想**:将输入图像分块 `patch`,随机遮挡住一部分图像块。将未被遮挡住的图像块和对应的位置信息一同输入到模型中,让模型恢复被遮挡的图像块内容。"
50 | ]
51 | },
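{
"cell_type": "markdown",
"metadata": {},
"source": [
"To build intuition, here is a minimal, self-contained sketch of MAE-style random patch masking in plain PyTorch. It is for illustration only and does not reproduce MMSelfSup's actual `MAEViT` implementation; all names below are made up for this example."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"imgs = torch.randn(2, 3, 64, 64)  # a toy batch of 64x64 images\n",
"p = 16                            # patch size\n",
"\n",
"# split each image into non-overlapping p x p patches: (B, N, C, p, p)\n",
"patches = imgs.unfold(2, p, p).unfold(3, p, p)\n",
"patches = patches.reshape(2, 3, -1, p, p).permute(0, 2, 1, 3, 4)\n",
"B, N = patches.shape[:2]\n",
"\n",
"# keep a random 25% of the patches (mask ratio 0.75, as in the paper)\n",
"mask_ratio = 0.75\n",
"num_keep = int(N * (1 - mask_ratio))\n",
"noise = torch.rand(B, N)                       # one random score per patch\n",
"keep_ids = noise.argsort(dim=1)[:, :num_keep]  # indices of the visible patches\n",
"visible = torch.gather(\n",
"    patches, 1,\n",
"    keep_ids[:, :, None, None, None].expand(-1, -1, 3, p, p))\n",
"print(visible.shape)  # only these visible patches would be fed to the encoder"
]
},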
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
56 | "
"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "## 1. 环境配置"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "### 1.1 查看 Python、PyTorch 和 Torchvision 的版本"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "# Check nvcc version\n",
80 | "!nvcc -V"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "# Check GCC version\n",
90 | "!gcc --version"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "# Check PyTorch installation\n",
100 | "import torch, torchvision\n",
101 | "print(torch.__version__)\n",
102 | "print(torch.cuda.is_available())"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {},
108 | "source": [
109 | "### 1.2 安装 MMSelfSup 的依赖库:MMCV"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "!pip install openmim"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "!mim install mmcv"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "### 1.3 安装 MMSelfSup"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": null,
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "%cd /content"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "!git clone https://github.com/open-mmlab/mmselfsup.git\n",
153 | "%cd /content/mmselfsup"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "# Install MMSelfSup from source\n",
163 | "!pip install -e . "
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "### 1.4 检查安装是否正确"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "import mmselfsup\n",
180 | "print(mmselfsup.__version__)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "## 2. 准备数据集"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "### 2.0 数据集介绍"
195 | ]
196 | },
197 | {
198 | "cell_type": "markdown",
199 | "metadata": {},
200 | "source": [
201 | "本教程将在 `Tiny ImageNet` 数据集上训练 Masked Autoencoders (MAE) 模型。\n",
202 | "\n",
203 | "Tiny ImageNet 数据集是 ImageNet 的一个子集。\n",
204 | "\n",
205 | "该数据集包含 200 个类别,每个类别有 500 张训练图片、50 张验证图片和 50 张测试图片,共 120,000 张图像。每张图片均为 64×64 彩色图片。\n",
206 | "\n",
207 | "数据集官方下载地址:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
208 | ]
209 | },
210 | {
211 | "cell_type": "markdown",
212 | "metadata": {},
213 | "source": [
214 | "### 2.1 下载数据集"
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "metadata": {},
220 | "source": [
221 | "使用 GNU [Wget](https://www.gnu.org/software/wget/) 工具从斯坦福官方网站下载:http://cs231n.stanford.edu/tiny-imagenet-200.zip"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "metadata": {},
228 | "outputs": [],
229 | "source": [
230 | "%cd /content/mmselfsup"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": [
239 | "!mkdir data\n",
240 | "%cd data\n",
241 | "!wget http://cs231n.stanford.edu/tiny-imagenet-200.zip"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": [
248 | "### 2.2 解压数据集"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {},
255 | "outputs": [],
256 | "source": [
257 | "!unzip -q tiny-imagenet-200.zip"
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": null,
263 | "metadata": {},
264 | "outputs": [],
265 | "source": [
266 | "!rm -rf tiny-imagenet-200.zip"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "### 2.3 查看数据集目录"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "# Check data directory\n",
283 | "!apt-get install tree\n",
284 | "!tree -d /content/mmselfsup/data"
285 | ]
286 | },
287 | {
288 | "cell_type": "markdown",
289 | "metadata": {},
290 | "source": [
291 | "### 2.4 准备标注文件"
292 | ]
293 | },
294 | {
295 | "cell_type": "markdown",
296 | "metadata": {},
297 | "source": [
298 | "为了减少大家重写 `加载数据集` 代码的负担,我们整理好了标注文件,复制到数据集根目录 `mmselfsup/data/tiny-imagenet-200` 下即可。"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": null,
304 | "metadata": {},
305 | "outputs": [],
306 | "source": [
307 | "%cd /content/mmselfsup/data"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "metadata": {},
314 | "outputs": [],
315 | "source": [
316 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/train.txt -P tiny-imagenet-200\n",
317 | "!wget https://raw.githubusercontent.com/open-mmlab/OpenMMLabCourse/main/codes/MMSelfSup_tutorials/anno_files/val.txt -P tiny-imagenet-200"
318 | ]
319 | },
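{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Peek at the annotation format (typically one image path plus a class index per line)\n",
"!head -n 3 tiny-imagenet-200/train.txt"
]
},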
320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "## 3. 写模型自监督预训练的配置文件"
325 | ]
326 | },
327 | {
328 | "cell_type": "markdown",
329 | "metadata": {},
330 | "source": [
331 | "1. 新建一个名为 `mae_vit-base-p16_1xb32-coslr-1e_tinyin200.py` 的配置文件。(配置文件命名要求 & 含义可参考[这里](https://mmsegmentation.readthedocs.io/zh_CN/latest/tutorials/config.html#id3))\n",
332 | "\n",
333 | "\n",
334 | "\n",
335 | "2. 写训练配置文件的内容:\n",
336 | " 1. 继承 [mae_vit-base-p16_8xb512-coslr-400e_in1k.py](https://github.com/open-mmlab/mmselfsup/blob/master/configs/selfsup/mae/mae_vit-base-p16_8xb512-coslr-400e_in1k.py) 配置文件\n",
337 | " 2. 根据需求修改参数 samples_per_gpu(单个 GPU 的 Batch size)和 workers_per_gpu (单个 GPU 分配的数据加载线程数)\n",
338 | " 3. 修改数据集路径和数据标注文件路径\n",
339 | " 4. 根据 batch size 调整学习率(调整原则请参考:[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id2))\n",
340 | " 5. 修改训练的总轮数 epoch"
341 | ]
342 | },
343 | {
344 | "cell_type": "code",
345 | "execution_count": null,
346 | "metadata": {},
347 | "outputs": [],
348 | "source": [
349 | "%cd /content/mmselfsup"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": null,
355 | "metadata": {},
356 | "outputs": [],
357 | "source": [
358 | "%%writefile /content/mmselfsup/configs/selfsup/mae/mae_vit-base-p16_1xb32-coslr-1e_tinyin200.py\n",
359 | "\n",
360 | "_base_ = 'mae_vit-base-p16_8xb512-coslr-400e_in1k.py'\n",
361 | "\n",
362 | "# dataset\n",
363 | "data = dict(\n",
364 | " samples_per_gpu=32, \n",
365 | " workers_per_gpu=2,\n",
366 | " train=dict(\n",
367 | " data_source=dict(\n",
368 | " data_prefix='data/tiny-imagenet-200/train',\n",
369 | " ann_file='data/tiny-imagenet-200/train.txt',\n",
370 | " )\n",
371 | " )\n",
372 | ")\n",
373 | "\n",
374 | "# optimizer\n",
375 | "optimizer = dict(\n",
376 | " lr=1.5e-4 * 4096 / 256 * (32 / 512 * 8),\n",
377 | ")\n",
378 | "\n",
379 | "runner = dict(max_epochs=1)"
380 | ]
381 | },
382 | {
383 | "cell_type": "markdown",
384 | "metadata": {},
385 | "source": [
386 | "## 4. 模型自监督预训练"
387 | ]
388 | },
389 | {
390 | "cell_type": "markdown",
391 | "metadata": {},
392 | "source": [
393 | "我们推荐使用分布式训练工具 [tools/dist_train.sh](https://github.com/open-mmlab/mmselfsup/blob/master/tools/dist_train.sh) 来启动训练任务(即使您只用一张 GPU 进行训练)。\n",
394 | "因为一些自监督预训练算法需要用多张 GPU 进行训练,为此 MMSelfSup 支持了多卡训练可能会用到的模块,如 `SyncBN` 等。如果算法在训练的过程中使用到了这些模块,但不使用分布式训练,就会报错。\n",
395 | "\n",
396 | "```shell\n",
397 | "bash tools/dist_train.sh ${CONFIG_FILE} ${GPUS} --work-dir ${YOUR_WORK_DIR} [optional arguments]\n",
398 | "```\n",
399 | "\n",
400 | "参数:\n",
401 | "+ CONFIG_FILE:自监督训练的配置文件所在路径\n",
402 | "\n",
403 | "+ GPUS:进行训练时所使用的 GPU 数量\n",
404 | "\n",
405 | "+ work-dir:训练过程中产生模型和日志等文件的保存路径\n",
406 | "\n",
407 | "其他可选参数 `optional arguments` 可参考[这里](https://mmselfsup.readthedocs.io/zh_CN/latest/get_started.html#id3)。"
408 | ]
409 | },
410 | {
411 | "cell_type": "code",
412 | "execution_count": null,
413 | "metadata": {},
414 | "outputs": [],
415 | "source": [
416 | "%cd /content/mmselfsup"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": null,
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "!bash tools/dist_train.sh \\\n",
426 | "configs/selfsup/mae/mae_vit-base-p16_1xb32-coslr-1e_tinyin200.py \\\n",
427 | "1 \\\n",
428 | "--work_dir work_dirs/selfsup/mae/mae_vit-base-p16_1xb32-coslr-1e_tinyin200/ "
429 | ]
430 | },
431 | {
432 | "cell_type": "markdown",
433 | "metadata": {},
434 | "source": [
435 | "## 5. MAE 模型恢复被遮挡图片效果的可视化"
436 | ]
437 | },
438 | {
439 | "cell_type": "markdown",
440 | "metadata": {},
441 | "source": [
442 | "### 5.1 准备自监督预训练模型文件\n",
443 | "\n",
444 | "**注意**:为了方便大家直接将训练好的模型迁移到下游任务上进行后续的训练,目前 MMSelfSup 的 [模型库](https://github.com/open-mmlab/mmselfsup/blob/master/docs/en/model_zoo.md) 里的模型都是已经提取过 backbone 部分的权值文件,所以是没有 decoder 部分的权值,不可以拿来进行恢复被遮挡图片效果的可视化。"
445 | ]
446 | },
447 | {
448 | "cell_type": "code",
449 | "execution_count": null,
450 | "metadata": {},
451 | "outputs": [],
452 | "source": [
453 | "%cd /content/mmselfsup\n",
454 | "!mkdir checkpoints\n",
455 | "!wget https://download.openmmlab.com/mmselfsup/mae/mae_visualize_vit_large.pth -P checkpoints"
456 | ]
457 | },
458 | {
459 | "cell_type": "markdown",
460 | "metadata": {},
461 | "source": [
462 | "### 5.2 准备自监督预训练模型的配置文件"
463 | ]
464 | },
465 | {
466 | "cell_type": "code",
467 | "execution_count": null,
468 | "metadata": {},
469 | "outputs": [],
470 | "source": [
471 | "%cd /content/mmselfsup"
472 | ]
473 | },
474 | {
475 | "cell_type": "code",
476 | "execution_count": null,
477 | "metadata": {},
478 | "outputs": [],
479 | "source": [
480 | "%%writefile configs/selfsup/mae/mae_visualization.py\n",
481 | "model = dict(\n",
482 | " type='MAE',\n",
483 | " backbone=dict(type='MAEViT', arch='l', patch_size=16, mask_ratio=0.75),\n",
484 | " neck=dict(\n",
485 | " type='MAEPretrainDecoder',\n",
486 | " patch_size=16,\n",
487 | " in_chans=3,\n",
488 | " embed_dim=1024,\n",
489 | " decoder_embed_dim=512,\n",
490 | " decoder_depth=8,\n",
491 | " decoder_num_heads=16,\n",
492 | " mlp_ratio=4.,\n",
493 | " ),\n",
494 | " head=dict(type='MAEPretrainHead', norm_pix=True, patch_size=16))\n",
495 | "\n",
496 | "img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n",
497 | "# dataset summary\n",
498 | "data = dict(\n",
499 | " test=dict(\n",
500 | " pipeline = [\n",
501 | " dict(type='Resize', size=(224, 224)),\n",
502 | " dict(type='ToTensor'),\n",
503 | " dict(type='Normalize', **img_norm_cfg),]\n",
504 | " ))"
505 | ]
506 | },
507 | {
508 | "cell_type": "markdown",
509 | "metadata": {},
510 | "source": [
511 | "### 5.3 可视化 MAE 模型恢复被遮挡图片的效果"
512 | ]
513 | },
514 | {
515 | "cell_type": "markdown",
516 | "metadata": {},
517 | "source": [
518 | "我们可以使用工具 [tools/misc/mae_visualization.py](https://github.com/open-mmlab/mmselfsup/blob/dev/tools/misc/mae_visualization.py) 来可视化 MAE 模型恢复被遮挡图片的效果。\n",
519 | "\n",
520 | "```shell\n",
521 | "python tools/misc/mae_visualization.py ${IMG_PATH} ${CONFIG} ${CHECKPOINT_PATH} ${OUT_FILE} --device ${DEVICE}\n",
522 | "```\n",
523 | "\n",
524 | "参数:\n",
525 | "+ IMG_PATH:测试图片路径\n",
526 | "+ CONFIG:自监督训练的配置文件所在路径\n",
527 | "+ CHECKPOINT_PATH:自监督预训练过程中保存下来(名为 `epoch_*.pth`)的模型文件路径\n",
528 | "+ OUT_FILE:保存图片路径\n",
529 | "+ device:设置加载模型的设备,默认值为 'cuda:0'"
530 | ]
531 | },
532 | {
533 | "cell_type": "code",
534 | "execution_count": null,
535 | "metadata": {},
536 | "outputs": [],
537 | "source": [
538 | "%cd /content/mmselfsup/work_dirs/selfsup/mae\n",
539 | "!mkdir mae_visualization"
540 | ]
541 | },
542 | {
543 | "cell_type": "code",
544 | "execution_count": null,
545 | "metadata": {},
546 | "outputs": [],
547 | "source": [
548 | "%cd /content/mmselfsup\n",
549 | "\n",
550 | "!python tools/misc/mae_visualization.py \\\n",
551 | "data/tiny-imagenet-200/train/n01443537/images/n01443537_0.JPEG \\\n",
552 | "configs/selfsup/mae/mae_visualization.py \\\n",
553 | "work_dirs/selfsup/mae/mae_visualization/mae_visualization.png \\\n",
554 | "checkpoints/mae_visualize_vit_large.pth"
555 | ]
556 | }
557 | ],
558 | "metadata": {
559 | "kernelspec": {
560 | "display_name": "Python 3",
561 | "language": "python",
562 | "name": "python3"
563 | },
564 | "language_info": {
565 | "codemirror_mode": {
566 | "name": "ipython",
567 | "version": 3
568 | },
569 | "file_extension": ".py",
570 | "mimetype": "text/x-python",
571 | "name": "python",
572 | "nbconvert_exporter": "python",
573 | "pygments_lexer": "ipython3",
574 | "version": "3.7.0"
575 | }
576 | },
577 | "nbformat": 4,
578 | "nbformat_minor": 2
579 | }
580 |
--------------------------------------------------------------------------------
/lecture_sjtu.md:
--------------------------------------------------------------------------------
1 | # SJTU × SenseTime: OpenMMLab Hands-On Open Course
2 |
3 | In October 2022, the OpenMMLab Hands-On Open Course, jointly created by the Student Innovation Center of Shanghai Jiao Tong University, SenseTime, and Shanghai AI Laboratory, successfully came to a close.
4 |
5 | The course balances theory and practice. It covers fundamental computer vision problems such as classification, detection and segmentation, and uses the OpenMMLab codebases to walk through engaging case studies in image generation, text recognition, object tracking, human pose estimation and more, spanning the full model production pipeline from data annotation through model training to model deployment.
6 |
7 | To make the course available to more learners, we have released the course materials. Follow the [OpenMMLab](https://mp.weixin.qq.com/s/Z6PLcYR0CxFoNA0_RXANsA) or [SenseTime Academic](https://mp.weixin.qq.com/s/6ye-kK89PWPHo1jK4fUdCg) WeChat official account and reply with the keyword to receive the course slides.
8 |
9 | | Lecture | Video | Code |
10 | | :- | :-: | :-: |
11 | | [Computer Vision and the OpenMMLab Open-Source Algorithm Ecosystem](https://www.bilibili.com/video/BV1WG41177DP/) | [video](https://www.bilibili.com/video/BV1WG41177DP/) | |
12 | | [Fundamentals of Deep Learning Algorithms](https://www.bilibili.com/video/BV1gP411N7yA/) | [video](https://www.bilibili.com/video/BV1gP411N7yA/) | |
13 | | [Introduction to the AI Training Platform of the SJTU Student Innovation Center](https://www.bilibili.com/video/BV1CV4y1V7SQ/) | [video](https://www.bilibili.com/video/BV1CV4y1V7SQ/) | |
14 | | [Image Classification](https://www.bilibili.com/video/BV11G4y1n7gC/)<br>From feature engineering to feature learning<br>Model design<br>Convolutional neural networks<br>Lightweight convolutional neural networks<br>Vision Transformers<br>Model learning and training tricks<br>Learning-rate and optimizer schedules<br>Data augmentation<br>A brief introduction to self-supervised learning | [video](https://www.bilibili.com/video/BV11G4y1n7gC/) | |
15 | | [MMClassification: Introduction and Code Practice](https://www.bilibili.com/video/BV1LP411N7G4/) | [video](https://www.bilibili.com/video/BV1LP411N7G4/) | [Inference with pre-trained models](https://github.com/wangruohui/sjtu-openmmlab-tutorial/blob/main/cls-1-inference.ipynb)<br>[Train your own image classifier](https://github.com/wangruohui/sjtu-openmmlab-tutorial/blob/main/cls-2-train.ipynb) |
16 | | [Object Detection](https://www.bilibili.com/video/BV1BG4y1n7pn/)<br>From sliding windows to the dense prediction paradigm<br>Basic concepts in object detection<br>Two-stage detectors<br>Single-stage detectors<br>Anchor-free detectors<br>Detection Transformers<br>Evaluating detectors | [video](https://www.bilibili.com/video/BV1BG4y1n7pn/) | |
17 | | [MMDetection: Introduction and Code Practice](https://www.bilibili.com/video/BV1o8411Y7Td/) | [video](https://www.bilibili.com/video/BV1o8411Y7Td/) | [Inference with pre-trained models](https://github.com/wangruohui/sjtu-openmmlab-tutorial/blob/main/det-1-inference.ipynb)<br>[Understanding the modules in a model](https://github.com/wangruohui/sjtu-openmmlab-tutorial/blob/main/det-2-det-modules.ipynb)<br>[Train your own detector](https://github.com/wangruohui/sjtu-openmmlab-tutorial/blob/main/det-3-train-retinanet.ipynb) |
18 | | [Dataset Collection and Annotation, and the LabelBee Tool](https://www.bilibili.com/video/BV16W4y1E74Z/) | [video](https://www.bilibili.com/video/BV16W4y1E74Z/) | |
19 | | [Model Deployment and MMDeploy Practice](https://www.bilibili.com/video/BV1Ne411G7RX/) | [video](https://www.bilibili.com/video/BV1Ne411G7RX/) | |
20 | | [Semantic Segmentation and MMSegmentation Practice](https://www.bilibili.com/video/BV1fP411A74D/) | [video](https://www.bilibili.com/video/BV1fP411A74D/) | [MMSegmentation_Tutorials](https://github.com/TommyZihao/MMSegmentation_Tutorials) |
21 | | [Text Recognition and MMOCR Practice](https://www.bilibili.com/video/BV1Qe4y1e7fH/) | [video](https://www.bilibili.com/video/BV1Qe4y1e7fH/) | [MMOCR_Tutorials](https://github.com/TommyZihao/MMOCR_tutorials) |
22 | | [Human Pose Estimation and MMPose Practice](https://www.bilibili.com/video/BV1Hg411z7Qk/) | [video](https://www.bilibili.com/video/BV1Hg411z7Qk/) | [MMPose_Tutorials](https://github.com/TommyZihao/MMPose_Tutorials) |
23 | | [Video Object Tracking and MMTracking Practice](https://www.bilibili.com/video/BV14t4y1T7rd/) | [video](https://www.bilibili.com/video/BV14t4y1T7rd/) | [MMTracking_Tutorials](https://github.com/TommyZihao/MMTracking_Tutorials) |
24 | | [Generative Models (GANs) and MMGeneration Practice](https://www.bilibili.com/video/BV1SK411D7uS/) | [video](https://www.bilibili.com/video/BV1SK411D7uS/) | [MMGeneration_Tutorials](https://github.com/TommyZihao/MMGeneration_Tutorials) |
25 | | [MMClassification Practice 2](https://www.bilibili.com/video/BV1be4y1e7uS/) | [video](https://www.bilibili.com/video/BV1be4y1e7uS/) | [MMClassification_Tutorials](https://github.com/TommyZihao/MMClassification_Tutorials) |
26 |
27 |
28 | Course announcement pages:
29 | [1](https://mp.weixin.qq.com/s/QRvy6jmCpkRHi2nxmkHPWg),
30 | [2](https://mp.weixin.qq.com/s/8yztK5qu9-7cXCF1WK441g)
--------------------------------------------------------------------------------
/mmeval.md:
--------------------------------------------------------------------------------
1 | # MMEval Video Series
2 |
3 | MMEval is a cross-framework algorithm evaluation library. The MMEval video series introduces and explains MMEval and related topics.
4 |
5 | MMEval repository: https://github.com/open-mmlab/mmeval/
6 | MMEval video series: https://space.bilibili.com/1293512903/channel/collectiondetail?sid=862118
7 |
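As a quick taste of the API, the snippet below computes top-1 accuracy from plain NumPy arrays (adapted from the MMEval README; the same metric also accepts framework-native tensors such as `torch.Tensor`):

```python
import numpy as np
from mmeval import Accuracy

accuracy = Accuracy()
preds = np.asarray([0, 2, 1, 3])   # predicted labels
labels = np.asarray([0, 1, 2, 3])  # ground-truth labels
print(accuracy(preds, labels))     # {'top1': 0.5}
```
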
8 | | | Topic | Video | Code/Docs |
9 | | :---: | :---------- | :---: | :--- |
10 | | Lecture 1 | Introduction to MMEval | [video](https://www.bilibili.com/video/BV1m24y127Db) | [1-MMEval-介绍.ipynb](codes/MMEval_tutorials/1-MMEval-介绍.ipynb) |
11 | | Lecture 2 | Using MMEval | [video](https://www.bilibili.com/video/BV1m24y127Db) | [2-MMEval-使用.ipynb](codes/MMEval_tutorials/2-MMEval-使用.ipynb) |
12 | | Lecture 3 | Adding an Evaluation Metric to MMEval | [video](https://www.bilibili.com/video/BV1m24y127Db) | [3-MMEval-添加评测指标.ipynb](codes/MMEval_tutorials/3-MMEval-添加评测指标.ipynb) |
--------------------------------------------------------------------------------
/mmyolo.md:
--------------------------------------------------------------------------------
1 | # MMYOLO Video Series
2 |
3 | The MMYOLO video series is recorded and shared by MMYOLO developers together with community members; we hope it helps you learn and use MMYOLO.
4 |
5 | MMYOLO repository: https://github.com/open-mmlab/mmyolo/
6 | MMYOLO roadmap feedback: https://github.com/open-mmlab/mmyolo/issues/136
7 | MMYOLO video series: https://space.bilibili.com/1293512903/channel/collectiondetail?sid=788924
8 |
9 | ## Tools
10 |
11 | | | Topic | Video | Code/Docs |
12 | | :---: | :----------: | :---: | :---: |
13 | | Episode 1 | Feature map visualization | [video](https://www.bilibili.com/video/BV188411s7o8) | [特征图可视化.ipynb](codes/MMYOLO_tutorials/[工具类第一期]特征图可视化.ipynb) |
14 | | Episode 2 | Large-image inference with sahi | [video](https://www.bilibili.com/video/BV1EK411R7Ws/) | [10分钟轻松掌握大图推理.ipynb](codes/MMYOLO_tutorials/[工具类第二期]10分钟轻松掌握大图推理.ipynb) |
15 |
16 | ## Basics
17 |
18 | | | Topic | Video | Code/Docs |
19 | | :---: | :--------: | :---: | :---: |
20 | | Episode 1 | A complete guide to configs | [video](https://www.bilibili.com/video/BV1214y157ck) | [`Config guide` article](https://zhuanlan.zhihu.com/p/577715188) |
21 | | Episode 2 | A walkthrough of the file structure | [video](https://www.bilibili.com/video/BV1LP4y117jS) | [`File structure` article](https://zhuanlan.zhihu.com/p/584807195) |
22 |
23 | ## Practical
24 |
25 | | | Topic | Video | Code/Docs |
26 | | :---: | :---: | :---: | :---: |
27 | | Episode 1 | Source-code debugging tips | [video](https://www.bilibili.com/video/BV1N14y1V7mB) | [`Debugging tips` article](https://zhuanlan.zhihu.com/p/580885852) |
28 | | Episode 2 | Swap in any backbone in 10 minutes | [video](https://www.bilibili.com/video/BV1JG4y1d7GC) | [10分钟换遍主干网络.ipynb](codes/MMYOLO_tutorials/[实用类第二期]10分钟换遍主干网络.ipynb) |
29 | | Episode 3 | Custom datasets from annotation to deployment, step by step | [video](https://www.bilibili.com/video/BV1RG4y137i5) | [`Custom dataset` tutorial](https://zhuanlan.zhihu.com/p/595497726) |
30 | | Episode 4 | First step toward a top conference: customizing modules | [video](https://www.bilibili.com/video/BV1yd4y1j7VD) | [顶会第一步·模块自定义.ipynb](codes/MMYOLO_tutorials/[实用类第四期]顶会第一步·模块自定义.ipynb) |
31 |
32 | ## Source-Code Walkthroughs
33 |
34 | ## Demos
35 |
36 | | | Topic | Video |
37 | | :---: | :----------: | :---: |
38 | | Episode 1 | Feature map visualization | [video](https://www.bilibili.com/video/BV1je4y1478R) |
39 |
--------------------------------------------------------------------------------
/model_diagrams.md:
--------------------------------------------------------------------------------
1 | # The "Easiest-to-Understand Model Diagram" Drawing Event
2 |
3 | The "Easiest-to-Understand Model Diagram" drawing event is part of the "Super Vision Camp" (超级视客营) program. It was initiated by community member [RangeKing](https://github.com/RangeKing) to help everyone understand and use the models in the OpenMMLab algorithm libraries better and faster.
4 |
5 | Below is a collection of the participants' works:
6 |
7 | ## MMYOLO
8 |
9 | 1. YOLOX
10 | Author: [lyviva](https://github.com/lyviva)
10 | 作者:[lyviva](https://github.com/lyviva)
11 |