├── .gitignore
├── LICENSE
├── README.md
├── detection
├── Base-RCNN-C4.yaml
├── Base-RetinaNet.yaml
├── __init__.py
├── demo.py
├── demo_retinanet.py
├── faster_rcnn_R_50_C4.yaml
├── grad_cam.py
├── grad_cam_retinanet.py
└── retinanet_R_50_FPN_3x.yaml
├── examples
├── Grad-CAM++.png
├── grad-cam.jpg
├── multiple_dogs.jpg
├── pic1.jpg
├── pic2.jpg
├── pic3.jpg
└── pic4.jpg
├── interpretability
├── __init__.py
├── grad_cam.py
└── guided_back_propagation.py
├── main.py
└── results
├── multiple_dogs-densenet121-cam++.jpg
├── multiple_dogs-densenet121-cam.jpg
├── multiple_dogs-densenet121-cam_gb.jpg
├── multiple_dogs-densenet121-gb.jpg
├── multiple_dogs-densenet121-heatmap++.jpg
├── multiple_dogs-densenet121-heatmap.jpg
├── multiple_dogs-inception-cam++.jpg
├── multiple_dogs-inception-cam.jpg
├── multiple_dogs-inception-cam_gb.jpg
├── multiple_dogs-inception-gb.jpg
├── multiple_dogs-inception-heatmap++.jpg
├── multiple_dogs-inception-heatmap.jpg
├── multiple_dogs-mobilenet_v2-cam++.jpg
├── multiple_dogs-mobilenet_v2-cam.jpg
├── multiple_dogs-mobilenet_v2-cam_gb.jpg
├── multiple_dogs-mobilenet_v2-gb.jpg
├── multiple_dogs-mobilenet_v2-heatmap++.jpg
├── multiple_dogs-mobilenet_v2-heatmap.jpg
├── multiple_dogs-resnet101-cam++.jpg
├── multiple_dogs-resnet101-cam.jpg
├── multiple_dogs-resnet101-cam_gb.jpg
├── multiple_dogs-resnet101-gb.jpg
├── multiple_dogs-resnet101-heatmap++.jpg
├── multiple_dogs-resnet101-heatmap.jpg
├── multiple_dogs-resnet50-cam++.jpg
├── multiple_dogs-resnet50-cam.jpg
├── multiple_dogs-resnet50-cam_gb.jpg
├── multiple_dogs-resnet50-gb.jpg
├── multiple_dogs-resnet50-heatmap++.jpg
├── multiple_dogs-resnet50-heatmap.jpg
├── multiple_dogs-shufflenet_v2-cam++.jpg
├── multiple_dogs-shufflenet_v2-cam.jpg
├── multiple_dogs-shufflenet_v2-cam_gb.jpg
├── multiple_dogs-shufflenet_v2-gb.jpg
├── multiple_dogs-shufflenet_v2-heatmap++.jpg
├── multiple_dogs-shufflenet_v2-heatmap.jpg
├── multiple_dogs-vgg16-cam++.jpg
├── multiple_dogs-vgg16-cam.jpg
├── multiple_dogs-vgg16-cam_gb.jpg
├── multiple_dogs-vgg16-gb.jpg
├── multiple_dogs-vgg16-heatmap++.jpg
├── multiple_dogs-vgg16-heatmap.jpg
├── multiple_dogs-vgg19-cam++.jpg
├── multiple_dogs-vgg19-cam.jpg
├── multiple_dogs-vgg19-cam_gb.jpg
├── multiple_dogs-vgg19-gb.jpg
├── multiple_dogs-vgg19-heatmap++.jpg
├── multiple_dogs-vgg19-heatmap.jpg
├── pic1-densenet121-cam++.jpg
├── pic1-densenet121-cam.jpg
├── pic1-densenet121-cam_gb.jpg
├── pic1-densenet121-gb.jpg
├── pic1-densenet121-heatmap++.jpg
├── pic1-densenet121-heatmap.jpg
├── pic1-frcnn-heatmap++.jpg
├── pic1-frcnn-heatmap.jpg
├── pic1-frcnn-predict_box.jpg
├── pic1-inception-cam++.jpg
├── pic1-inception-cam.jpg
├── pic1-inception-cam_gb.jpg
├── pic1-inception-gb.jpg
├── pic1-inception-heatmap++.jpg
├── pic1-inception-heatmap.jpg
├── pic1-mobilenet_v2-cam++.jpg
├── pic1-mobilenet_v2-cam.jpg
├── pic1-mobilenet_v2-cam_gb.jpg
├── pic1-mobilenet_v2-gb.jpg
├── pic1-mobilenet_v2-heatmap++.jpg
├── pic1-mobilenet_v2-heatmap.jpg
├── pic1-resnet101-cam++.jpg
├── pic1-resnet101-cam.jpg
├── pic1-resnet101-cam_gb.jpg
├── pic1-resnet101-gb.jpg
├── pic1-resnet101-heatmap++.jpg
├── pic1-resnet101-heatmap.jpg
├── pic1-resnet50-cam++.jpg
├── pic1-resnet50-cam.jpg
├── pic1-resnet50-cam_gb.jpg
├── pic1-resnet50-gb.jpg
├── pic1-resnet50-heatmap++.jpg
├── pic1-resnet50-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.0-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.0-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.1-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.1-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.2-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.2-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.3-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.3-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.4-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.4-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.5-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.5-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.6-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.6-heatmap.jpg
├── pic1-retinanet-head.cls_subnet.7-heatmap++.jpg
├── pic1-retinanet-head.cls_subnet.7-heatmap.jpg
├── pic1-retinanet-predict_box.jpg
├── pic1-shufflenet_v2-cam++.jpg
├── pic1-shufflenet_v2-cam.jpg
├── pic1-shufflenet_v2-cam_gb.jpg
├── pic1-shufflenet_v2-gb.jpg
├── pic1-shufflenet_v2-heatmap++.jpg
├── pic1-shufflenet_v2-heatmap.jpg
├── pic1-vgg16-cam++.jpg
├── pic1-vgg16-cam.jpg
├── pic1-vgg16-cam_gb.jpg
├── pic1-vgg16-gb.jpg
├── pic1-vgg16-heatmap++.jpg
├── pic1-vgg16-heatmap.jpg
├── pic1-vgg19-cam++.jpg
├── pic1-vgg19-cam.jpg
├── pic1-vgg19-cam_gb.jpg
├── pic1-vgg19-gb.jpg
├── pic1-vgg19-heatmap++.jpg
├── pic1-vgg19-heatmap.jpg
├── pic2-frcnn-heatmap++.jpg
├── pic2-frcnn-heatmap.jpg
├── pic2-frcnn-predict_box.jpg
├── pic2-retinanet-head.cls_subnet.0-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.0-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.1-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.1-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.2-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.2-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.3-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.3-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.4-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.4-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.5-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.5-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.6-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.6-heatmap.jpg
├── pic2-retinanet-head.cls_subnet.7-heatmap++.jpg
├── pic2-retinanet-head.cls_subnet.7-heatmap.jpg
├── pic2-retinanet-predict_box.jpg
├── pic3-frcnn-heatmap++.jpg
├── pic3-frcnn-heatmap.jpg
├── pic3-frcnn-predict_box.jpg
├── pic3-retinanet-head.cls_subnet.0-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.0-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.1-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.1-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.2-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.2-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.3-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.3-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.4-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.4-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.5-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.5-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.6-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.6-heatmap.jpg
├── pic3-retinanet-head.cls_subnet.7-heatmap++.jpg
├── pic3-retinanet-head.cls_subnet.7-heatmap.jpg
├── pic3-retinanet-predict_box.jpg
├── pic4-frcnn-heatmap++.jpg
├── pic4-frcnn-heatmap.jpg
├── pic4-frcnn-predict_box.jpg
├── pic4-retinanet-head.cls_subnet.0-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.0-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.1-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.1-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.2-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.2-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.3-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.3-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.4-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.4-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.5-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.5-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.6-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.6-heatmap.jpg
├── pic4-retinanet-head.cls_subnet.7-heatmap++.jpg
├── pic4-retinanet-head.cls_subnet.7-heatmap.jpg
└── pic4-retinanet-predict_box.jpg
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | .idea/
106 |
107 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Grad-CAM.pytorch
2 |
3 | pytorch 实现[Grad-CAM:Visual Explanations from Deep Networks via Gradient-based Localization](https://arxiv.org/pdf/1610.02391) 和
4 |
5 | [Grad-CAM++: Improved Visual Explanations for Deep Convolutional Networks](https://arxiv.org/pdf/1710.11063.pdf)
6 |
7 | 1. [依赖](#依赖)
8 | 2. [使用方法](#使用方法)
9 | 3. [样例分析](#样例分析)
10 | 3.1 [单个对象](#单个对象)
11 | 3.3 [多个对象](#多个对象)
12 | 4. [总结](#总结)
13 | 5. [目标检测-faster-r-cnn](#目标检测-faster-r-cnn)
14 | 5.1 [detectron2安装](#detectron2安装)
15 | 5.2 [测试](#测试)
16 | 5.3 [Grad-CAM结果](#Grad-CAM结果)
17 | 5.4 [总结](#总结)
18 | 6. [目标检测-retinanet](#目标检测-retinanet)
19 | 6.1 [detectron2安装](#detectron2安装)
20 | 6.2 [测试](#测试)
21 | 6.3 [Grad-CAM结果](#Grad-CAM结果)
22 | 6.4 [总结](#总结)
23 |
24 | **Grad-CAM整体架构**
25 |
26 | 
27 |
28 |
29 |
30 | **Grad-CAM++与Grad-CAM的异同**
31 |
32 | 
33 |
34 |
35 |
36 | ## 依赖
37 |
38 | ```wiki
39 | python 3.6.x
40 | pytoch 1.0.1+
41 | torchvision 0.2.2
42 | opencv-python
43 | matplotlib
44 | scikit-image
45 | numpy
46 | ```
47 |
48 |
49 |
50 | ## 使用方法
51 |
52 | ```shell
53 | python main.py --image-path examples/pic1.jpg \
54 | --network densenet121 \
55 | --weight-path /opt/pretrained_model/densenet121-a639ec97.pth
56 | ```
57 |
58 | **参数说明**:
59 |
60 | - image-path:需要可视化的图像路径(可选,默认`./examples/pic1.jpg`)
61 |
62 | - network: 网络名称(可选,默认`resnet50`)
63 | - weight-path: 网络对应的与训练参数权重路径(可选,默认从pytorch官网下载对应的预训练权重)
64 | - layer-name: Grad-CAM使用的层名(可选,默认最后一个卷积层)
65 | - class-id:Grad-CAM和Guided Back Propagation反向传播使用的类别id(可选,默认网络预测的类别)
66 | - output-dir:可视化结果图像保存目录(可选,默认`results`目录)
67 |
68 |
69 |
70 | ## 样例分析
71 |
72 | ### 单个对象
73 |
74 | **原始图像**
75 |
76 | 
77 |
78 | **效果**
79 |
80 | | network | HeatMap | Grad-CAM | HeatMap++ | Grad-CAM++ | Guided backpropagation | Guided Grad-CAM |
81 | | ------------ | ----------------------------------------- | ------------------------------------- | ------------------------------------------- | --------------------------------------- | ------------------------------------ | ---------------------------------------- |
82 | | vgg16 |  |  |  |  |  |  |
83 | | vgg19 |  |  |  |  |  |  |
84 | | resnet50 |  |  |  |  |  |  |
85 | | resnet101 |  |  |  |  |  |  |
86 | | densenet121 |  |  |  |  |  |  |
87 | | inception_v3 |  |  |  |  |  |  |
88 | | mobilenet_v2 |  |  |  |  |  |  |
89 | | shufflenet_v2 |  |  |  |  |  |  |
90 |
91 | ### 多个对象
92 |
93 | 对应多个图像Grad-CAM++比Grad-CAM覆盖要更全面一些,这也是Grad-CAM++最主要的优势
94 |
95 | **原始图像**
96 |
97 | 
98 |
99 | **效果**
100 |
101 | | network | HeatMap | Grad-CAM | HeatMap++ | Grad-CAM++ | Guided backpropagation | Guided Grad-CAM |
102 | | ------------ | ----------------------------------------- | ------------------------------------- | ------------------------------------------- | --------------------------------------- | ------------------------------------ | ---------------------------------------- |
103 | | vgg16 |  |  |  |  |  |  |
104 | | vgg19 |  |  |  |  |  |  |
105 | | resnet50 |  |  |  |  |  |  |
106 | | resnet101 |  |  |  |  |  |  |
107 | | densenet121 |  |  |  |  |  |  |
108 | | inception_v3 |  |  |  |  |  |  |
109 | | mobilenet_v2 |  |  |  |  |  |  |
110 | | shufflenet_v2 |  |  |  |  |  |  |
111 |
112 |
113 |
114 | ## 总结
115 |
116 | - vgg模型的Grad-CAM并没有覆盖整个对象,相对来说resnet和denset覆盖更全,特别是densenet;从侧面说明就模型的泛化和鲁棒性而言densenet>resnet>vgg
117 | - Grad-CAM++相对于Grad-CAM也是覆盖对象更全面,特别是对于同一个类别有多个实例的情况下,Grad-CAM可能只覆盖部分对象,Grad-CAM++基本覆盖所有对象;但是这仅仅对于vgg而言,想densenet直接使用Grad-CAM也基本能够覆盖所有对象
118 | - MobileNet V2的Grad-CAM覆盖也很全面
119 | - Inception V3和MobileNet V2的Guided backpropagation图轮廓很模糊,但是ShuffleNet V2的轮廓则比较清晰
120 |
121 |
122 |
123 | ## 目标检测-faster-r-cnn
124 |
125 | 有位网友[SHAOSIHAN]()问道怎样在目标检测中使用Grad-CAM;在Grad-CAM和Grad-CAM++论文中都没有提及对目标检测生成CAM图。我想主要有两个原因:
126 |
127 | a) 目标检测不同于分类,分类网络只有一个分类损失,而且所有网络都是一样的(几个类别最后一层就是几个神经元),最后的预测输出都是单一的类别得分分布。目标检测则不同,输出都不是单一的,而且不同的网络如Faster R-CNN, CornerNet,CenterNet,FCOS,它们的建模方式不一样,输出的含义都不相同。所以不会有统一的生成Grad-CAM图的方法。
128 |
129 | b) 分类属于弱监督,通过CAM可以了解网络预测时主要关注的空间位置,也就是"看哪里",对分析问题有实际的价值;而目标检测,本身是强监督,预测边框就直接指示了“看哪里”。
130 |
131 |
132 |
133 | 这里以detetron2中的faster-rcnn网络为例,生成Grad-CAM图。主要思路是直接获取预测分值最高的边框;将该边框的预测分值反向传播梯度到,该边框对应的proposal 边框的feature map上,生成此feature map的CAM图。
134 |
135 |
136 |
137 | ### detectron2安装
138 |
139 | a) 下载
140 |
141 | ```shell
142 | git clone https://github.com/facebookresearch/detectron2.git
143 | ```
144 |
145 |
146 |
147 | b) 修改`detectron2/modeling/roi_heads/fast_rcnn.py`文件中的`fast_rcnn_inference_single_image`函数,主要是增加索引号,记录分值高的预测边框是由第几个proposal边框生成的;修改后的`fast_rcnn_inference_single_image`函数如下:
148 |
149 | ```python
150 | def fast_rcnn_inference_single_image(
151 | boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
152 | ):
153 | """
154 | Single-image inference. Return bounding-box detection results by thresholding
155 | on scores and applying non-maximum suppression (NMS).
156 |
157 | Args:
158 | Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
159 | per image.
160 |
161 | Returns:
162 | Same as `fast_rcnn_inference`, but for only one image.
163 | """
164 | valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
165 | indices = torch.arange(start=0, end=scores.shape[0], dtype=int)
166 | indices = indices.expand((scores.shape[1], scores.shape[0])).T
167 | if not valid_mask.all():
168 | boxes = boxes[valid_mask]
169 | scores = scores[valid_mask]
170 | indices = indices[valid_mask]
171 | scores = scores[:, :-1]
172 | indices = indices[:, :-1]
173 |
174 | num_bbox_reg_classes = boxes.shape[1] // 4
175 | # Convert to Boxes to use the `clip` function ...
176 | boxes = Boxes(boxes.reshape(-1, 4))
177 | boxes.clip(image_shape)
178 | boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
179 |
180 | # Filter results based on detection scores
181 | filter_mask = scores > score_thresh # R x K
182 | # R' x 2. First column contains indices of the R predictions;
183 | # Second column contains indices of classes.
184 | filter_inds = filter_mask.nonzero()
185 | if num_bbox_reg_classes == 1:
186 | boxes = boxes[filter_inds[:, 0], 0]
187 | else:
188 | boxes = boxes[filter_mask]
189 |
190 | scores = scores[filter_mask]
191 | indices = indices[filter_mask]
192 | # Apply per-class NMS
193 | keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
194 | if topk_per_image >= 0:
195 | keep = keep[:topk_per_image]
196 | boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
197 | indices = indices[keep]
198 |
199 | result = Instances(image_shape)
200 | result.pred_boxes = Boxes(boxes)
201 | result.scores = scores
202 | result.pred_classes = filter_inds[:, 1]
203 | result.indices = indices
204 | return result, filter_inds[:, 0]
205 | ```
206 |
207 |
208 |
209 | c) 安装;如遇到问题,请参考[detectron2](https://github.com/facebookresearch/detectron2);不同操作系统安装有差异
210 |
211 | ```shell
212 | cd detectron2
213 | pip install -e .
214 | ```
215 |
216 |
217 |
218 | ### 测试
219 |
220 | a) 预训练模型下载
221 |
222 | ```shell
223 | wget https://dl.fbaipublicfiles.com/detectron2/PascalVOC-Detection/faster_rcnn_R_50_C4/142202221/model_final_b1acc2.pkl
224 | ```
225 |
226 |
227 |
228 | b) 测试Grad-CAM图像生成
229 |
230 | 在本工程目录下执行如下命令
231 |
232 | ```shell
233 | export KMP_DUPLICATE_LIB_OK=TRUE
234 | python detection/demo.py --config-file detection/faster_rcnn_R_50_C4.yaml \
235 | --input ./examples/pic1.jpg \
236 | --opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_b1acc2.pkl MODEL.DEVICE cpu
237 | ```
238 |
239 |
240 |
241 | ### Grad-CAM结果
242 |
243 | | 原始图像 | 检测边框 | Grad-CAM HeatMap | Grad-CAM++ HeatMap | 边框预测类别 |
244 | | ------------------------ | ----------------------------------------- | ------------------------------------- | --------------------------------------- | ------------ |
245 | |  |  |  |  | Dog |
246 | |  |  |  |  | Aeroplane |
247 | |  |  |  |  | Person |
248 | |  |  |  |  | Horse |
249 |
250 |
251 | ### 总结
252 |
253 | 对于目标检测Grad-CAM++的效果并没有比Grad-CAM效果好,推测目标检测中预测边框已经是单个对象了,Grad-CAM++在多个对象的情况下优于Grad-CAM
254 |
255 |
256 |
257 |
258 |
259 | ## 目标检测-retinanet
260 |
261 | 在目标检测网络faster r-cnn的Grad-CAM完成后,有两位网友[**abhigoku10**]() 、[**wangzyon**]()问道怎样在retinanet中实现Grad-CAM。retinanet与faster r-cnn网络结构不同,CAM的生成也有一些差异;以下是详细的过程:
262 |
263 | ### detectron2安装
264 |
265 | a) 下载
266 |
267 | ```shell
268 | git clone https://github.com/facebookresearch/detectron2.git
269 | ```
270 |
271 |
272 |
273 | b) 修改`detectron2/modeling/meta_arch/retinanet.py` 文件中的`inference_single_image`函数,主要是增加feature level 索引,记录分值高的预测边框是由第几层feature map生成的;修改后的`inference_single_image`函数如下:
274 |
275 | ```python
276 | def inference_single_image(self, box_cls, box_delta, anchors, image_size):
277 | """
278 | Single-image inference. Return bounding-box detection results by thresholding
279 | on scores and applying non-maximum suppression (NMS).
280 |
281 | Arguments:
282 | box_cls (list[Tensor]): list of #feature levels. Each entry contains
283 | tensor of size (H x W x A, K)
284 | box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
285 | anchors (list[Boxes]): list of #feature levels. Each entry contains
286 | a Boxes object, which contains all the anchors for that
287 | image in that feature level.
288 | image_size (tuple(H, W)): a tuple of the image height and width.
289 |
290 | Returns:
291 | Same as `inference`, but for only one image.
292 | """
293 | boxes_all = []
294 | scores_all = []
295 | class_idxs_all = []
296 | feature_level_all = []
297 |
298 | # Iterate over every feature level
299 | for i, (box_cls_i, box_reg_i, anchors_i) in enumerate(zip(box_cls, box_delta, anchors)):
300 | # (HxWxAxK,)
301 | box_cls_i = box_cls_i.flatten().sigmoid_()
302 |
303 | # Keep top k top scoring indices only.
304 | num_topk = min(self.topk_candidates, box_reg_i.size(0))
305 | # torch.sort is actually faster than .topk (at least on GPUs)
306 | predicted_prob, topk_idxs = box_cls_i.sort(descending=True)
307 | predicted_prob = predicted_prob[:num_topk]
308 | topk_idxs = topk_idxs[:num_topk]
309 |
310 | # filter out the proposals with low confidence score
311 | keep_idxs = predicted_prob > self.score_threshold
312 | predicted_prob = predicted_prob[keep_idxs]
313 | topk_idxs = topk_idxs[keep_idxs]
314 |
315 | anchor_idxs = topk_idxs // self.num_classes
316 | classes_idxs = topk_idxs % self.num_classes
317 |
318 | box_reg_i = box_reg_i[anchor_idxs]
319 | anchors_i = anchors_i[anchor_idxs]
320 | # predict boxes
321 | predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)
322 |
323 | boxes_all.append(predicted_boxes)
324 | scores_all.append(predicted_prob)
325 | class_idxs_all.append(classes_idxs)
326 | feature_level_all.append(torch.ones_like(classes_idxs) * i)
327 |
328 | boxes_all, scores_all, class_idxs_all, feature_level_all = [
329 | cat(x) for x in [boxes_all, scores_all, class_idxs_all, feature_level_all]
330 | ]
331 | keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold)
332 | keep = keep[: self.max_detections_per_image]
333 |
334 | result = Instances(image_size)
335 | result.pred_boxes = Boxes(boxes_all[keep])
336 | result.scores = scores_all[keep]
337 | result.pred_classes = class_idxs_all[keep]
338 | result.feature_levels = feature_level_all[keep]
339 | return result
340 | ```
341 |
342 | c) 修改`detectron2/modeling/meta_arch/retinanet.py` 文件增加`predict`函数,具体如下:
343 |
344 | ```python
345 | def predict(self, batched_inputs):
346 | """
347 | Args:
348 | batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
349 | Each item in the list contains the inputs for one image.
350 | For now, each item in the list is a dict that contains:
351 |
352 | * image: Tensor, image in (C, H, W) format.
353 | * instances: Instances
354 |
355 | Other information that's included in the original dicts, such as:
356 |
357 | * "height", "width" (int): the output resolution of the model, used in inference.
358 | See :meth:`postprocess` for details.
359 | Returns:
360 | dict[str: Tensor]:
361 | mapping from a named loss to a tensor storing the loss. Used during training only.
362 | """
363 | images = self.preprocess_image(batched_inputs)
364 |
365 | features = self.backbone(images.tensor)
366 | features = [features[f] for f in self.in_features]
367 | box_cls, box_delta = self.head(features)
368 | anchors = self.anchor_generator(features)
369 |
370 | results = self.inference(box_cls, box_delta, anchors, images.image_sizes)
371 | processed_results = []
372 | for results_per_image, input_per_image, image_size in zip(
373 | results, batched_inputs, images.image_sizes
374 | ):
375 | height = input_per_image.get("height", image_size[0])
376 | width = input_per_image.get("width", image_size[1])
377 | r = detector_postprocess(results_per_image, height, width)
378 | processed_results.append({"instances": r})
379 | return processed_results
380 | ```
381 |
382 |
383 |
384 | d) 安装;如遇到问题,请参考[detectron2](https://github.com/facebookresearch/detectron2);不同操作系统安装有差异
385 |
386 | ```shell
387 | cd detectron2
388 | pip install -e .
389 | ```
390 |
391 |
392 |
393 | ### 测试
394 |
395 | a) 预训练模型下载
396 |
397 | ```shell
398 | wget https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_50_FPN_3x/137849486/model_final_4cafe0.pkl
399 | ```
400 |
401 |
402 |
403 | b) 测试Grad-CAM图像生成
404 |
405 | 在本工程目录下执行如下命令:
406 |
407 | ```shell
408 | export KMP_DUPLICATE_LIB_OK=TRUE
409 | python detection/demo_retinanet.py --config-file detection/retinanet_R_50_FPN_3x.yaml \
410 | --input ./examples/pic1.jpg \
411 | --layer-name head.cls_subnet.0 \
412 | --opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_4cafe0.pkl MODEL.DEVICE cpu
413 | ```
414 |
415 |
416 |
417 | ### Grad-CAM结果
418 |
419 | | | 图像1 | 图像2 | 图像3 | 图像4 |
420 | | ---------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
421 | | 原图 |  |  |  |  |
422 | | 预测边框 |  |  |  |  |
423 | | GradCAM-cls_subnet.0 |  |  |  |  |
424 | | GradCAM-cls_subnet.1 |  |  |  |  |
425 | | GradCAM-cls_subnet.2 |  |  |  |  |
426 | | GradCAM-cls_subnet.3 |  |  |  |  |
427 | | GradCAM-cls_subnet.4 |  |  |  |  |
428 | | GradCAM-cls_subnet.5 |  |  |  |  |
429 | | GradCAM-cls_subnet.6 |  |  |  |  |
430 | | GradCAM-cls_subnet.7 |  |  |  |  |
431 | | GradCAM++-cls_subnet.0 |  |  |  |  |
432 | | GradCAM++-cls_subnet.1 |  |  |  |  |
433 | | GradCAM++-cls_subnet.2 |  |  |  |  |
434 | | GradCAM++-cls_subnet.3 |  |  |  |  |
435 | | GradCAM++-cls_subnet.4 |  |  |  |  |
436 | | GradCAM++-cls_subnet.5 |  |  |  |  |
437 | | GradCAM++-cls_subnet.6 |  |  |  |  |
438 | | GradCAM++-cls_subnet.7 |  |  |  |  |
439 |
440 |
441 |
442 | 注:以上分别对head.cls_subnet.0~head.cls_subnet.7共8个层生成Grad-CAM图,这8层分别对应retinanet分类子网络的4层卷积feature map及ReLu激活后的feature map
443 |
444 |
445 |
446 | ### 总结
447 |
448 | a) retinanet的Grad-CAM图效果都不算好,相对来说中间层head.cls_subnet.2~head.cls_subnet.4相对好一点
449 |
450 | b) 个人认为retinanet效果不要的原因是,retinanet最后的分类是卷积层,卷积核实3\*3,也就是说反向传播到最后一个卷积层的feature map上,只有3\*3个单元有梯度。而分类网络或者faster r-cnn分类都是全连接层,感受全局信息,最后一个卷积层的feature map上所有单元都有梯度。
451 |
452 | c) 反向传播到浅层的feature map上,有梯度的单元会逐渐增加,但是就像Grad-CAM论文中说的,越浅层的feature map语义信息越弱,所以可以看到head.cls_subnet.0的CAM图效果很差。
--------------------------------------------------------------------------------
/detection/Base-RCNN-C4.yaml:
--------------------------------------------------------------------------------
1 | MODEL:
2 | META_ARCHITECTURE: "GeneralizedRCNN"
3 | RPN:
4 | PRE_NMS_TOPK_TEST: 6000
5 | POST_NMS_TOPK_TEST: 1000
6 | ROI_HEADS:
7 | NAME: "Res5ROIHeads"
8 | DATASETS:
9 | TRAIN: ("coco_2017_train",)
10 | TEST: ("coco_2017_val",)
11 | SOLVER:
12 | IMS_PER_BATCH: 16
13 | BASE_LR: 0.02
14 | STEPS: (60000, 80000)
15 | MAX_ITER: 90000
16 | INPUT:
17 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
18 | VERSION: 2
19 |
--------------------------------------------------------------------------------
/detection/Base-RetinaNet.yaml:
--------------------------------------------------------------------------------
1 | MODEL:
2 | META_ARCHITECTURE: "RetinaNet"
3 | BACKBONE:
4 | NAME: "build_retinanet_resnet_fpn_backbone"
5 | RESNETS:
6 | OUT_FEATURES: ["res3", "res4", "res5"]
7 | ANCHOR_GENERATOR:
8 | SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"]
9 | FPN:
10 | IN_FEATURES: ["res3", "res4", "res5"]
11 | RETINANET:
12 | IOU_THRESHOLDS: [0.4, 0.5]
13 | IOU_LABELS: [0, -1, 1]
14 | DATASETS:
15 | TRAIN: ("coco_2017_train",)
16 | TEST: ("coco_2017_val",)
17 | SOLVER:
18 | IMS_PER_BATCH: 16
19 | BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate
20 | STEPS: (60000, 80000)
21 | MAX_ITER: 90000
22 | INPUT:
23 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
24 | VERSION: 2
25 |
--------------------------------------------------------------------------------
/detection/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @File : __init__.py.py
4 | @Time : 2020/2/24 下午10:08
5 | @Author : yizuotian
6 | @Description :
7 | """
--------------------------------------------------------------------------------
/detection/demo.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | import argparse
3 | import multiprocessing as mp
4 | import os
5 |
6 | import cv2
7 | import detectron2.data.transforms as T
8 | import numpy as np
9 | import torch
10 | from detectron2.checkpoint import DetectionCheckpointer
11 | from detectron2.config import get_cfg
12 | from detectron2.data import MetadataCatalog
13 | from detectron2.data.detection_utils import read_image
14 | from detectron2.modeling import build_model
15 | from detectron2.utils.logger import setup_logger
16 | from grad_cam import GradCAM, GradCamPlusPlus
17 | from skimage import io
18 | from torch import nn
19 |
20 | # constants
21 | WINDOW_NAME = "COCO detections"
22 |
23 |
24 | def setup_cfg(args):
25 | # load config from file and command-line arguments
26 | cfg = get_cfg()
27 | cfg.merge_from_file(args.config_file)
28 | cfg.merge_from_list(args.opts)
29 | # Set score_threshold for builtin models
30 | cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
31 | cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
32 | cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
33 | cfg.freeze()
34 | return cfg
35 |
36 |
37 | def get_last_conv_name(net):
38 | """
39 | 获取网络的最后一个卷积层的名字
40 | :param net:
41 | :return:
42 | """
43 | layer_name = None
44 | for name, m in net.named_modules():
45 | if isinstance(m, nn.Conv2d):
46 | layer_name = name
47 | return layer_name
48 |
49 |
50 | class GuidedBackPropagation(object):
51 |
52 | def __init__(self, net):
53 | self.net = net
54 | for (name, module) in self.net.named_modules():
55 | if isinstance(module, nn.ReLU):
56 | module.register_backward_hook(self.backward_hook)
57 | self.net.eval()
58 |
59 | @classmethod
60 | def backward_hook(cls, module, grad_in, grad_out):
61 | """
62 |
63 | :param module:
64 | :param grad_in: tuple,长度为1
65 | :param grad_out: tuple,长度为1
66 | :return: tuple(new_grad_in,)
67 | """
68 | return torch.clamp(grad_in[0], min=0.0),
69 |
70 | def __call__(self, inputs, index=0):
71 | """
72 |
73 | :param inputs: {"image": [C,H,W], "height": height, "width": width}
74 | :param index: 第几个边框
75 | :return:
76 | """
77 | self.net.zero_grad()
78 | output = self.net.inference([inputs])
79 | score = output[0]['instances'].scores[index]
80 | score.backward()
81 |
82 | return inputs['image'].grad # [3,H,W]
83 |
84 |
85 | def norm_image(image):
86 | """
87 | 标准化图像
88 | :param image: [H,W,C]
89 | :return:
90 | """
91 | image = image.copy()
92 | image -= np.max(np.min(image), 0)
93 | image /= np.max(image)
94 | image *= 255.
95 | return np.uint8(image)
96 |
97 |
98 | def gen_cam(image, mask):
99 | """
100 | 生成CAM图
101 | :param image: [H,W,C],原始图像
102 | :param mask: [H,W],范围0~1
103 | :return: tuple(cam,heatmap)
104 | """
105 | # mask转为heatmap
106 | heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
107 | heatmap = np.float32(heatmap) / 255
108 | heatmap = heatmap[..., ::-1] # gbr to rgb
109 |
110 | # 合并heatmap到原始图像
111 | cam = heatmap + np.float32(image)
112 | return norm_image(cam), heatmap
113 |
114 |
115 | def gen_gb(grad):
116 | """
117 | 生guided back propagation 输入图像的梯度
118 | :param grad: tensor,[3,H,W]
119 | :return:
120 | """
121 | # 标准化
122 | grad = grad.data.numpy()
123 | gb = np.transpose(grad, (1, 2, 0))
124 | return gb
125 |
126 |
127 | def save_image(image_dicts, input_image_name, network='frcnn', output_dir='./results'):
128 | prefix = os.path.splitext(input_image_name)[0]
129 | for key, image in image_dicts.items():
130 | io.imsave(os.path.join(output_dir, '{}-{}-{}.jpg'.format(prefix, network, key)), image)
131 |
132 |
133 | def get_parser():
134 | parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
135 | parser.add_argument(
136 | "--config-file",
137 | default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
138 | metavar="FILE",
139 | help="path to config file",
140 | )
141 | parser.add_argument("--input", help="A list of space separated input images")
142 | parser.add_argument(
143 | "--output",
144 | help="A file or directory to save output visualizations. "
145 | "If not given, will show output in an OpenCV window.",
146 | )
147 |
148 | parser.add_argument(
149 | "--confidence-threshold",
150 | type=float,
151 | default=0.5,
152 | help="Minimum score for instance predictions to be shown",
153 | )
154 | parser.add_argument(
155 | "--opts",
156 | help="Modify config options using the command-line 'KEY VALUE' pairs",
157 | default=[],
158 | nargs=argparse.REMAINDER,
159 | )
160 | return parser
161 |
162 |
163 | def main(args):
164 | setup_logger(name="fvcore")
165 | logger = setup_logger()
166 | logger.info("Arguments: " + str(args))
167 |
168 | cfg = setup_cfg(args)
169 | print(cfg)
170 | # 构建模型
171 | model = build_model(cfg)
172 | # 加载权重
173 | checkpointer = DetectionCheckpointer(model)
174 | checkpointer.load(cfg.MODEL.WEIGHTS)
175 |
176 | # 加载图像
177 | path = os.path.expanduser(args.input)
178 | original_image = read_image(path, format="BGR")
179 | height, width = original_image.shape[:2]
180 | transform_gen = T.ResizeShortestEdge(
181 | [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
182 | )
183 | image = transform_gen.get_transform(original_image).apply_image(original_image)
184 | image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).requires_grad_(True)
185 |
186 | inputs = {"image": image, "height": height, "width": width}
187 |
188 | # Grad-CAM
189 | layer_name = get_last_conv_name(model)
190 | grad_cam = GradCAM(model, layer_name)
191 | mask, box, class_id = grad_cam(inputs) # cam mask
192 | grad_cam.remove_handlers()
193 |
194 | #
195 | image_dict = {}
196 | img = original_image[..., ::-1]
197 | x1, y1, x2, y2 = box
198 | image_dict['predict_box'] = img[y1:y2, x1:x2]
199 | image_cam, image_dict['heatmap'] = gen_cam(img[y1:y2, x1:x2], mask)
200 |
201 | # Grad-CAM++
202 | grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
203 | mask_plus_plus = grad_cam_plus_plus(inputs) # cam mask
204 | _, image_dict['heatmap++'] = gen_cam(img[y1:y2, x1:x2], mask_plus_plus)
205 | grad_cam_plus_plus.remove_handlers()
206 |
207 | # 获取类别名称
208 | meta = MetadataCatalog.get(
209 | cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
210 | )
211 | label = meta.thing_classes[class_id]
212 |
213 | print("label:{}".format(label))
214 | # # GuidedBackPropagation
215 | # gbp = GuidedBackPropagation(model)
216 | # inputs['image'].grad.zero_() # 梯度置零
217 | # grad = gbp(inputs)
218 | # print("grad.shape:{}".format(grad.shape))
219 | # gb = gen_gb(grad)
220 | # gb = gb[y1:y2, x1:x2]
221 | # image_dict['gb'] = gb
222 | # # 生成Guided Grad-CAM
223 | # cam_gb = gb * mask[..., np.newaxis]
224 | # image_dict['cam_gb'] = norm_image(cam_gb)
225 |
226 | save_image(image_dict, os.path.basename(path))
227 |
228 |
229 | if __name__ == "__main__":
230 | """
231 | Usage:export KMP_DUPLICATE_LIB_OK=TRUE
232 | python detection/demo.py --config-file detection/faster_rcnn_R_50_C4.yaml \
233 | --input ./examples/pic1.jpg \
234 | --opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_b1acc2.pkl MODEL.DEVICE cpu
235 | """
236 | mp.set_start_method("spawn", force=True)
237 | arguments = get_parser().parse_args()
238 | main(arguments)
239 |
--------------------------------------------------------------------------------
/detection/demo_retinanet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @File : demo_retinanet.py
4 | @Time : 2020/5/16 下午9:59
5 | @Author : yizuotian
6 | @Description :
7 | """
8 |
9 | import argparse
10 | import multiprocessing as mp
11 | import os
12 |
13 | import cv2
14 | import detectron2.data.transforms as T
15 | import numpy as np
16 | import torch
17 | from detectron2.checkpoint import DetectionCheckpointer
18 | from detectron2.config import get_cfg
19 | from detectron2.data import MetadataCatalog
20 | from detectron2.data.detection_utils import read_image
21 | from detectron2.modeling import build_model
22 | from detectron2.utils.logger import setup_logger
23 | from skimage import io
24 |
25 | from grad_cam_retinanet import GradCAM, GradCamPlusPlus
26 |
27 | # constants
28 | WINDOW_NAME = "COCO detections"
29 |
30 |
31 | def setup_cfg(args):
32 | # load config from file and command-line arguments
33 | cfg = get_cfg()
34 | cfg.merge_from_file(args.config_file)
35 | cfg.merge_from_list(args.opts)
36 | # Set score_threshold for builtin models
37 | cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
38 | cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
39 | cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
40 | cfg.freeze()
41 | return cfg
42 |
43 |
44 | def norm_image(image):
45 | """
46 | 标准化图像
47 | :param image: [H,W,C]
48 | :return:
49 | """
50 | image = image.copy()
51 | image -= np.max(np.min(image), 0)
52 | image /= np.max(image)
53 | image *= 255.
54 | return np.uint8(image)
55 |
56 |
57 | def gen_cam(image, mask):
58 | """
59 | 生成CAM图
60 | :param image: [H,W,C],原始图像
61 | :param mask: [H,W],范围0~1
62 | :return: tuple(cam,heatmap)
63 | """
64 | # mask转为heatmap
65 | heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
66 | heatmap = np.float32(heatmap) / 255
67 | heatmap = heatmap[..., ::-1] # gbr to rgb
68 |
69 | # 合并heatmap到原始图像
70 | cam = heatmap + np.float32(image)
71 | return norm_image(cam), heatmap
72 |
73 |
74 | def save_image(image_dicts, input_image_name, layer_name, network='retinanet', output_dir='./results'):
75 | prefix = os.path.splitext(input_image_name)[0]
76 | for key, image in image_dicts.items():
77 | if key == 'predict_box':
78 | io.imsave(os.path.join(output_dir,
79 | '{}-{}-{}.jpg'.format(prefix, network, key)),
80 | image)
81 | else:
82 | io.imsave(os.path.join(output_dir,
83 | '{}-{}-{}-{}.jpg'.format(prefix, network, layer_name, key)),
84 | image)
85 |
86 |
87 | def get_parser():
88 | parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
89 | parser.add_argument(
90 | "--config-file",
91 | default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
92 | metavar="FILE",
93 | help="path to config file",
94 | )
95 | parser.add_argument("--input", help="A list of space separated input images")
96 | parser.add_argument(
97 | "--output",
98 | help="A file or directory to save output visualizations. "
99 | "If not given, will show output in an OpenCV window.",
100 | )
101 |
102 | parser.add_argument(
103 | "--confidence-threshold",
104 | type=float,
105 | default=0.5,
106 | help="Minimum score for instance predictions to be shown",
107 | )
108 | parser.add_argument(
109 | "--opts",
110 | help="Modify config options using the command-line 'KEY VALUE' pairs",
111 | default=[],
112 | nargs=argparse.REMAINDER,
113 | )
114 | parser.add_argument('--layer-name', type=str, default='head.cls_subnet.2',
115 | help='使用哪层特征去生成CAM')
116 | return parser
117 |
118 |
119 | def main(args):
120 | setup_logger(name="fvcore")
121 | logger = setup_logger()
122 | logger.info("Arguments: " + str(args))
123 |
124 | cfg = setup_cfg(args)
125 | print(cfg)
126 | # 构建模型
127 | model = build_model(cfg)
128 | # 加载权重
129 | checkpointer = DetectionCheckpointer(model)
130 | checkpointer.load(cfg.MODEL.WEIGHTS)
131 |
132 | # 加载图像
133 | path = os.path.expanduser(args.input)
134 | original_image = read_image(path, format="BGR")
135 | height, width = original_image.shape[:2]
136 | transform_gen = T.ResizeShortestEdge(
137 | [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
138 | )
139 | image = transform_gen.get_transform(original_image).apply_image(original_image)
140 | image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).requires_grad_(True)
141 |
142 | inputs = {"image": image, "height": height, "width": width}
143 |
144 | # Grad-CAM
145 | layer_name = args.layer_name
146 | grad_cam = GradCAM(model, layer_name)
147 | mask, box, class_id = grad_cam(inputs) # cam mask
148 | grad_cam.remove_handlers()
149 |
150 | #
151 | image_dict = {}
152 | img = original_image[..., ::-1]
153 | x1, y1, x2, y2 = box
154 | image_dict['predict_box'] = img[y1:y2, x1:x2]
155 | image_cam, image_dict['heatmap'] = gen_cam(img[y1:y2, x1:x2], mask[y1:y2, x1:x2])
156 |
157 | # Grad-CAM++
158 | grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
159 | mask_plus_plus = grad_cam_plus_plus(inputs) # cam mask
160 |
161 | _, image_dict['heatmap++'] = gen_cam(img[y1:y2, x1:x2], mask_plus_plus[y1:y2, x1:x2])
162 | grad_cam_plus_plus.remove_handlers()
163 |
164 | # 获取类别名称
165 | meta = MetadataCatalog.get(
166 | cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
167 | )
168 | label = meta.thing_classes[class_id]
169 |
170 | print("label:{}".format(label))
171 |
172 | save_image(image_dict, os.path.basename(path), args.layer_name)
173 |
174 |
175 | if __name__ == "__main__":
176 | """
177 | Usage:export KMP_DUPLICATE_LIB_OK=TRUE
178 | python detection/demo_retinanet.py --config-file detection/retinanet_R_50_FPN_3x.yaml \
179 | --input ./examples/pic1.jpg \
180 | --layer-name head.cls_subnet.7 \
181 | --opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_4cafe0.pkl MODEL.DEVICE cpu
182 | """
183 | mp.set_start_method("spawn", force=True)
184 | arguments = get_parser().parse_args()
185 | main(arguments)
186 |
--------------------------------------------------------------------------------
/detection/faster_rcnn_R_50_C4.yaml:
--------------------------------------------------------------------------------
1 | _BASE_: "./Base-RCNN-C4.yaml"
2 | MODEL:
3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
4 | MASK_ON: False
5 | RESNETS:
6 | DEPTH: 50
7 | ROI_HEADS:
8 | NUM_CLASSES: 20
9 | INPUT:
10 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
11 | MIN_SIZE_TEST: 800
12 | DATASETS:
13 | TRAIN: ('voc_2007_trainval', 'voc_2012_trainval')
14 | TEST: ('voc_2007_test',)
15 | SOLVER:
16 | STEPS: (12000, 16000)
17 | MAX_ITER: 18000 # 17.4 epochs
18 | WARMUP_ITERS: 100
19 |
--------------------------------------------------------------------------------
/detection/grad_cam.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @File : grad_cam.py
4 | @Time : 2020/3/14 下午4:06
5 | @Author : yizuotian
6 | @Description :
7 | """
8 | import cv2
9 | import numpy as np
10 |
11 |
12 | class GradCAM(object):
13 | """
14 | 1: 网络不更新梯度,输入需要梯度更新
15 | 2: 使用目标类别的得分做反向传播
16 | """
17 |
18 | def __init__(self, net, layer_name):
19 | self.net = net
20 | self.layer_name = layer_name
21 | self.feature = None
22 | self.gradient = None
23 | self.net.eval()
24 | self.handlers = []
25 | self._register_hook()
26 |
27 | def _get_features_hook(self, module, input, output):
28 | self.feature = output
29 | print("feature shape:{}".format(output.size()))
30 |
31 | def _get_grads_hook(self, module, input_grad, output_grad):
32 | """
33 |
34 | :param input_grad: tuple, input_grad[0]: None
35 | input_grad[1]: weight
36 | input_grad[2]: bias
37 | :param output_grad:tuple,长度为1
38 | :return:
39 | """
40 | self.gradient = output_grad[0]
41 |
42 | def _register_hook(self):
43 | for (name, module) in self.net.named_modules():
44 | if name == self.layer_name:
45 | self.handlers.append(module.register_forward_hook(self._get_features_hook))
46 | self.handlers.append(module.register_backward_hook(self._get_grads_hook))
47 |
48 | def remove_handlers(self):
49 | for handle in self.handlers:
50 | handle.remove()
51 |
52 | def __call__(self, inputs, index=0):
53 | """
54 |
55 | :param inputs: {"image": [C,H,W], "height": height, "width": width}
56 | :param index: 第几个边框
57 | :return:
58 | """
59 | self.net.zero_grad()
60 | output = self.net.inference([inputs])
61 | print(output)
62 | score = output[0]['instances'].scores[index]
63 | proposal_idx = output[0]['instances'].indices[index] # box来自第几个proposal
64 | score.backward()
65 |
66 | gradient = self.gradient[proposal_idx].cpu().data.numpy() # [C,H,W]
67 | weight = np.mean(gradient, axis=(1, 2)) # [C]
68 |
69 | feature = self.feature[proposal_idx].cpu().data.numpy() # [C,H,W]
70 |
71 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
72 | cam = np.sum(cam, axis=0) # [H,W]
73 | cam = np.maximum(cam, 0) # ReLU
74 |
75 | # 数值归一化
76 | cam -= np.min(cam)
77 | cam /= np.max(cam)
78 | # resize to 224*224
79 | box = output[0]['instances'].pred_boxes.tensor[index].detach().numpy().astype(np.int32)
80 | x1, y1, x2, y2 = box
81 | cam = cv2.resize(cam, (x2 - x1, y2 - y1))
82 |
83 | class_id = output[0]['instances'].pred_classes[index].detach().numpy()
84 | return cam, box, class_id
85 |
86 |
87 | class GradCamPlusPlus(GradCAM):
88 | def __init__(self, net, layer_name):
89 | super(GradCamPlusPlus, self).__init__(net, layer_name)
90 |
91 | def __call__(self, inputs, index=0):
92 | """
93 |
94 | :param inputs: {"image": [C,H,W], "height": height, "width": width}
95 | :param index: 第几个边框
96 | :return:
97 | """
98 | self.net.zero_grad()
99 | output = self.net.inference([inputs])
100 | print(output)
101 | score = output[0]['instances'].scores[index]
102 | proposal_idx = output[0]['instances'].indices[index] # box来自第几个proposal
103 | score.backward()
104 |
105 | gradient = self.gradient[proposal_idx].cpu().data.numpy() # [C,H,W]
106 | gradient = np.maximum(gradient, 0.) # ReLU
107 | indicate = np.where(gradient > 0, 1., 0.) # 示性函数
108 | norm_factor = np.sum(gradient, axis=(1, 2)) # [C]归一化
109 | for i in range(len(norm_factor)):
110 | norm_factor[i] = 1. / norm_factor[i] if norm_factor[i] > 0. else 0. # 避免除零
111 | alpha = indicate * norm_factor[:, np.newaxis, np.newaxis] # [C,H,W]
112 |
113 | weight = np.sum(gradient * alpha, axis=(1, 2)) # [C] alpha*ReLU(gradient)
114 |
115 | feature = self.feature[proposal_idx].cpu().data.numpy() # [C,H,W]
116 |
117 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
118 | cam = np.sum(cam, axis=0) # [H,W]
119 | # cam = np.maximum(cam, 0) # ReLU
120 |
121 | # 数值归一化
122 | cam -= np.min(cam)
123 | cam /= np.max(cam)
124 | # resize to box scale
125 | box = output[0]['instances'].pred_boxes.tensor[index].detach().numpy().astype(np.int32)
126 | x1, y1, x2, y2 = box
127 | cam = cv2.resize(cam, (x2 - x1, y2 - y1))
128 |
129 | return cam
130 |
--------------------------------------------------------------------------------
/detection/grad_cam_retinanet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @File : grad_cam_retinanet.py
4 | @Time : 2020/5/16 下午9:41
5 | @Author : yizuotian
6 | @Description : retinanet GradCAM
7 | """
8 | import cv2
9 | import numpy as np
10 |
11 |
12 | class GradCAM(object):
13 | """
14 | 1: 网络不更新梯度,输入需要梯度更新
15 | 2: 使用目标类别的得分做反向传播
16 | """
17 |
18 | def __init__(self, net, layer_name):
19 | self.net = net
20 | self.layer_name = layer_name
21 | self.feature = []
22 | self.gradient = []
23 | self.net.eval()
24 | self.handlers = []
25 | self._register_hook()
26 |
27 | def _get_features_hook(self, module, input, output):
28 | self.feature.append(output) # 不同层级特征
29 | print("feature shape:{}".format(output.size()))
30 |
31 | def _get_grads_hook(self, module, input_grad, output_grad):
32 | """
33 |
34 | :param input_grad: tuple, input_grad[0]: None
35 | input_grad[1]: weight
36 | input_grad[2]: bias
37 | :param output_grad:tuple,长度为1
38 | :return:
39 | """
40 | self.gradient.insert(0, output_grad[0]) # 梯度的顺序反的
41 | print("gradient shape:{}".format(output_grad[0].size()))
42 |
43 | def _register_hook(self):
44 | for (name, module) in self.net.named_modules():
45 | if name == self.layer_name:
46 | self.handlers.append(module.register_forward_hook(self._get_features_hook))
47 | self.handlers.append(module.register_backward_hook(self._get_grads_hook))
48 |
49 | def remove_handlers(self):
50 | for handle in self.handlers:
51 | handle.remove()
52 |
53 | def __call__(self, inputs, index=0):
54 | """
55 |
56 | :param inputs: {"image": [C,H,W], "height": height, "width": width}
57 | :param index: 第几个边框
58 | :return:
59 | """
60 | self.net.zero_grad()
61 | output = self.net.predict([inputs])
62 | print(output)
63 | score = output[0]['instances'].scores[index]
64 | feature_level = output[0]['instances'].feature_levels[index] # box来自第几层feature map
65 | score.backward()
66 |
67 | gradient = self.gradient[feature_level][0].cpu().data.numpy() # [C,H,W]
68 | weight = np.mean(gradient, axis=(1, 2)) # [C]
69 |
70 | # feature_level 指feature map的层级,0去除batch维
71 | feature = self.feature[feature_level][0].cpu().data.numpy() # [C,H,W]
72 |
73 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
74 | cam = np.sum(cam, axis=0) # [H,W]
75 | cam = np.maximum(cam, 0) # ReLU
76 |
77 | # 数值归一化
78 | cam -= np.min(cam)
79 | cam /= np.max(cam)
80 | # 缩放到输入图像尺寸
81 | h, w = inputs['height'], inputs['width']
82 | cam = cv2.resize(cam, (w, h))
83 |
84 | box = output[0]['instances'].pred_boxes.tensor[index].detach().numpy().astype(np.int32)
85 | class_id = output[0]['instances'].pred_classes[index].detach().numpy()
86 | return cam, box, class_id
87 |
88 |
89 | class GradCamPlusPlus(GradCAM):
90 | def __init__(self, net, layer_name):
91 | super(GradCamPlusPlus, self).__init__(net, layer_name)
92 |
93 | def __call__(self, inputs, index=0):
94 | """
95 |
96 | :param inputs: {"image": [C,H,W], "height": height, "width": width}
97 | :param index: 第几个边框
98 | :return:
99 | """
100 | self.net.zero_grad()
101 | output = self.net.predict([inputs])
102 | print(output)
103 | score = output[0]['instances'].scores[index]
104 | feature_level = output[0]['instances'].feature_levels[index] # box来自第几层feature map
105 | score.backward()
106 |
107 | gradient = self.gradient[feature_level][0].cpu().data.numpy() # [C,H,W]
108 | gradient = np.maximum(gradient, 0.) # ReLU
109 | indicate = np.where(gradient > 0, 1., 0.) # 示性函数
110 | norm_factor = np.sum(gradient, axis=(1, 2)) # [C]归一化
111 | for i in range(len(norm_factor)):
112 | norm_factor[i] = 1. / norm_factor[i] if norm_factor[i] > 0. else 0. # 避免除零
113 | alpha = indicate * norm_factor[:, np.newaxis, np.newaxis] # [C,H,W]
114 |
115 | weight = np.sum(gradient * alpha, axis=(1, 2)) # [C] alpha*ReLU(gradient)
116 |
117 | feature = self.feature[feature_level][0].cpu().data.numpy() # [C,H,W]
118 |
119 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
120 | cam = np.sum(cam, axis=0) # [H,W]
121 | # cam = np.maximum(cam, 0) # ReLU
122 |
123 | # 数值归一化
124 | cam -= np.min(cam)
125 | cam /= np.max(cam)
126 | # 缩放到输入图像尺寸
127 | h, w = inputs['height'], inputs['width']
128 | cam = cv2.resize(cam, (w, h))
129 |
130 | return cam
131 |
--------------------------------------------------------------------------------
/detection/retinanet_R_50_FPN_3x.yaml:
--------------------------------------------------------------------------------
1 | _BASE_: "./Base-RetinaNet.yaml"
2 | MODEL:
3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
4 | RESNETS:
5 | DEPTH: 50
6 | SOLVER:
7 | STEPS: (210000, 250000)
8 | MAX_ITER: 270000
9 |
--------------------------------------------------------------------------------
/examples/Grad-CAM++.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/Grad-CAM++.png
--------------------------------------------------------------------------------
/examples/grad-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/grad-cam.jpg
--------------------------------------------------------------------------------
/examples/multiple_dogs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/multiple_dogs.jpg
--------------------------------------------------------------------------------
/examples/pic1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/pic1.jpg
--------------------------------------------------------------------------------
/examples/pic2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/pic2.jpg
--------------------------------------------------------------------------------
/examples/pic3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/pic3.jpg
--------------------------------------------------------------------------------
/examples/pic4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/examples/pic4.jpg
--------------------------------------------------------------------------------
/interpretability/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on 2019/8/4 上午9:37
4 |
5 | @author: mick.yi
6 |
7 | """
--------------------------------------------------------------------------------
/interpretability/grad_cam.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on 2019/8/4 上午9:37
4 |
5 | @author: mick.yi
6 |
7 | """
8 | import numpy as np
9 | import cv2
10 |
11 |
12 | class GradCAM(object):
13 | """
14 | 1: 网络不更新梯度,输入需要梯度更新
15 | 2: 使用目标类别的得分做反向传播
16 | """
17 |
18 | def __init__(self, net, layer_name):
19 | self.net = net
20 | self.layer_name = layer_name
21 | self.feature = None
22 | self.gradient = None
23 | self.net.eval()
24 | self.handlers = []
25 | self._register_hook()
26 |
27 | def _get_features_hook(self, module, input, output):
28 | self.feature = output
29 | print("feature shape:{}".format(output.size()))
30 |
31 | def _get_grads_hook(self, module, input_grad, output_grad):
32 | """
33 |
34 | :param input_grad: tuple, input_grad[0]: None
35 | input_grad[1]: weight
36 | input_grad[2]: bias
37 | :param output_grad:tuple,长度为1
38 | :return:
39 | """
40 | self.gradient = output_grad[0]
41 |
42 | def _register_hook(self):
43 | for (name, module) in self.net.named_modules():
44 | if name == self.layer_name:
45 | self.handlers.append(module.register_forward_hook(self._get_features_hook))
46 | self.handlers.append(module.register_backward_hook(self._get_grads_hook))
47 |
48 | def remove_handlers(self):
49 | for handle in self.handlers:
50 | handle.remove()
51 |
52 | def __call__(self, inputs, index):
53 | """
54 |
55 | :param inputs: [1,3,H,W]
56 | :param index: class id
57 | :return:
58 | """
59 | self.net.zero_grad()
60 | output = self.net(inputs) # [1,num_classes]
61 | if index is None:
62 | index = np.argmax(output.cpu().data.numpy())
63 | target = output[0][index]
64 | target.backward()
65 |
66 | gradient = self.gradient[0].cpu().data.numpy() # [C,H,W]
67 | weight = np.mean(gradient, axis=(1, 2)) # [C]
68 |
69 | feature = self.feature[0].cpu().data.numpy() # [C,H,W]
70 |
71 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
72 | cam = np.sum(cam, axis=0) # [H,W]
73 | cam = np.maximum(cam, 0) # ReLU
74 |
75 | # 数值归一化
76 | cam -= np.min(cam)
77 | cam /= np.max(cam)
78 | # resize to 224*224
79 | cam = cv2.resize(cam, (224, 224))
80 | return cam
81 |
82 |
83 | class GradCamPlusPlus(GradCAM):
84 | def __init__(self, net, layer_name):
85 | super(GradCamPlusPlus, self).__init__(net, layer_name)
86 |
87 | def __call__(self, inputs, index):
88 | """
89 |
90 | :param inputs: [1,3,H,W]
91 | :param index: class id
92 | :return:
93 | """
94 | self.net.zero_grad()
95 | output = self.net(inputs) # [1,num_classes]
96 | if index is None:
97 | index = np.argmax(output.cpu().data.numpy())
98 | target = output[0][index]
99 | target.backward()
100 |
101 | gradient = self.gradient[0].cpu().data.numpy() # [C,H,W]
102 | gradient = np.maximum(gradient, 0.) # ReLU
103 | indicate = np.where(gradient > 0, 1., 0.) # 示性函数
104 | norm_factor = np.sum(gradient, axis=(1, 2)) # [C]归一化
105 | for i in range(len(norm_factor)):
106 | norm_factor[i] = 1. / norm_factor[i] if norm_factor[i] > 0. else 0. # 避免除零
107 | alpha = indicate * norm_factor[:, np.newaxis, np.newaxis] # [C,H,W]
108 |
109 | weight = np.sum(gradient * alpha, axis=(1, 2)) # [C] alpha*ReLU(gradient)
110 |
111 | feature = self.feature[0].cpu().data.numpy() # [C,H,W]
112 |
113 | cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]
114 | cam = np.sum(cam, axis=0) # [H,W]
115 | # cam = np.maximum(cam, 0) # ReLU
116 |
117 | # 数值归一化
118 | cam -= np.min(cam)
119 | cam /= np.max(cam)
120 | # resize to 224*224
121 | cam = cv2.resize(cam, (224, 224))
122 | return cam
123 |
--------------------------------------------------------------------------------
/interpretability/guided_back_propagation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on 2019/8/4 上午9:45
4 |
5 | @author: mick.yi
6 |
7 | """
8 | import torch
9 | from torch import nn
10 | import numpy as np
11 |
12 |
13 | class GuidedBackPropagation(object):
14 |
15 | def __init__(self, net):
16 | self.net = net
17 | for (name, module) in self.net.named_modules():
18 | if isinstance(module, nn.ReLU):
19 | module.register_backward_hook(self.backward_hook)
20 | self.net.eval()
21 |
22 | @classmethod
23 | def backward_hook(cls, module, grad_in, grad_out):
24 | """
25 |
26 | :param module:
27 | :param grad_in: tuple,长度为1
28 | :param grad_out: tuple,长度为1
29 | :return: tuple(new_grad_in,)
30 | """
31 | return torch.clamp(grad_in[0], min=0.0),
32 |
33 | def __call__(self, inputs, index=None):
34 | """
35 |
36 | :param inputs: [1,3,H,W]
37 | :param index: class_id
38 | :return:
39 | """
40 | self.net.zero_grad()
41 | output = self.net(inputs) # [1,num_classes]
42 | if index is None:
43 | index = np.argmax(output.cpu().data.numpy())
44 | target = output[0][index]
45 |
46 | target.backward()
47 |
48 | return inputs.grad[0] # [3,H,W]
49 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on 2019/8/4 上午9:53
4 |
5 | @author: mick.yi
6 |
7 | 入口类
8 |
9 | """
10 | import argparse
11 | import os
12 | import re
13 |
14 | import cv2
15 | import numpy as np
16 | import torch
17 | from skimage import io
18 | from torch import nn
19 | from torchvision import models
20 |
21 | from interpretability.grad_cam import GradCAM, GradCamPlusPlus
22 | from interpretability.guided_back_propagation import GuidedBackPropagation
23 |
24 |
25 | def get_net(net_name, weight_path=None):
26 | """
27 | 根据网络名称获取模型
28 | :param net_name: 网络名称
29 | :param weight_path: 与训练权重路径
30 | :return:
31 | """
32 | pretrain = weight_path is None # 没有指定权重路径,则加载默认的预训练权重
33 | if net_name in ['vgg', 'vgg16']:
34 | net = models.vgg16(pretrained=pretrain)
35 | elif net_name == 'vgg19':
36 | net = models.vgg19(pretrained=pretrain)
37 | elif net_name in ['resnet', 'resnet50']:
38 | net = models.resnet50(pretrained=pretrain)
39 | elif net_name == 'resnet101':
40 | net = models.resnet101(pretrained=pretrain)
41 | elif net_name in ['densenet', 'densenet121']:
42 | net = models.densenet121(pretrained=pretrain)
43 | elif net_name in ['inception']:
44 | net = models.inception_v3(pretrained=pretrain)
45 | elif net_name in ['mobilenet_v2']:
46 | net = models.mobilenet_v2(pretrained=pretrain)
47 | elif net_name in ['shufflenet_v2']:
48 | net = models.shufflenet_v2_x1_0(pretrained=pretrain)
49 | else:
50 | raise ValueError('invalid network name:{}'.format(net_name))
51 | # 加载指定路径的权重参数
52 | if weight_path is not None and net_name.startswith('densenet'):
53 | pattern = re.compile(
54 | r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
55 | state_dict = torch.load(weight_path)
56 | for key in list(state_dict.keys()):
57 | res = pattern.match(key)
58 | if res:
59 | new_key = res.group(1) + res.group(2)
60 | state_dict[new_key] = state_dict[key]
61 | del state_dict[key]
62 | net.load_state_dict(state_dict)
63 | elif weight_path is not None:
64 | net.load_state_dict(torch.load(weight_path))
65 | return net
66 |
67 |
68 | def get_last_conv_name(net):
69 | """
70 | 获取网络的最后一个卷积层的名字
71 | :param net:
72 | :return:
73 | """
74 | layer_name = None
75 | for name, m in net.named_modules():
76 | if isinstance(m, nn.Conv2d):
77 | layer_name = name
78 | return layer_name
79 |
80 |
81 | def prepare_input(image):
82 | image = image.copy()
83 |
84 | # 归一化
85 | means = np.array([0.485, 0.456, 0.406])
86 | stds = np.array([0.229, 0.224, 0.225])
87 | image -= means
88 | image /= stds
89 |
90 | image = np.ascontiguousarray(np.transpose(image, (2, 0, 1))) # channel first
91 | image = image[np.newaxis, ...] # 增加batch维
92 |
93 | return torch.tensor(image, requires_grad=True)
94 |
95 |
96 | def gen_cam(image, mask):
97 | """
98 | 生成CAM图
99 | :param image: [H,W,C],原始图像
100 | :param mask: [H,W],范围0~1
101 | :return: tuple(cam,heatmap)
102 | """
103 | # mask转为heatmap
104 | heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
105 | heatmap = np.float32(heatmap) / 255
106 | heatmap = heatmap[..., ::-1] # gbr to rgb
107 |
108 | # 合并heatmap到原始图像
109 | cam = heatmap + np.float32(image)
110 | return norm_image(cam), (heatmap * 255).astype(np.uint8)
111 |
112 |
113 | def norm_image(image):
114 | """
115 | 标准化图像
116 | :param image: [H,W,C]
117 | :return:
118 | """
119 | image = image.copy()
120 | image -= np.max(np.min(image), 0)
121 | image /= np.max(image)
122 | image *= 255.
123 | return np.uint8(image)
124 |
125 |
126 | def gen_gb(grad):
127 | """
128 | 生guided back propagation 输入图像的梯度
129 | :param grad: tensor,[3,H,W]
130 | :return:
131 | """
132 | # 标准化
133 | grad = grad.data.numpy()
134 | gb = np.transpose(grad, (1, 2, 0))
135 | return gb
136 |
137 |
138 | def save_image(image_dicts, input_image_name, network, output_dir):
139 | prefix = os.path.splitext(input_image_name)[0]
140 | for key, image in image_dicts.items():
141 | io.imsave(os.path.join(output_dir, '{}-{}-{}.jpg'.format(prefix, network, key)), image)
142 |
143 |
144 | def main(args):
145 | # 输入
146 | img = io.imread(args.image_path)
147 | img = np.float32(cv2.resize(img, (224, 224))) / 255
148 | inputs = prepare_input(img)
149 | # 输出图像
150 | image_dict = {}
151 | # 网络
152 | net = get_net(args.network, args.weight_path)
153 | # Grad-CAM
154 | layer_name = get_last_conv_name(net) if args.layer_name is None else args.layer_name
155 | grad_cam = GradCAM(net, layer_name)
156 | mask = grad_cam(inputs, args.class_id) # cam mask
157 | image_dict['cam'], image_dict['heatmap'] = gen_cam(img, mask)
158 | grad_cam.remove_handlers()
159 | # Grad-CAM++
160 | grad_cam_plus_plus = GradCamPlusPlus(net, layer_name)
161 | mask_plus_plus = grad_cam_plus_plus(inputs, args.class_id) # cam mask
162 | image_dict['cam++'], image_dict['heatmap++'] = gen_cam(img, mask_plus_plus)
163 | grad_cam_plus_plus.remove_handlers()
164 |
165 | # GuidedBackPropagation
166 | gbp = GuidedBackPropagation(net)
167 | inputs.grad.zero_() # 梯度置零
168 | grad = gbp(inputs)
169 |
170 | gb = gen_gb(grad)
171 | image_dict['gb'] = norm_image(gb)
172 | # 生成Guided Grad-CAM
173 | cam_gb = gb * mask[..., np.newaxis]
174 | image_dict['cam_gb'] = norm_image(cam_gb)
175 |
176 | save_image(image_dict, os.path.basename(args.image_path), args.network, args.output_dir)
177 |
178 |
179 | if __name__ == '__main__':
180 | parser = argparse.ArgumentParser()
181 | parser.add_argument('--network', type=str, default='resnet50',
182 | help='ImageNet classification network')
183 | parser.add_argument('--image-path', type=str, default='./examples/pic1.jpg',
184 | help='input image path')
185 | parser.add_argument('--weight-path', type=str, default=None,
186 | help='weight path of the model')
187 | parser.add_argument('--layer-name', type=str, default=None,
188 | help='last convolutional layer name')
189 | parser.add_argument('--class-id', type=int, default=None,
190 | help='class id')
191 | parser.add_argument('--output-dir', type=str, default='results',
192 | help='output directory to save results')
193 | arguments = parser.parse_args()
194 |
195 | main(arguments)
196 |
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-densenet121-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-densenet121-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-inception-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-inception-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-mobilenet_v2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-mobilenet_v2-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet101-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet101-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-resnet50-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-resnet50-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-shufflenet_v2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-shufflenet_v2-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg16-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg16-heatmap.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-cam++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-cam.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-cam_gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-gb.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-heatmap++.jpg
--------------------------------------------------------------------------------
/results/multiple_dogs-vgg19-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/multiple_dogs-vgg19-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-densenet121-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-densenet121-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-frcnn-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-frcnn-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-frcnn-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-frcnn-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-frcnn-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-frcnn-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-inception-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-inception-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-mobilenet_v2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-mobilenet_v2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet101-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet101-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-resnet50-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-resnet50-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.0-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.0-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.0-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.0-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.1-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.1-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.1-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.1-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.3-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.3-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.3-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.3-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.4-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.4-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.4-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.4-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.5-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.5-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.5-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.5-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.6-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.6-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.6-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.6-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.7-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.7-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-head.cls_subnet.7-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-head.cls_subnet.7-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-retinanet-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-retinanet-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-shufflenet_v2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-shufflenet_v2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg16-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg16-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-cam++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-cam++.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-cam.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-cam.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-cam_gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-cam_gb.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-gb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-gb.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic1-vgg19-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic1-vgg19-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-frcnn-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-frcnn-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-frcnn-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-frcnn-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-frcnn-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-frcnn-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.0-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.0-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.0-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.0-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.1-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.1-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.1-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.1-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.3-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.3-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.3-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.3-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.4-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.4-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.4-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.4-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.5-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.5-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.5-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.5-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.6-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.6-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.6-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.6-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.7-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.7-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-head.cls_subnet.7-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-head.cls_subnet.7-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic2-retinanet-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic2-retinanet-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic3-frcnn-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-frcnn-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-frcnn-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-frcnn-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-frcnn-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-frcnn-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.0-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.0-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.0-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.0-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.1-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.1-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.1-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.1-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.3-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.3-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.3-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.3-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.4-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.4-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.4-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.4-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.5-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.5-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.5-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.5-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.6-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.6-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.6-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.6-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.7-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.7-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-head.cls_subnet.7-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-head.cls_subnet.7-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic3-retinanet-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic3-retinanet-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic4-frcnn-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-frcnn-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-frcnn-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-frcnn-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-frcnn-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-frcnn-predict_box.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.0-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.0-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.0-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.0-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.1-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.1-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.1-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.1-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.2-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.2-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.2-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.2-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.3-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.3-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.3-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.3-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.4-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.4-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.4-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.4-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.5-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.5-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.5-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.5-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.6-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.6-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.6-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.6-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.7-heatmap++.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.7-heatmap++.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-head.cls_subnet.7-heatmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-head.cls_subnet.7-heatmap.jpg
--------------------------------------------------------------------------------
/results/pic4-retinanet-predict_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhenshen-mla/Grad-CAM.pytorch/d26e0c7748fe7764b8bbb3317c3183f685cb896d/results/pic4-retinanet-predict_box.jpg
--------------------------------------------------------------------------------