├── .gitignore
├── LICENSE
├── README.md
├── README_zh-CN.md
├── configs
├── train_e2fgvi.json
└── train_e2fgvi_hq.json
├── core
├── dataset.py
├── dist.py
├── loss.py
├── lr_scheduler.py
├── metrics.py
├── trainer.py
└── utils.py
├── datasets
├── davis
│ ├── test.json
│ └── train.json
├── youtube-vos
│ ├── test.json
│ └── train.json
└── zip_dir.sh
├── environment.yml
├── evaluate.py
├── examples
├── schoolgirls.mp4
├── schoolgirls_mask
│ ├── 00000.png
│ ├── 00001.png
│ ├── 00002.png
│ ├── 00003.png
│ ├── 00004.png
│ ├── 00005.png
│ ├── 00006.png
│ ├── 00007.png
│ ├── 00008.png
│ ├── 00009.png
│ ├── 00010.png
│ ├── 00011.png
│ ├── 00012.png
│ ├── 00013.png
│ ├── 00014.png
│ ├── 00015.png
│ ├── 00016.png
│ ├── 00017.png
│ ├── 00018.png
│ ├── 00019.png
│ ├── 00020.png
│ ├── 00021.png
│ ├── 00022.png
│ ├── 00023.png
│ ├── 00024.png
│ ├── 00025.png
│ ├── 00026.png
│ ├── 00027.png
│ ├── 00028.png
│ ├── 00029.png
│ ├── 00030.png
│ ├── 00031.png
│ ├── 00032.png
│ ├── 00033.png
│ ├── 00034.png
│ ├── 00035.png
│ ├── 00036.png
│ ├── 00037.png
│ ├── 00038.png
│ ├── 00039.png
│ ├── 00040.png
│ ├── 00041.png
│ ├── 00042.png
│ ├── 00043.png
│ ├── 00044.png
│ ├── 00045.png
│ ├── 00046.png
│ ├── 00047.png
│ ├── 00048.png
│ ├── 00049.png
│ ├── 00050.png
│ ├── 00051.png
│ ├── 00052.png
│ ├── 00053.png
│ ├── 00054.png
│ ├── 00055.png
│ ├── 00056.png
│ ├── 00057.png
│ ├── 00058.png
│ ├── 00059.png
│ ├── 00060.png
│ ├── 00061.png
│ ├── 00062.png
│ ├── 00063.png
│ ├── 00064.png
│ ├── 00065.png
│ ├── 00066.png
│ ├── 00067.png
│ ├── 00068.png
│ ├── 00069.png
│ ├── 00070.png
│ ├── 00071.png
│ ├── 00072.png
│ ├── 00073.png
│ ├── 00074.png
│ ├── 00075.png
│ ├── 00076.png
│ ├── 00077.png
│ ├── 00078.png
│ └── 00079.png
├── tennis
│ ├── 00000.png
│ ├── 00001.png
│ ├── 00002.png
│ ├── 00003.png
│ ├── 00004.png
│ ├── 00005.png
│ ├── 00006.png
│ ├── 00007.png
│ ├── 00008.png
│ ├── 00009.png
│ ├── 00010.png
│ ├── 00011.png
│ ├── 00012.png
│ ├── 00013.png
│ ├── 00014.png
│ ├── 00015.png
│ ├── 00016.png
│ ├── 00017.png
│ ├── 00018.png
│ ├── 00019.png
│ ├── 00020.png
│ ├── 00021.png
│ ├── 00022.png
│ ├── 00023.png
│ ├── 00024.png
│ ├── 00025.png
│ ├── 00026.png
│ ├── 00027.png
│ ├── 00028.png
│ ├── 00029.png
│ ├── 00030.png
│ ├── 00031.png
│ ├── 00032.png
│ ├── 00033.png
│ ├── 00034.png
│ ├── 00035.png
│ ├── 00036.png
│ ├── 00037.png
│ ├── 00038.png
│ ├── 00039.png
│ ├── 00040.png
│ ├── 00041.png
│ ├── 00042.png
│ ├── 00043.png
│ ├── 00044.png
│ ├── 00045.png
│ ├── 00046.png
│ ├── 00047.png
│ ├── 00048.png
│ ├── 00049.png
│ ├── 00050.png
│ ├── 00051.png
│ ├── 00052.png
│ ├── 00053.png
│ ├── 00054.png
│ ├── 00055.png
│ ├── 00056.png
│ ├── 00057.png
│ ├── 00058.png
│ ├── 00059.png
│ ├── 00060.png
│ ├── 00061.png
│ ├── 00062.png
│ ├── 00063.png
│ ├── 00064.png
│ ├── 00065.png
│ ├── 00066.png
│ ├── 00067.png
│ ├── 00068.png
│ └── 00069.png
└── tennis_mask
│ ├── 00000.png
│ ├── 00001.png
│ ├── 00002.png
│ ├── 00003.png
│ ├── 00004.png
│ ├── 00005.png
│ ├── 00006.png
│ ├── 00007.png
│ ├── 00008.png
│ ├── 00009.png
│ ├── 00010.png
│ ├── 00011.png
│ ├── 00012.png
│ ├── 00013.png
│ ├── 00014.png
│ ├── 00015.png
│ ├── 00016.png
│ ├── 00017.png
│ ├── 00018.png
│ ├── 00019.png
│ ├── 00020.png
│ ├── 00021.png
│ ├── 00022.png
│ ├── 00023.png
│ ├── 00024.png
│ ├── 00025.png
│ ├── 00026.png
│ ├── 00027.png
│ ├── 00028.png
│ ├── 00029.png
│ ├── 00030.png
│ ├── 00031.png
│ ├── 00032.png
│ ├── 00033.png
│ ├── 00034.png
│ ├── 00035.png
│ ├── 00036.png
│ ├── 00037.png
│ ├── 00038.png
│ ├── 00039.png
│ ├── 00040.png
│ ├── 00041.png
│ ├── 00042.png
│ ├── 00043.png
│ ├── 00044.png
│ ├── 00045.png
│ ├── 00046.png
│ ├── 00047.png
│ ├── 00048.png
│ ├── 00049.png
│ ├── 00050.png
│ ├── 00051.png
│ ├── 00052.png
│ ├── 00053.png
│ ├── 00054.png
│ ├── 00055.png
│ ├── 00056.png
│ ├── 00057.png
│ ├── 00058.png
│ ├── 00059.png
│ ├── 00060.png
│ ├── 00061.png
│ ├── 00062.png
│ ├── 00063.png
│ ├── 00064.png
│ ├── 00065.png
│ ├── 00066.png
│ ├── 00067.png
│ ├── 00068.png
│ └── 00069.png
├── figs
├── demo_coco.gif
├── demo_tennis.gif
├── framework.png
├── quantitative_results.png
└── teaser.gif
├── model
├── e2fgvi.py
├── e2fgvi_hq.py
└── modules
│ ├── feat_prop.py
│ ├── flow_comp.py
│ ├── spectral_norm.py
│ ├── tfocal_transformer.py
│ └── tfocal_transformer_hq.py
├── release_model
└── README.md
├── test.py
└── train.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Customized
2 | *.pth
3 | *.pt
4 | keys.txt
5 | results/
6 | .vscode/
7 |
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 |
13 | # C extensions
14 | *.so
15 |
16 | # Distribution / packaging
17 | .Python
18 | build/
19 | develop-eggs/
20 | dist/
21 | downloads/
22 | eggs/
23 | .eggs/
24 | lib/
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | wheels/
30 | pip-wheel-metadata/
31 | share/python-wheels/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 | MANIFEST
36 |
37 | # PyInstaller
38 | # Usually these files are written by a python script from a template
39 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
40 | *.manifest
41 | *.spec
42 |
43 | # Installer logs
44 | pip-log.txt
45 | pip-delete-this-directory.txt
46 |
47 | # Unit test / coverage reports
48 | htmlcov/
49 | .tox/
50 | .nox/
51 | .coverage
52 | .coverage.*
53 | .cache
54 | nosetests.xml
55 | coverage.xml
56 | *.cover
57 | *.py,cover
58 | .hypothesis/
59 | .pytest_cache/
60 |
61 | # Translations
62 | *.mo
63 | *.pot
64 |
65 | # Django stuff:
66 | *.log
67 | local_settings.py
68 | db.sqlite3
69 | db.sqlite3-journal
70 |
71 | # Flask stuff:
72 | instance/
73 | .webassets-cache
74 |
75 | # Scrapy stuff:
76 | .scrapy
77 |
78 | # Sphinx documentation
79 | docs/_build/
80 |
81 | # PyBuilder
82 | target/
83 |
84 | # Jupyter Notebook
85 | .ipynb_checkpoints
86 |
87 | # IPython
88 | profile_default/
89 | ipython_config.py
90 |
91 | # pyenv
92 | .python-version
93 |
94 | # pipenv
95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
98 | # install all needed dependencies.
99 | #Pipfile.lock
100 |
101 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
102 | __pypackages__/
103 |
104 | # Celery stuff
105 | celerybeat-schedule
106 | celerybeat.pid
107 |
108 | # SageMath parsed files
109 | *.sage.py
110 |
111 | # Environments
112 | .env
113 | .venv
114 | env/
115 | venv/
116 | ENV/
117 | env.bak/
118 | venv.bak/
119 |
120 | # Spyder project settings
121 | .spyderproject
122 | .spyproject
123 |
124 | # Rope project settings
125 | .ropeproject
126 |
127 | # mkdocs documentation
128 | /site
129 |
130 | # mypy
131 | .mypy_cache/
132 | .dmypy.json
133 | dmypy.json
134 |
135 | # Pyre type checker
136 | .pyre/
137 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | ## creative commons
2 |
3 | # Attribution-NonCommercial 4.0 International
4 |
5 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
6 |
7 | ### Using Creative Commons Public Licenses
8 |
9 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
10 |
11 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).
12 |
13 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
14 |
15 | ## Creative Commons Attribution-NonCommercial 4.0 International Public License
16 |
17 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
18 |
19 | ### Section 1 – Definitions.
20 |
21 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
22 |
23 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
24 |
25 | c. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
26 |
27 | d. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
28 |
29 | e. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
30 |
31 | f. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
32 |
33 | g. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
34 |
35 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License.
36 |
37 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
38 |
39 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
40 |
41 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
42 |
43 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
44 |
45 | ### Section 2 – Scope.
46 |
47 | a. ___License grant.___
48 |
49 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
50 |
51 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and
52 |
53 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.
54 |
55 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
56 |
57 | 3. __Term.__ The term of this Public License is specified in Section 6(a).
58 |
59 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
60 |
61 | 5. __Downstream recipients.__
62 |
63 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
64 |
65 | B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
66 |
67 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
68 |
69 | b. ___Other rights.___
70 |
71 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
72 |
73 | 2. Patent and trademark rights are not licensed under this Public License.
74 |
75 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.
76 |
77 | ### Section 3 – License Conditions.
78 |
79 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
80 |
81 | a. ___Attribution.___
82 |
83 | 1. If You Share the Licensed Material (including in modified form), You must:
84 |
85 | A. retain the following if it is supplied by the Licensor with the Licensed Material:
86 |
87 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
88 |
89 | ii. a copyright notice;
90 |
91 | iii. a notice that refers to this Public License;
92 |
93 | iv. a notice that refers to the disclaimer of warranties;
94 |
95 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
96 |
97 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
98 |
99 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
100 |
101 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
102 |
103 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
104 |
105 | 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
106 |
107 | ### Section 4 – Sui Generis Database Rights.
108 |
109 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
110 |
111 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;
112 |
113 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
114 |
115 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
116 |
117 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
118 |
119 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability.
120 |
121 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__
122 |
123 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__
124 |
125 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
126 |
127 | ### Section 6 – Term and Termination.
128 |
129 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
130 |
131 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
132 |
133 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
134 |
135 | 2. upon express reinstatement by the Licensor.
136 |
137 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
138 |
139 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
140 |
141 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
142 |
143 | ### Section 7 – Other Terms and Conditions.
144 |
145 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
146 |
147 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
148 |
149 | ### Section 8 – Interpretation.
150 |
151 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
152 |
153 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
154 |
155 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
156 |
157 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
158 |
159 | > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
160 | >
161 | > Creative Commons may be contacted at creativecommons.org
162 |
163 | Copyright (c) 2022 MCG-NKU
164 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # E2FGVI (CVPR 2022)
2 | [](https://paperswithcode.com/sota/video-inpainting-on-davis?p=towards-an-end-to-end-framework-for-flow)
3 | [](https://paperswithcode.com/sota/video-inpainting-on-youtube-vos?p=towards-an-end-to-end-framework-for-flow)
4 |
5 | 
6 | 
7 |
8 | English | [简体中文](README_zh-CN.md)
9 |
10 | This repository contains the official implementation of the following paper:
11 | > **Towards An End-to-End Framework for Flow-Guided Video Inpainting**
12 | > Zhen Li#, Cheng-Ze Lu#, Jianhua Qin, Chun-Le Guo*, Ming-Ming Cheng
13 | > IEEE/CVF Conference on Computer Vision and Pattern Recognition (**CVPR**), 2022
14 |
15 | [[Paper](https://arxiv.org/abs/2204.02663)]
16 | [[Demo Video (Youtube)](https://www.youtube.com/watch?v=N--qC3T2wc4)]
17 | [[演示视频 (B站)](https://www.bilibili.com/video/BV1Ta411n7eH?spm_id_from=333.999.0.0)]
18 | [[MindSpore Implementation](https://github.com/Dragoniss/minspore-phase2-E2FGVI)]
19 | [Project Page (TBD)]
20 | [Poster (TBD)]
21 |
22 | You can try our colab demo here: [](https://colab.research.google.com/drive/12rwY2gtG8jVWlNx9pjmmM8uGmh5ue18G?usp=sharing)
23 |
24 | ## :star: News
25 | - *2022.05.15:* We release E2FGVI-HQ, which can handle videos with **arbitrary resolution**. This model could generalize well to much higher resolutions, while it only used 432x240 videos for training. Besides, it performs **better** than our original model on both PSNR and SSIM metrics.
26 | :link: Download links: [[Google Drive](https://drive.google.com/file/d/10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3/view?usp=sharing)] [[Baidu Disk](https://pan.baidu.com/s/1jfm1oFU1eIy-IRfuHP8YXw?pwd=ssb3)] :movie_camera: Demo video: [[Youtube](https://www.youtube.com/watch?v=N--qC3T2wc4)] [[B站](https://www.bilibili.com/video/BV1Ta411n7eH?spm_id_from=333.999.0.0)]
27 |
28 | - *2022.04.06:* Our code is publicly available.
29 | ## Demo
30 |
31 | 
32 |
33 | ### More examples (click for details):
34 |
35 |
36 |
37 |
38 |
39 |
40 | Coco (click me)
41 |
42 |
43 |
44 | |
45 |
46 |
47 |
48 | Tennis
49 |
50 |
51 |
52 | |
53 |
54 |
55 |
56 |
57 |
58 | Space
59 |
60 |
61 |
62 | |
63 |
64 |
65 |
66 | Motocross
67 |
68 |
69 |
70 | |
71 |
72 |
73 |
74 | ## Overview
75 | 
76 |
77 | ### :rocket: Highlights:
78 | - **SOTA performance**: The proposed E2FGVI achieves significant improvements on all quantitative metrics in comparison with SOTA methods.
79 | - **High efficiency**: Our method processes 432 × 240 videos at 0.12 seconds per frame on a Titan XP GPU, which is nearly 15× faster than previous flow-based methods. Besides, our method has the lowest FLOPs among all compared SOTA
80 | methods.
81 |
82 | ## Work in Progress
83 | - [ ] Update website page
84 | - [ ] Hugging Face demo
85 | - [ ] Efficient inference
86 |
87 | ## Dependencies and Installation
88 |
89 | 1. Clone Repo
90 |
91 | ```bash
92 | git clone https://github.com/MCG-NKU/E2FGVI.git
93 | ```
94 |
95 | 2. Create Conda Environment and Install Dependencies
96 |
97 | ```bash
98 | conda env create -f environment.yml
99 | conda activate e2fgvi
100 | ```
101 | - Python >= 3.7
102 | - PyTorch >= 1.5
103 | - CUDA >= 9.2
104 | - [mmcv-full](https://github.com/open-mmlab/mmcv#installation) (following the pipeline to install)
105 |
106 | If the `environment.yml` file does not work for you, please follow [this issue](https://github.com/MCG-NKU/E2FGVI/issues/3) to solve the problem.
107 |
108 | ## Get Started
109 | ### Prepare pretrained models
110 | Before performing the following steps, please download our pretrained model first.
111 |
112 |
113 |
114 |
115 | Model |
116 | :link: Download Links |
117 | Support Arbitrary Resolution ? |
118 | PSNR / SSIM / VFID (DAVIS) |
119 |
120 |
121 |
122 |
123 | E2FGVI |
124 |
125 | [Google Drive]
126 | [Baidu Disk]
127 | |
128 | :x: |
129 | 33.01 / 0.9721 / 0.116 |
130 |
131 |
132 | E2FGVI-HQ |
133 |
134 | [Google Drive]
135 | [Baidu Disk]
136 | |
137 | :o: |
138 | 33.06 / 0.9722 / 0.117 |
139 |
140 |
141 |
142 |
143 | Then, unzip the file and place the models to `release_model` directory.
144 |
145 | The directory structure will be arranged as:
146 | ```
147 | release_model
148 | |- E2FGVI-CVPR22.pth
149 | |- E2FGVI-HQ-CVPR22.pth
150 | |- i3d_rgb_imagenet.pt (for evaluating VFID metric)
151 | |- README.md
152 | ```
153 |
154 | ### Quick test
155 | We provide two examples in the [`examples`](./examples) directory.
156 |
157 | Run the following command to enjoy them:
158 | ```shell
159 | # The first example (using split video frames)
160 | python test.py --model e2fgvi (or e2fgvi_hq) --video examples/tennis --mask examples/tennis_mask --ckpt release_model/E2FGVI-CVPR22.pth (or release_model/E2FGVI-HQ-CVPR22.pth)
161 | # The second example (using mp4 format video)
162 | python test.py --model e2fgvi (or e2fgvi_hq) --video examples/schoolgirls.mp4 --mask examples/schoolgirls_mask --ckpt release_model/E2FGVI-CVPR22.pth (or release_model/E2FGVI-HQ-CVPR22.pth)
163 | ```
164 | The inpainting video will be saved in the `results` directory.
165 | Please prepare your own **mp4 video** (or **split frames**) and **frame-wise masks** if you want to test more cases.
166 |
167 | *Note:* E2FGVI always rescales the input video to a fixed resolution (432x240), while E2FGVI-HQ does not change the resolution of the input video. If you want to customize the output resolution, please use the `--set_size` flag and set the values of `--width` and `--height`.
168 |
169 | Example:
170 | ```shell
171 | # Using this command to output a 720p video
172 | python test.py --model e2fgvi_hq --video --mask --ckpt release_model/E2FGVI-HQ-CVPR22.pth --set_size --width 1280 --height 720
173 | ```
174 |
175 |
176 | ### Prepare dataset for training and evaluation
177 |
178 |
179 |
180 | Dataset |
181 | YouTube-VOS |
182 | DAVIS |
183 |
184 |
185 |
186 |
187 | Details |
188 | For training (3,471) and evaluation (508) |
189 | For evaluation (50 in 90) |
190 |
191 | Images |
192 | [Official Link] (Download train and test all frames) |
193 | [Official Link] (2017, 480p, TrainVal) |
194 |
195 |
196 | Masks |
197 | [Google Drive] [Baidu Disk] (For reproducing paper results) |
198 |
199 |
200 |
201 |
202 | The training and test split files are provided in `datasets/`.
203 |
204 | For each dataset, you should place `JPEGImages` to `datasets/`.
205 |
206 | Then, run `sh datasets/zip_dir.sh` (**Note**: please edit the folder path accordingly) for compressing each video in `datasets//JPEGImages`.
207 |
208 | Unzip downloaded mask files to `datasets`.
209 |
210 | The `datasets` directory structure will be arranged as: (**Note**: please check it carefully)
211 | ```
212 | datasets
213 | |- davis
214 | |- JPEGImages
215 | |- .zip
216 | |- .zip
217 | |- test_masks
218 | |-
219 | |- 00000.png
220 | |- 00001.png
221 | |- train.json
222 | |- test.json
223 | |- youtube-vos
224 | |- JPEGImages
225 | |- .zip
226 | |- .zip
227 | |- test_masks
228 | |-
229 | |- 00000.png
230 | |- 00001.png
231 | |- train.json
232 | |- test.json
233 | |- zip_file.sh
234 | ```
235 | ### Evaluation
236 | Run one of the following commands for evaluation:
237 | ```shell
238 | # For evaluating E2FGVI model
239 | python evaluate.py --model e2fgvi --dataset --data_root datasets/ --ckpt release_model/E2FGVI-CVPR22.pth
240 | # For evaluating E2FGVI-HQ model
241 | python evaluate.py --model e2fgvi_hq --dataset --data_root datasets/ --ckpt release_model/E2FGVI-HQ-CVPR22.pth
242 |
243 | ```
244 | You will get scores as paper reported if you evaluate E2FGVI.
245 | The scores of E2FGVI-HQ can be found in [[Prepare pretrained models](https://github.com/MCG-NKU/E2FGVI#prepare-pretrained-models)].
246 |
247 | The scores will also be saved in the `results/_` directory.
248 |
249 | Please add `--save_results` for further [evaluating temporal warping error](https://github.com/phoenix104104/fast_blind_video_consistency#evaluation).
250 |
251 | ### Training
252 | Our training configures are provided in [`train_e2fgvi.json`](./configs/train_e2fgvi.json) (for E2FGVI) and [`train_e2fgvi_hq.json`](./configs/train_e2fgvi_hq.json) (for E2FGVI-HQ).
253 |
254 | Run one of the following commands for training:
255 | ```shell
256 | # For training E2FGVI
257 | python train.py -c configs/train_e2fgvi.json
258 | # For training E2FGVI-HQ
259 | python train.py -c configs/train_e2fgvi_hq.json
260 | ```
261 | You could run the same command if you want to resume your training.
262 |
263 | The training loss can be monitored by running:
264 | ```shell
265 | tensorboard --logdir release_model
266 | ```
267 |
268 | You could follow [this pipeline](https://github.com/MCG-NKU/E2FGVI#evaluation) to evaluate your model.
269 | ## Results
270 |
271 | ### Quantitative results
272 | 
273 | ## Citation
274 |
275 | If you find our repo useful for your research, please consider citing our paper:
276 |
277 | ```bibtex
278 | @inproceedings{liCvpr22vInpainting,
279 | title={Towards An End-to-End Framework for Flow-Guided Video Inpainting},
280 | author={Li, Zhen and Lu, Cheng-Ze and Qin, Jianhua and Guo, Chun-Le and Cheng, Ming-Ming},
281 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
282 | year={2022}
283 | }
284 | ```
285 | ## Contact
286 |
287 | If you have any question, please feel free to contact us via `zhenli1031ATgmail.com` or `czlu919AToutlook.com`.
288 |
289 | ## License
290 | Licensed under a [Creative Commons Attribution-NonCommercial 4.0 International](https://creativecommons.org/licenses/by-nc/4.0/) for Non-commercial use only.
291 | Any commercial use should get formal permission first.
292 |
293 | ## Acknowledgement
294 |
295 | This repository is maintained by [Zhen Li](https://paper99.github.io) and [Cheng-Ze Lu](https://github.com/LGYoung).
296 |
297 | This code is based on [STTN](https://github.com/researchmm/STTN), [FuseFormer](https://github.com/ruiliu-ai/FuseFormer), [Focal-Transformer](https://github.com/microsoft/Focal-Transformer), and [MMEditing](https://github.com/open-mmlab/mmediting).
298 |
--------------------------------------------------------------------------------
/README_zh-CN.md:
--------------------------------------------------------------------------------
1 | # E2FGVI (CVPR 2022)-简体中文
2 | [](https://paperswithcode.com/sota/video-inpainting-on-davis?p=towards-an-end-to-end-framework-for-flow)
3 | [](https://paperswithcode.com/sota/video-inpainting-on-youtube-vos?p=towards-an-end-to-end-framework-for-flow)
4 |
5 | 
6 | 
7 |
8 | [English](README.md) | 简体中文
9 |
10 | 本项目包含了以下论文的官方实现:
11 | > **Towards An End-to-End Framework for Flow-Guided Video Inpainting**
12 | > Zhen Li#, Cheng-Ze Lu#, Jianhua Qin, Chun-Le Guo*, Ming-Ming Cheng
13 | > IEEE/CVF Conference on Computer Vision and Pattern Recognition (**CVPR**), 2022
14 |
15 | [[论文](https://arxiv.org/abs/2204.02663)]
16 | [[Demo Video (Youtube)](https://www.youtube.com/watch?v=N--qC3T2wc4)]
17 | [[演示视频 (B站)](https://www.bilibili.com/video/BV1Ta411n7eH?spm_id_from=333.999.0.0)]
18 | [项目主页 (待定)]
19 | [海报 (待定)]
20 |
21 | Colab实例:[](https://colab.research.google.com/drive/12rwY2gtG8jVWlNx9pjmmM8uGmh5ue18G?usp=sharing)
22 |
23 | ## :star: 最新进展
24 | - *2022.05.15:* 可适配**任意分辨率**的E2FGVI-HQ已发布.该模型仅需要在 432x240 的分辨率下进行训练, 即可适配更高分辨率下的推理任务.并且, 该模型比原先模型能够取得**更好**的PSNR/SSIM指标.
25 | :link: 下载链接: [[Google Drive](https://drive.google.com/file/d/10wGdKSUOie0XmCr8SQ2A2FeDe-mfn5w3/view?usp=sharing)] [[Baidu Disk](https://pan.baidu.com/s/1jfm1oFU1eIy-IRfuHP8YXw?pwd=ssb3)] :movie_camera: 演示视频: [[Youtube](https://www.youtube.com/watch?v=N--qC3T2wc4)] [[B站](https://www.bilibili.com/video/BV1Ta411n7eH?spm_id_from=333.999.0.0)]
26 |
27 | - *2022.04.06:* 代码公开发布.
28 | ## 演示视频
29 |
30 | 
31 |
32 | ### 更多示例 (点击查看详情):
33 |
34 |
35 |
36 |
37 |
38 |
39 | Coco
40 |
41 |
42 |
43 | |
44 |
45 |
46 |
47 | Tennis
48 |
49 |
50 |
51 | |
52 |
53 |
54 |
55 |
56 |
57 | Space
58 |
59 |
60 |
61 | |
62 |
63 |
64 |
65 | Motocross
66 |
67 |
68 |
69 | |
70 |
71 |
72 |
73 | ## 概述
74 | 
75 |
76 | ### :rocket: 特性:
77 | - **更好的性能**: 本文提出的E2FGVI模型相较于现有工作在所有量化指标上取得了显著提升.
78 | - **更快的速度**: 本文的方法在一张Titan XP GPU上, 处理分辨率为 432 × 240 的视频大约需要0.12秒/帧, 大约是先前的基于光流的方法的15倍.除此以外, 本文的方法相较于之前最优的方法具有最低的FLOPs计算量.
79 |
80 | ## 正在进行中的工作
81 | - [ ] 更新项目主页
82 | - [ ] Hugging Face 演示
83 | - [ ] 更高效的推理过程
84 |
85 | ## 安装
86 |
87 | 1. 克隆仓库
88 |
89 | ```bash
90 | git clone https://github.com/MCG-NKU/E2FGVI.git
91 | ```
92 |
93 | 2. 创建Conda环境并且安装依赖
94 |
95 | ```bash
96 | conda env create -f environment.yml
97 | conda activate e2fgvi
98 | ```
99 | - Python >= 3.7
100 | - PyTorch >= 1.5
101 | - CUDA >= 9.2
102 | - [mmcv-full](https://github.com/open-mmlab/mmcv#installation) (following the pipeline to install)
103 |
104 | 若无法使用`environment.yml`安装依赖, 请参照[此处](https://github.com/MCG-NKU/E2FGVI/issues/3).
105 |
106 | ## 快速入门
107 | ### 准备预训练模型
108 | 首先请下载预训练模型
109 |
110 |
111 |
112 |
113 | 模型 |
114 | :link: 下载链接 |
115 | 支持任意分辨率 ? |
116 | PSNR / SSIM / VFID (DAVIS) |
117 |
118 |
119 |
120 |
121 | E2FGVI |
122 |
123 | [谷歌网盘]
124 | [百度网盘]
125 | |
126 | :x: |
127 | 33.01 / 0.9721 / 0.116 |
128 |
129 |
130 | E2FGVI-HQ |
131 |
132 | [谷歌网盘]
133 | [百度网盘]
134 | |
135 | :o: |
136 | 33.06 / 0.9722 / 0.117 |
137 |
138 |
139 |
140 |
141 | 然后, 解压文件并且将模型放入`release_model`文件夹下.
142 |
143 | 文件夹目录结构如下:
144 | ```
145 | release_model
146 | |- E2FGVI-CVPR22.pth
147 | |- E2FGVI-HQ-CVPR22.pth
148 | |- i3d_rgb_imagenet.pt (for evaluating VFID metric)
149 | |- README.md
150 | ```
151 |
152 | ### 测试
153 | 我们提供了两个测试[`示例`](./examples)
154 |
155 | 使用如下命令运行:
156 | ```shell
157 | # 第一个示例 (使用视频帧)
158 | python test.py --model e2fgvi (or e2fgvi_hq) --video examples/tennis --mask examples/tennis_mask --ckpt release_model/E2FGVI-CVPR22.pth (or release_model/E2FGVI-HQ-CVPR22.pth)
159 | # 第二个示例 (使用mp4格式的视频)
160 | python test.py --model e2fgvi (or e2fgvi_hq) --video examples/schoolgirls.mp4 --mask examples/schoolgirls_mask --ckpt release_model/E2FGVI-CVPR22.pth (or release_model/E2FGVI-HQ-CVPR22.pth)
161 | ```
162 | 视频补全的结果会被保存在`results`路径下.如果想要测试更多样例, 请准备**mp4视频**(或**视频帧**)以及**每一帧的mask**.
163 |
164 | *注意:* E2FGVI会将输入视频放缩到固定的分辨率(432x240), 然而E2FGVI-HQ不会改变输入视频的分辨率.如果需要自定义输出的分辨率, 请设置`--set_size`参数以及设置输出分辨率的`--width`和`--height`值.
165 |
166 | 例:
167 | ```shell
168 | # 使用该命令输出720p视频
169 | python test.py --model e2fgvi_hq --video --mask --ckpt release_model/E2FGVI-HQ-CVPR22.pth --set_size --width 1280 --height 720
170 | ```
171 |
172 |
173 | ### 准备训练与验证集
174 |
175 |
176 |
177 | 数据集 |
178 | YouTube-VOS |
179 | DAVIS |
180 |
181 |
182 |
183 |
184 | 详情 |
185 | 训练: 3,471, 验证: 508 |
186 | 验证: 50 (共90) |
187 |
188 | Images |
189 | [官方链接] (下载全部训练测试集) |
190 | [官方链接] (2017, 480p, TrainVal) |
191 |
192 |
193 | Masks |
194 | [谷歌网盘] [百度网盘] (复现论文结果) |
195 |
196 |
197 |
198 |
199 | 训练与测试集分割文件位于 `datasets/`.
200 |
201 | 对于每一个数据集, 需要将 `JPEGImages` 放入 `datasets/`目录下.
202 |
203 | 然后, 运行 `sh datasets/zip_dir.sh` (**注意**: 请编辑对应的目录路径) 来压缩位于`datasets//JPEGImages`的每一个视频.
204 |
205 | 将下载的mask解压缩至 `datasets`.
206 |
207 | `datasets`目录结构如下: (**注意**: 请仔细核验)
208 | ```
209 | datasets
210 | |- davis
211 | |- JPEGImages
212 | |- .zip
213 | |- .zip
214 | |- test_masks
215 | |-
216 | |- 00000.png
217 | |- 00001.png
218 | |- train.json
219 | |- test.json
220 | |- youtube-vos
221 | |- JPEGImages
222 | |- .zip
223 | |- .zip
224 | |- test_masks
225 | |-
226 | |- 00000.png
227 | |- 00001.png
228 | |- train.json
229 | |- test.json
230 | |- zip_file.sh
231 | ```
232 | ### Evaluation
233 | 运行如下的一个命令进行验证:
234 | ```shell
235 | # 验证E2FGVI模型
236 | python evaluate.py --model e2fgvi --dataset --data_root datasets/ --ckpt release_model/E2FGVI-CVPR22.pth
237 | # 验证E2FGVI-HQ模型
238 | python evaluate.py --model e2fgvi_hq --dataset --data_root datasets/ --ckpt release_model/E2FGVI-HQ-CVPR22.pth
239 |
240 | ```
241 | 若你验证 E2FGVI 模型, 那么将会得到论文中的验证结果.
242 | E2FGVI-HQ 的验证结果请参考 [[此处](https://github.com/MCG-NKU/E2FGVI#prepare-pretrained-models)].
243 |
244 | 验证结果将被保存在 `results/_` 目录下.
245 |
246 | 若需[验证temporal warping error](https://github.com/phoenix104104/fast_blind_video_consistency#evaluation), 请添加 `--save_results` 参数.
247 |
248 | ### 训练
249 | Our training configures are provided in [`train_e2fgvi.json`](./configs/train_e2fgvi.json) (for E2FGVI) and [`train_e2fgvi_hq.json`](./configs/train_e2fgvi_hq.json) (for E2FGVI-HQ).
250 |
251 | 本文的训练配置如 [`train_e2fgvi.json`](./configs/train_e2fgvi.json) (对于 E2FGVI) 与 [`train_e2fgvi_hq.json`](./configs/train_e2fgvi_hq.json) (对于 E2FGVI-HQ) 所示.
252 |
253 | 运行如下的一条命令进行训练:
254 | ```shell
255 | # 训练 E2FGVI
256 | python train.py -c configs/train_e2fgvi.json
257 | # 训练 E2FGVI-HQ
258 | python train.py -c configs/train_e2fgvi_hq.json
259 | ```
260 | 如果需要恢复训练, 请运行相同的指令.
261 |
262 | 训练损失能够使用如下命令可视化:
263 | ```shell
264 | tensorboard --logdir release_model
265 | ```
266 |
267 | 请使用上述[步骤](https://github.com/MCG-NKU/E2FGVI#evaluation)来验证训练的模型.
268 |
269 | ## 结果
270 |
271 | ### 定量结果
272 | 
273 | ## 引用
274 |
275 | 若我们的仓库对你的研究内容有帮助, 请参考如下 bibtex 引用本文:
276 |
277 | ```bibtex
278 | @inproceedings{liCvpr22vInpainting,
279 | title={Towards An End-to-End Framework for Flow-Guided Video Inpainting},
280 | author={Li, Zhen and Lu, Cheng-Ze and Qin, Jianhua and Guo, Chun-Le and Cheng, Ming-Ming},
281 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
282 | year={2022}
283 | }
284 | ```
285 | ## 联系方式
286 |
287 | 若有任何疑问, 请通过`zhenli1031ATgmail.com` 或 `czlu919AToutlook.com`联系.
288 |
289 |
290 | ## 致谢
291 |
292 | 该仓库由 [Zhen Li](https://paper99.github.io) 与 [Cheng-Ze Lu](https://github.com/LGYoung) 维护.
293 |
294 | 代码基于 [STTN](https://github.com/researchmm/STTN), [FuseFormer](https://github.com/ruiliu-ai/FuseFormer), [Focal-Transformer](https://github.com/microsoft/Focal-Transformer), 与 [MMEditing](https://github.com/open-mmlab/mmediting).
295 |
--------------------------------------------------------------------------------
/configs/train_e2fgvi.json:
--------------------------------------------------------------------------------
1 | {
2 | "seed": 2021,
3 | "save_dir": "release_model/",
4 | "train_data_loader": {
5 | "name": "youtube-vos",
6 | "data_root": "datasets",
7 | "w": 432,
8 | "h": 240,
9 | "num_local_frames": 5,
10 | "num_ref_frames": 3
11 | },
12 | "losses": {
13 | "hole_weight": 1,
14 | "valid_weight": 1,
15 | "flow_weight": 1,
16 | "adversarial_weight": 0.01,
17 | "GAN_LOSS": "hinge"
18 | },
19 | "model": {
20 | "net": "e2fgvi",
21 | "no_dis": 0
22 | },
23 | "trainer": {
24 | "type": "Adam",
25 | "beta1": 0,
26 | "beta2": 0.99,
27 | "lr": 1e-4,
28 | "batch_size": 8,
29 | "num_workers": 2,
30 | "log_freq": 100,
31 | "save_freq": 5e3,
32 | "iterations": 50e4,
33 | "scheduler": {
34 | "type": "MultiStepLR",
35 | "milestones": [
36 | 40e4
37 | ],
38 | "gamma": 0.1
39 | }
40 | }
41 | }
--------------------------------------------------------------------------------
/configs/train_e2fgvi_hq.json:
--------------------------------------------------------------------------------
1 | {
2 | "seed": 2021,
3 | "save_dir": "release_model/",
4 | "train_data_loader": {
5 | "name": "youtube-vos",
6 | "data_root": "datasets",
7 | "w": 432,
8 | "h": 240,
9 | "num_local_frames": 5,
10 | "num_ref_frames": 3
11 | },
12 | "losses": {
13 | "hole_weight": 1,
14 | "valid_weight": 1,
15 | "flow_weight": 1,
16 | "adversarial_weight": 0.01,
17 | "GAN_LOSS": "hinge"
18 | },
19 | "model": {
20 | "net": "e2fgvi_hq",
21 | "no_dis": 0
22 | },
23 | "trainer": {
24 | "type": "Adam",
25 | "beta1": 0,
26 | "beta2": 0.99,
27 | "lr": 1e-4,
28 | "batch_size": 8,
29 | "num_workers": 2,
30 | "log_freq": 100,
31 | "save_freq": 5e3,
32 | "iterations": 50e4,
33 | "scheduler": {
34 | "type": "MultiStepLR",
35 | "milestones": [
36 | 40e4
37 | ],
38 | "gamma": 0.1
39 | }
40 | }
41 | }
--------------------------------------------------------------------------------
/core/dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import random
4 |
5 | import cv2
6 | from PIL import Image
7 | import numpy as np
8 |
9 | import torch
10 | import torchvision.transforms as transforms
11 |
12 | from core.utils import (TrainZipReader, TestZipReader,
13 | create_random_shape_with_random_motion, Stack,
14 | ToTorchFormatTensor, GroupRandomHorizontalFlip)
15 |
16 |
class TrainDataset(torch.utils.data.Dataset):
    """Training dataset yielding masked video clips.

    Each item is ``(frame_tensors, mask_tensors, video_name)`` where frames
    are normalized to [-1, 1] and masks to [0, 1].

    Args:
        args (dict): Data-loader config; must contain 'num_local_frames',
            'num_ref_frames', 'w', 'h', 'data_root' and 'name'.
        debug (bool): If True, only the first 100 videos are used.
    """
    def __init__(self, args: dict, debug: bool = False):
        self.args = args
        self.num_local_frames = args['num_local_frames']
        self.num_ref_frames = args['num_ref_frames']
        self.size = self.w, self.h = (args['w'], args['h'])

        # video_dict maps video name -> number of frames in that video
        # (used both as a frame count and as a sampling length below)
        json_path = os.path.join(args['data_root'], args['name'], 'train.json')
        with open(json_path, 'r') as f:
            self.video_dict = json.load(f)
        self.video_names = list(self.video_dict.keys())
        if debug:
            self.video_names = self.video_names[:100]

        self._to_tensors = transforms.Compose([
            Stack(),
            ToTorchFormatTensor(),
        ])

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, index):
        item = self.load_item(index)
        return item

    def _sample_index(self, length, sample_length, num_ref_frame=3):
        """Sample `sample_length` consecutive (local) frame indices plus
        `num_ref_frame` sorted reference indices from the remaining frames."""
        complete_idx_set = list(range(length))
        pivot = random.randint(0, length - sample_length)
        local_idx = complete_idx_set[pivot:pivot + sample_length]
        remain_idx = list(set(complete_idx_set) - set(local_idx))
        ref_index = sorted(random.sample(remain_idx, num_ref_frame))

        return local_idx + ref_index

    def load_item(self, index):
        """Load frames/masks for one video and convert them to tensors."""
        video_name = self.video_names[index]
        # create one random (possibly moving) mask per frame
        all_masks = create_random_shape_with_random_motion(
            self.video_dict[video_name], imageHeight=self.h, imageWidth=self.w)

        # create sample index
        selected_index = self._sample_index(self.video_dict[video_name],
                                            self.num_local_frames,
                                            self.num_ref_frames)

        # read video frames; the zip path is the same for every frame,
        # so build it once outside the loop
        video_path = os.path.join(self.args['data_root'], self.args['name'],
                                  'JPEGImages', f'{video_name}.zip')
        frames = []
        masks = []
        for idx in selected_index:
            img = TrainZipReader.imread(video_path, idx).convert('RGB')
            img = img.resize(self.size)
            frames.append(img)
            masks.append(all_masks[idx])

        # normalize, to tensors
        frames = GroupRandomHorizontalFlip()(frames)
        frame_tensors = self._to_tensors(frames) * 2.0 - 1.0
        mask_tensors = self._to_tensors(masks)
        return frame_tensors, mask_tensors, video_name
80 |
81 |
class TestDataset(torch.utils.data.Dataset):
    """Evaluation dataset yielding full videos with precomputed test masks.

    Each item is ``(frame_tensors, mask_tensors, video_name, frames_PIL)``
    where frames are normalized to [-1, 1], masks to [0, 1], and
    ``frames_PIL`` keeps the original uint8 frames for metric computation.

    Args:
        args: Namespace with 'size', 'data_root' and 'dataset' attributes.
    """
    def __init__(self, args):
        self.args = args
        self.size = self.w, self.h = args.size

        # video_dict maps video name -> number of frames in that video
        with open(os.path.join(args.data_root, args.dataset, 'test.json'),
                  'r') as f:
            self.video_dict = json.load(f)
        self.video_names = list(self.video_dict.keys())

        self._to_tensors = transforms.Compose([
            Stack(),
            ToTorchFormatTensor(),
        ])

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, index):
        item = self.load_item(index)
        return item

    def load_item(self, index):
        """Load all frames and masks of one video and convert to tensors."""
        video_name = self.video_names[index]
        ref_index = list(range(self.video_dict[video_name]))

        # loop-invariant paths and dilation kernel, built once per video
        video_path = os.path.join(self.args.data_root, self.args.dataset,
                                  'JPEGImages', f'{video_name}.zip')
        mask_dir = os.path.join(self.args.data_root, self.args.dataset,
                                'test_masks', video_name)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

        # read video frames
        frames = []
        masks = []
        for idx in ref_index:
            img = TestZipReader.imread(video_path, idx).convert('RGB')
            img = img.resize(self.size)
            frames.append(img)
            mask_path = os.path.join(mask_dir, str(idx).zfill(5) + '.png')
            mask = Image.open(mask_path).resize(self.size,
                                                Image.NEAREST).convert('L')
            # origin: 0 indicates missing. now: 1 indicates missing
            mask = np.asarray(mask)
            m = np.array(mask > 0).astype(np.uint8)
            # slightly grow the hole region to cover mask boundary artifacts
            m = cv2.dilate(m, kernel, iterations=4)
            mask = Image.fromarray(m * 255)
            masks.append(mask)

        # to tensors
        frames_PIL = [np.array(f).astype(np.uint8) for f in frames]
        frame_tensors = self._to_tensors(frames) * 2.0 - 1.0
        mask_tensors = self._to_tensors(masks)
        return frame_tensors, mask_tensors, video_name, frames_PIL
136 |
--------------------------------------------------------------------------------
/core/dist.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 |
def get_world_size():
    """Find OMPI world size without calling mpi functions.

    Checks the MPI launcher environment variables in priority order and
    falls back to the number of visible CUDA devices.
    :rtype: int
    """
    for key in ('PMI_SIZE', 'OMPI_COMM_WORLD_SIZE'):
        # single env lookup instead of the original get()-then-get() pair
        value = os.environ.get(key)
        if value is not None:
            return int(value or 1)
    return torch.cuda.device_count()
15 |
16 |
def get_global_rank():
    """Find OMPI world rank without calling mpi functions.

    Checks the MPI launcher environment variables in priority order and
    falls back to 0 (single-process run).
    :rtype: int
    """
    for key in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        # single env lookup instead of the original get()-then-get() pair
        value = os.environ.get(key)
        if value is not None:
            return int(value or 0)
    return 0
27 |
28 |
def get_local_rank():
    """Find OMPI local rank without calling mpi functions.

    Checks the MPI launcher environment variables in priority order and
    falls back to 0 (single-process run).
    :rtype: int
    """
    for key in ('MPI_LOCALRANKID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
        # single env lookup instead of the original get()-then-get() pair
        value = os.environ.get(key)
        if value is not None:
            return int(value or 0)
    return 0
39 |
40 |
def get_master_ip():
    """Return the master node address for distributed initialization.

    Prefers Azure Batch environment variables and falls back to localhost.
    :rtype: str
    """
    master_node = os.environ.get('AZ_BATCH_MASTER_NODE')
    if master_node is not None:
        # value has the form "<ip>:<port>"; keep only the ip part
        return master_node.split(':')[0]
    master_node = os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE')
    if master_node is not None:
        return master_node
    return "127.0.0.1"
48 |
--------------------------------------------------------------------------------
/core/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
class AdversarialLoss(nn.Module):
    r"""
    Adversarial loss
    https://arxiv.org/abs/1711.10337

    Supported variants: ``nsgan`` (BCE), ``lsgan`` (MSE) and ``hinge``.
    """
    def __init__(self,
                 type='nsgan',
                 target_real_label=1.0,
                 target_fake_label=0.0):
        r"""
        type = nsgan | lsgan | hinge
        """
        super(AdversarialLoss, self).__init__()
        self.type = type
        # buffers so the labels follow the module across devices/dtypes
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))

        # pick the underlying criterion for the chosen GAN flavor
        criteria = {'nsgan': nn.BCELoss, 'lsgan': nn.MSELoss, 'hinge': nn.ReLU}
        if type in criteria:
            self.criterion = criteria[type]()

    def __call__(self, outputs, is_real, is_disc=None):
        if self.type != 'hinge':
            # nsgan / lsgan: compare against a broadcast real/fake label
            target = self.real_label if is_real else self.fake_label
            return self.criterion(outputs, target.expand_as(outputs))
        # hinge loss
        if not is_disc:
            # generator side: push D(fake) up
            return (-outputs).mean()
        # discriminator side: max(0, 1 - D(real)) resp. max(0, 1 + D(fake))
        signed = -outputs if is_real else outputs
        return self.criterion(1 + signed).mean()
42 |
--------------------------------------------------------------------------------
/core/lr_scheduler.py:
--------------------------------------------------------------------------------
1 | """
2 | LR scheduler from BasicSR https://github.com/xinntao/BasicSR
3 | """
4 | import math
5 | from collections import Counter
6 | from torch.optim.lr_scheduler import _LRScheduler
7 |
8 |
class MultiStepRestartLR(_LRScheduler):
    """ MultiStep with restarts learning rate scheme.
    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        milestones (list): Iterations that will decrease learning rate.
        gamma (float): Decrease ratio. Default: 0.1.
        restarts (list): Restart iterations. Default: [0].
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """
    def __init__(self,
                 optimizer,
                 milestones,
                 gamma=0.1,
                 restarts=(0, ),
                 restart_weights=(1, ),
                 last_epoch=-1):
        if len(restarts) != len(restart_weights):
            raise AssertionError('restarts and their weights do not match.')
        # Counter lets a repeated milestone decay the lr multiple times
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.restarts = restarts
        self.restart_weights = restart_weights
        super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        epoch = self.last_epoch
        groups = self.optimizer.param_groups
        # restart: reset to (weighted) initial lr
        if epoch in self.restarts:
            weight = self.restart_weights[self.restarts.index(epoch)]
            return [group['initial_lr'] * weight for group in groups]
        # ordinary iteration: keep current lr unless a milestone is hit
        decay_count = self.milestones.get(epoch, 0)
        if decay_count == 0:
            return [group['lr'] for group in groups]
        factor = self.gamma**decay_count
        return [group['lr'] * factor for group in groups]
48 |
49 |
def get_position_from_periods(iteration, cumulative_period):
    """Get the position from a period list.
    It will return the index of the right-closest number in the period list.
    For example, the cumulative_period = [100, 200, 300, 400],
    if iteration == 50, return 0;
    if iteration == 210, return 2;
    if iteration == 300, return 2.
    Args:
        iteration (int): Current iteration.
        cumulative_period (list[int]): Cumulative period list.
    Returns:
        int: The position of the right-closest number in the period list.
    """
    # first index whose cumulative bound covers the iteration
    # (None when the iteration is beyond the final period, as before)
    return next((i for i, period in enumerate(cumulative_period)
                 if iteration <= period), None)
66 |
67 |
class CosineAnnealingRestartLR(_LRScheduler):
    """ Cosine annealing with restarts learning rate scheme.
    An example of config:
    periods = [10, 10, 10, 10]
    restart_weights = [1, 0.5, 0.5, 0.5]
    eta_min=1e-7
    It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the
    scheduler will restart with the weights in restart_weights.
    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        periods (list): Period for each cosine anneling cycle.
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        eta_min (float): The mimimum lr. Default: 0.
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """
    def __init__(self,
                 optimizer,
                 periods,
                 restart_weights=(1, ),
                 eta_min=1e-7,
                 last_epoch=-1):
        if len(periods) != len(restart_weights):
            raise AssertionError(
                'periods and restart_weights should have the same length.')
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_min = eta_min
        # cumulative_period[i] == sum(periods[:i + 1]); running-total form
        total, self.cumulative_period = 0, []
        for period in self.periods:
            total += period
            self.cumulative_period.append(total)
        super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        idx = get_position_from_periods(self.last_epoch,
                                        self.cumulative_period)
        current_weight = self.restart_weights[idx]
        nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
        current_period = self.periods[idx]

        # fraction of the current cycle already completed, in [0, 1]
        phase = (self.last_epoch - nearest_restart) / current_period
        cosine_term = 1 + math.cos(math.pi * phase)
        return [
            self.eta_min +
            current_weight * 0.5 * (base_lr - self.eta_min) * cosine_term
            for base_lr in self.base_lrs
        ]
113 |
--------------------------------------------------------------------------------
/core/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import io
3 | import cv2
4 | import random
5 | import numpy as np
6 | from PIL import Image, ImageOps
7 | import zipfile
8 |
9 | import torch
10 | import matplotlib
11 | import matplotlib.patches as patches
12 | from matplotlib.path import Path
13 | from matplotlib import pyplot as plt
14 | from torchvision import transforms
15 |
16 | # matplotlib.use('agg')
17 |
18 | # ###########################################################################
19 | # Directory IO
20 | # ###########################################################################
21 |
22 |
def read_dirnames_under_root(root_dir):
    """Return the sorted names of the immediate sub-directories of *root_dir*.

    Plain files under *root_dir* are skipped; only directory names are kept.
    """
    # the original enumerate() index was unused — dropped
    dirnames = [
        name for name in sorted(os.listdir(root_dir))
        if os.path.isdir(os.path.join(root_dir, name))
    ]
    print(f'Reading directories under {root_dir}, num: {len(dirnames)}')
    return dirnames
30 |
31 |
class TrainZipReader(object):
    """Reader for training frames stored inside per-video zip archives.

    Open ZipFile handles are memoized in a class-level dict so each archive
    is opened at most once per process.
    """
    file_dict = dict()  # path -> open zipfile.ZipFile handle

    def __init__(self):
        super(TrainZipReader, self).__init__()

    @staticmethod
    def build_file_dict(path):
        """Open *path* once and cache/return its ZipFile handle."""
        cache = TrainZipReader.file_dict
        if path not in cache:
            cache[path] = zipfile.ZipFile(path, 'r')
        return cache[path]

    @staticmethod
    def imread(path, idx):
        """Decode the *idx*-th (name-sorted) archive member as a PIL image."""
        zfile = TrainZipReader.build_file_dict(path)
        names = sorted(zfile.namelist())
        raw = zfile.read(names[idx])
        return Image.open(io.BytesIO(raw))
57 |
58 |
class TestZipReader(object):
    """Reader for test frames stored inside per-video zip archives.

    Open ZipFile handles are memoized in a class-level dict so each archive
    is opened at most once per process. Decoding goes through OpenCV (unlike
    TrainZipReader, which decodes via PIL directly).
    """
    file_dict = dict()  # path -> open zipfile.ZipFile handle

    def __init__(self):
        super(TestZipReader, self).__init__()

    @staticmethod
    def build_file_dict(path):
        """Open *path* once and cache/return its ZipFile handle."""
        cache = TestZipReader.file_dict
        if path not in cache:
            cache[path] = zipfile.ZipFile(path, 'r')
        return cache[path]

    @staticmethod
    def imread(path, idx):
        """Decode the *idx*-th (name-sorted) member via OpenCV, return RGB PIL image."""
        zfile = TestZipReader.build_file_dict(path)
        names = sorted(zfile.namelist())
        raw = zfile.read(names[idx])
        buf = np.asarray(bytearray(raw), dtype=np.uint8)
        decoded = cv2.imdecode(buf, cv2.IMREAD_COLOR)
        # OpenCV decodes to BGR; convert to RGB before wrapping as PIL
        return Image.fromarray(cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB))
86 |
87 |
88 | # ###########################################################################
89 | # Data augmentation
90 | # ###########################################################################
91 |
92 |
def to_tensors():
    """Return a transform that stacks a list of PIL images and converts the
    stack to a float tensor in [0, 1] (see Stack / ToTorchFormatTensor)."""
    return transforms.Compose([Stack(), ToTorchFormatTensor()])
95 |
96 |
class GroupRandomHorizontalFlowFlip(object):
    """With probability 0.5, horizontally flip images, masks and both flow
    groups together; the flows' x-components are negated to stay consistent
    with the mirrored frames.
    """
    def __init__(self, is_flow=True):
        self.is_flow = is_flow

    def __call__(self, img_group, mask_group, flowF_group, flowB_group):
        if random.random() >= 0.5:
            # keep everything unchanged half of the time
            return img_group, mask_group, flowF_group, flowB_group
        flipped_imgs = [
            img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group
        ]
        flipped_masks = [
            mask.transpose(Image.FLIP_LEFT_RIGHT) for mask in mask_group
        ]
        # mirror the flow fields and invert the horizontal component
        flipped_ff = [f[:, ::-1] * [-1.0, 1.0] for f in flowF_group]
        flipped_fb = [f[:, ::-1] * [-1.0, 1.0] for f in flowB_group]
        return flipped_imgs, flipped_masks, flipped_ff, flipped_fb
117 |
118 |
class GroupRandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image group with a
    probability of 0.5.
    """
    def __init__(self, is_flow=False):
        self.is_flow = is_flow

    def __call__(self, img_group, is_flow=False):
        # NOTE(review): the `is_flow` call argument is ignored; only the
        # constructor flag is consulted (kept for interface compatibility).
        if random.random() >= 0.5:
            return img_group
        flipped = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
        if self.is_flow:
            # invert flow pixel values when flipping (every other image is
            # treated as an x-component)
            for i in range(0, len(flipped), 2):
                flipped[i] = ImageOps.invert(flipped[i])
        return flipped
136 |
137 |
class Stack(object):
    """Stack a list of same-size PIL images into one numpy array along a new
    frame axis (axis=2), optionally reversing RGB channel order."""
    def __init__(self, roll=False):
        self.roll = roll

    def __call__(self, img_group):
        mode = img_group[0].mode
        if mode == '1':
            # binary images are promoted to 8-bit grayscale first
            img_group = [img.convert('L') for img in img_group]
            mode = 'L'
        if mode == 'L':
            # (H, W) -> (H, W, 1), then stacked to (H, W, num_frames, 1)
            return np.stack([np.expand_dims(img, 2) for img in img_group],
                            axis=2)
        if mode == 'RGB':
            if not self.roll:
                return np.stack(img_group, axis=2)
            # roll: reverse the channel order (RGB -> BGR)
            return np.stack([np.array(img)[:, :, ::-1] for img in img_group],
                            axis=2)
        raise NotImplementedError(f"Image mode {mode}")
157 |
158 |
class ToTorchFormatTensor(object):
    """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
    to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
    def __init__(self, div=True):
        self.div = div

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # stacked numpy input arrives as (H, W, L, C); reorder to (L, C, H, W)
            tensor = torch.from_numpy(pic).permute(2, 3, 0, 1).contiguous()
        else:
            # PIL Image: raw bytes -> (H, W, C) byte tensor -> (C, H, W)
            tensor = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
            tensor = tensor.view(pic.size[1], pic.size[0], len(pic.mode))
            tensor = tensor.transpose(0, 1).transpose(0, 2).contiguous()
        tensor = tensor.float()
        return tensor.div(255) if self.div else tensor
179 |
180 |
181 | # ###########################################################################
182 | # Create masks with random shape
183 | # ###########################################################################
184 |
185 |
def create_random_shape_with_random_motion(video_length,
                                           imageHeight=240,
                                           imageWidth=432):
    """Create a list of `video_length` single-channel ('L') PIL mask images.

    A random blob is pasted onto a black canvas; with probability 0.5 the
    same mask is repeated for every frame, otherwise the blob is moved
    frame-to-frame via `random_move_control_points`.
    """
    # get a random shape
    height = random.randint(imageHeight // 3, imageHeight - 1)
    width = random.randint(imageWidth // 3, imageWidth - 1)
    edge_num = random.randint(6, 8)
    ratio = random.randint(6, 8) / 10
    region = get_random_shape(edge_num=edge_num,
                              ratio=ratio,
                              height=height,
                              width=width)
    region_width, region_height = region.size
    # get random position
    # NOTE(review): x is sampled along the height axis and y along the width
    # axis; the paste() calls below use (y, x, ...) ordering accordingly
    x, y = random.randint(0, imageHeight - region_height), random.randint(
        0, imageWidth - region_width)
    velocity = get_random_velocity(max_speed=3)
    m = Image.fromarray(np.zeros((imageHeight, imageWidth)).astype(np.uint8))
    m.paste(region, (y, x, y + region.size[0], x + region.size[1]))
    masks = [m.convert('L')]
    # return fixed masks
    if random.uniform(0, 1) > 0.5:
        return masks * video_length
    # return moving masks
    for _ in range(video_length - 1):
        x, y, velocity = random_move_control_points(x,
                                                    y,
                                                    imageHeight,
                                                    imageWidth,
                                                    velocity,
                                                    region.size,
                                                    maxLineAcceleration=(3,
                                                                         0.5),
                                                    maxInitSpeed=3)
        m = Image.fromarray(
            np.zeros((imageHeight, imageWidth)).astype(np.uint8))
        m.paste(region, (y, x, y + region.size[0], x + region.size[1]))
        masks.append(m.convert('L'))
    return masks
225 |
226 |
def get_random_shape(edge_num=9, ratio=0.7, width=432, height=240):
    '''
    Render a random closed bezier blob and return it as a cropped PIL image.

    There is the initial point and 3 points per cubic bezier curve, so the
    curve only passes through `edge_num` points, which will be the sharp
    edges; the other 2 control points modify the shape of each curve.
    edge_num, Number of possibly sharp edges
    ratio, (0, 1) magnitude of the perturbation from the unit circle
    '''
    points_num = edge_num * 3 + 1
    angles = np.linspace(0, 2 * np.pi, points_num)
    # all CURVE4 codes except the initial MOVETO; closing by repeating the
    # first vertex (instead of Path.CLOSEPOLY) avoids an unnecessary
    # straight line
    codes = np.full(points_num, Path.CURVE4)
    codes[0] = Path.MOVETO
    radii = (2 * ratio * np.random.random(points_num) + 1 - ratio)[:, None]
    verts = np.stack((np.cos(angles), np.sin(angles))).T * radii
    verts[-1, :] = verts[0, :]
    path = Path(verts, codes)
    # rasterize the path with matplotlib
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.add_patch(patches.PathPatch(path, facecolor='black', lw=2))
    ax.set_xlim(np.min(verts) * 1.1, np.max(verts) * 1.1)
    ax.set_ylim(np.min(verts) * 1.1, np.max(verts) * 1.1)
    ax.axis('off')  # removes the axis to leave only the shape
    fig.canvas.draw()
    # grab the rendered canvas as a numpy RGB image
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape((fig.canvas.get_width_height()[::-1] + (3, )))
    plt.close(fig)
    # resize, binarize (shape -> 255, background -> 0), and crop tightly
    data = cv2.resize(data, (width, height))[:, :, 0]
    data = (1 - np.array(data > 0).astype(np.uint8)) * 255
    coords = np.where(data > 0)
    xmin, xmax = np.min(coords[0]), np.max(coords[0])
    ymin, ymax = np.min(coords[1]), np.max(coords[1])
    return Image.fromarray(data).crop((ymin, xmin, ymax, xmax))
266 |
267 |
def random_accelerate(velocity, maxAcceleration, dist='uniform'):
    """Perturb a (speed, angle) velocity by a random acceleration.

    `maxAcceleration` is a (max_d_speed, max_d_angle) pair; `dist` selects
    the noise distribution ('uniform' or 'guassian').

    Raises NotImplementedError for any other `dist`.
    """
    speed, angle = velocity
    d_speed, d_angle = maxAcceleration
    if dist == 'uniform':
        return (speed + np.random.uniform(-d_speed, d_speed),
                angle + np.random.uniform(-d_angle, d_angle))
    if dist == 'guassian':
        return (speed + np.random.normal(0, d_speed / 2),
                angle + np.random.normal(0, d_angle / 2))
    raise NotImplementedError(f'Distribution type {dist} is not supported.')
281 |
282 |
def get_random_velocity(max_speed=3, dist='uniform'):
    """Sample a random (speed, angle) velocity.

    speed is drawn from [0, max_speed) for 'uniform', or as |N(0, max_speed/2)|
    for 'guassian'; angle is uniform in [0, 2*pi).

    Raises NotImplementedError for any other `dist`.
    """
    if dist == 'uniform':
        # BUG FIX: np.random.uniform(max_speed) binds low=max_speed with the
        # default high=1.0, so it sampled from [1.0, max_speed) instead of
        # the intended [0, max_speed).
        speed = np.random.uniform(0, max_speed)
    elif dist == 'guassian':
        speed = np.abs(np.random.normal(0, max_speed / 2))
    else:
        raise NotImplementedError(
            f'Distribution type {dist} is not supported.')
    angle = np.random.uniform(0, 2 * np.pi)
    return (speed, angle)
293 |
294 |
def random_move_control_points(X,
                               Y,
                               imageHeight,
                               imageWidth,
                               lineVelocity,
                               region_size,
                               maxLineAcceleration=(3, 0.5),
                               maxInitSpeed=3):
    """Advance the region position one step and update its velocity.

    Moves (X, Y) along `lineVelocity`, perturbs the velocity with gaussian
    acceleration, and re-samples a fresh velocity whenever the region would
    leave the image. Returns the clipped position and the new velocity.
    """
    region_width, region_height = region_size
    speed, angle = lineVelocity
    # step along the current heading (X indexes rows, Y indexes columns)
    X += int(speed * np.cos(angle))
    Y += int(speed * np.sin(angle))
    lineVelocity = random_accelerate(lineVelocity,
                                     maxLineAcceleration,
                                     dist='guassian')
    out_of_bounds = (X > imageHeight - region_height or X < 0
                     or Y > imageWidth - region_width or Y < 0)
    if out_of_bounds:
        # region hit the border: restart with a fresh random velocity
        lineVelocity = get_random_velocity(maxInitSpeed, dist='guassian')
    return (np.clip(X, 0, imageHeight - region_height),
            np.clip(Y, 0, imageWidth - region_width),
            lineVelocity)
316 |
317 |
if __name__ == '__main__':

    # visual smoke test: display a few randomly generated mask sequences
    num_trials = 10
    for _ in range(num_trials):
        num_frames = 10
        # The returned masks are either stationary (50%) or moving (50%)
        mask_sequence = create_random_shape_with_random_motion(num_frames,
                                                               imageHeight=240,
                                                               imageWidth=432)

        for mask in mask_sequence:
            cv2.imshow('mask', np.array(mask))
            cv2.waitKey(500)
331 |
--------------------------------------------------------------------------------
/datasets/davis/test.json:
--------------------------------------------------------------------------------
1 | {"bear": 82, "blackswan": 50, "bmx-bumps": 90, "bmx-trees": 80, "boat": 75, "breakdance": 84, "breakdance-flare": 71, "bus": 80, "camel": 90, "car-roundabout": 75, "car-shadow": 40, "car-turn": 80, "cows": 104, "dance-jump": 60, "dance-twirl": 90, "dog": 60, "dog-agility": 25, "drift-chicane": 52, "drift-straight": 50, "drift-turn": 64, "elephant": 80, "flamingo": 80, "goat": 90, "hike": 80, "hockey": 75, "horsejump-high": 50, "horsejump-low": 60, "kite-surf": 50, "kite-walk": 80, "libby": 49, "lucia": 70, "mallard-fly": 70, "mallard-water": 80, "motocross-bumps": 60, "motocross-jump": 40, "motorbike": 43, "paragliding": 70, "paragliding-launch": 80, "parkour": 100, "rhino": 90, "rollerblade": 35, "scooter-black": 43, "scooter-gray": 75, "soapbox": 99, "soccerball": 48, "stroller": 91, "surf": 55, "swing": 60, "tennis": 70, "train": 80}
--------------------------------------------------------------------------------
/datasets/davis/train.json:
--------------------------------------------------------------------------------
1 | {"baseball": 90, "basketball-game": 77, "bears-ball": 78, "bmx-rider": 85, "butterfly": 80, "car-competition": 66, "cat": 52, "chairlift": 99, "circus": 73, "city-ride": 70, "crafting": 45, "curling": 76, "dog-competition": 85, "dolphins-show": 74, "dribbling": 49, "drone-flying": 70, "ducks": 75, "elephant-hyenas": 55, "giraffes": 88, "gym-ball": 69, "helicopter-landing": 77, "horse-race": 80, "horses-kids": 78, "hurdles-race": 55, "ice-hockey": 52, "jet-ski": 83, "juggling-selfie": 78, "kayak-race": 63, "kids-robot": 75, "landing": 35, "luggage": 83, "mantaray": 73, "marbles": 70, "mascot": 78, "mermaid": 78, "monster-trucks": 99, "motorbike-indoors": 79, "motorbike-race": 88, "music-band": 87, "obstacles": 81, "obstacles-race": 48, "peacock": 75, "plane-exhibition": 73, "puppet": 100, "robot-battle": 85, "robotic-arm": 82, "rodeo": 85, "sea-turtle": 90, "skydiving-jumping": 75, "snowboard-race": 75, "snowboard-sand": 55, "surfer": 80, "swimmer": 86, "table-tennis": 70, "tram": 84, "trucks-race": 78, "twist-dance": 83, "volleyball-beach": 73, "water-slide": 88, "weightlifting": 90}
--------------------------------------------------------------------------------
/datasets/youtube-vos/test.json:
--------------------------------------------------------------------------------
1 | {"0070461469": 91, "00bd64cb00": 180, "00fef116ee": 96, "012257ffcf": 180, "01475d1fe7": 180, "0163b18674": 96, "017fa2adaa": 180, "0232ba85ed": 180, "02b1a46f42": 180, "02caec8ac0": 91, "047436c72c": 96, "0481e165b4": 150, "04f98557e7": 144, "05e73c3ecb": 96, "08f95ce1ff": 144, "0b6db1c6fd": 96, "0bd8c18197": 180, "0c6d13ee2c": 91, "0c7ba00455": 96, "0cba3e52eb": 91, "0d16524447": 150, "0d4827437d": 150, "0d62fa582a": 180, "0e1f91c0d7": 91, "0ef454b3f0": 91, "10e18fcf0c": 96, "11105e147e": 91, "11444b16da": 91, "11a4df37a4": 180, "11b3298d6a": 96, "13006c4c7e": 96, "1345523ba1": 180, "144a16eb12": 180, "15a6536e74": 180, "1616507c9e": 180, "1655f4782a": 92, "16608ccef6": 96, "16bc05b66c": 150, "16f1e1779b": 96, "17caf00e26": 96, "18f1e2f716": 91, "191a0bfcdf": 180, "19d4acf831": 91, "1a1dc21969": 96, "1a72d9fcea": 150, "1a92c81edd": 180, "1b2c2022a3": 96, "1d1601d079": 180, "1db7b25d1c": 180, "1dee5b7b5a": 150, "1e0c2e54f2": 96, "1e458b1539": 92, "1e6ac08c86": 91, "1e790eae99": 56, "1ed0c6ca5b": 96, "1edbdb6d18": 180, "1f2015e056": 96, "215ac56b15": 180, "2233485b49": 96, "224d171af6": 180, "237c6ebaf4": 91, "2462c51412": 96, "24bf968338": 180, "250d5953a0": 150, "25bcf222fb": 180, "25ea8feecf": 150, "25fc493839": 92, "262f69837e": 180, "264ca20298": 180, "26d8d48248": 51, "270f84c5e5": 91, "27889bc0fe": 180, "29b87846e7": 96, "29d2e79171": 180, "2a44411a3d": 180, "2b426fd330": 180, "2c4c4e2d5b": 180, "2c4c718eda": 180, "2c962c1bbe": 180, "2cc841341c": 92, "2cf6c4d17e": 91, "2d7ef0be04": 180, "2e5e52c6c8": 150, "2ef6fce8c6": 144, "3014e769bf": 180, "30d5f163b6": 180, "318df73d6a": 90, "31fbb9df3c": 96, "3255fcad2f": 180, "3303eea8e4": 91, "3447c30052": 150, "362722660c": 180, "37e0b4642b": 91, "383e51ed93": 180, "386b050bd0": 41, "3876ba3136": 180, "388ec2934c": 180, "38b45d9c6b": 96, "396680839c": 150, "39ffa3a4a4": 180, "3b0291b2be": 150, "3b333693f4": 180, "3bde1da2cf": 96, "3c5f4e6672": 91, "3c80682cc6": 92, "3ce634a1c1": 180, "3d6a761295": 96, 
"3da878c317": 91, "3db571b7ee": 96, "3e2336812c": 180, "3f16b04d6d": 96, "3fbbc75c5e": 180, "4015a1e1cc": 87, "406cd7bd48": 91, "407b87ba26": 91, "40a5628dcc": 91, "41af239f5e": 180, "42c671b285": 180, "42de37f462": 180, "4381c60a2f": 180, "4445dc0af5": 180, "44a3419d24": 180, "4566034eaf": 51, "45877fd086": 180, "4595935b88": 91, "4923010cfe": 96, "49b6d81ee8": 180, "4a39c34139": 180, "4a5a9fde01": 144, "4a90394892": 180, "4af10534e4": 180, "4af307f5bc": 180, "4be0ac97df": 91, "4be9025726": 91, "4c18a7bfab": 91, "4c269afea9": 91, "4c3db058db": 179, "4e1ef26a1e": 96, "50f4c0195b": 150, "50f89963c0": 96, "5105c5e4b8": 180, "51d60e4f93": 46, "51ee638399": 96, "522ea1a892": 180, "528e9f30e7": 91, "532efb206a": 180, "544b1486ac": 91, "5592eb680c": 180, "562fadda3a": 91, "568b30cf93": 150, "575f0e2d8e": 91, "5767fe466c": 150, "581c78d558": 180, "5a0ddcf128": 96, "5adf056317": 144, "5b33c701ce": 180, "5b8f636b33": 150, "5b9d26b1d7": 180, "5c24813a0b": 180, "5d0b35f30f": 46, "5e130392e1": 96, "5e41efe5bc": 180, "5e75de78ae": 91, "5fc34880f7": 180, "60912d6bab": 96, "612c96383d": 180, "61e5fd2205": 144, "620e350d23": 180, "62c27fcaaf": 180, "637c22d967": 91, "63eaebe4a2": 96, "63fd6b311e": 180, "64099f32ab": 180, "65643c4b34": 96, "660a88feb5": 180, "664b8d0c9f": 150, "665a7947b0": 180, "66affc2e86": 180, "673b1c03c9": 96, "67780f49c2": 91, "679a24b7bd": 180, "680d35b75b": 144, "68364a69ef": 180, "683bfaf498": 180, "68e883ff28": 180, "691f63f681": 180, "69f2d3146c": 96, "6c5c018237": 91, "6caa33f43a": 96, "6d2c7cc107": 180, "6d55effbbe": 144, "6d6b09b420": 51, "6d715acc3e": 180, "6e89b7359d": 96, "6e9428d555": 150, "6e9feafa2b": 91, "6eced45fee": 180, "6ef0b3282c": 96, "6f9019f0ea": 91, "6fe0ee9b7c": 180, "6ff74d4995": 180, "712b6ec68e": 96, "71680a627f": 96, "716aad4b56": 180, "721c2cda07": 180, "72218d52ac": 96, "7286b8aac9": 91, "728ba7998d": 91, "73b2b9af5f": 96, "7452941f4f": 180, "759d8249dd": 91, "75a55907dc": 150, "75f3a2a19e": 150, "77e7e4b1a1": 144, "7898e6542c": 
180, "78e639c2c4": 91, "79091168f8": 180, "7ad5af3fe6": 180, "7b1a7dec16": 150, "7b36c4c3db": 180, "7b455d07cc": 150, "7bce4cfa48": 180, "7c064444d0": 144, "7c8014406a": 91, "7cb70182e5": 96, "7d04e540f5": 91, "7d5df020bf": 96, "7dfda4322c": 96, "7e6a27cc7c": 96, "7e9e344bf4": 180, "7eb9424a53": 180, "7ec8ea61f4": 91, "7fd2806fb0": 180, "8006501830": 150, "8014aeb412": 180, "80d1d22999": 180, "812f31be15": 144, "81312af68f": 92, "82843a1676": 150, "835aea9584": 36, "8366c67e9b": 180, "8467aa6c5c": 180, "8470ee5f48": 180, "8473ae2c60": 180, "8519765a65": 150, "851f73e4fc": 96, "85621c2c81": 150, "85b045995c": 180, "860c0a7cf8": 92, "861bd4b31e": 180, "8639adb930": 180, "8683e4d414": 150, "8687e892ff": 180, "86c5907811": 180, "870c197c8b": 180, "87de455fb7": 180, "87e1975888": 96, "87f5d4903c": 96, "883ede763d": 150, "88b84fe107": 91, "88ee198ce0": 91, "89d148a39f": 96, "89f3d789c5": 180, "8a22bb6c32": 180, "8a76048654": 180, "8a99d63296": 97, "8b0697f61a": 96, "8b722babfb": 180, "8ba5691030": 180, "8bdd52a66b": 150, "8c427b6a57": 180, "8cb68f36f6": 91, "8cbf0d6194": 180, "8d1ab4a2ed": 91, "8d55a5aebb": 180, "8d8c5906bd": 180, "8eb95e2e56": 150, "8f99788aa7": 180, "8fa5b3778f": 91, "9009ab4811": 91, "90c10e44cf": 91, "90c2c5c336": 96, "9124189275": 91, "91ee8300e7": 144, "9246556dfd": 91, "9323741e3b": 150, "94a33d3d20": 180, "9584210f86": 91, "9637e3b658": 51, "966c4c022e": 180, "9781e083b5": 180, "990d358980": 180, "995c087687": 150, "99a7d42674": 144, "99f056c109": 180, "9a29032b9c": 180, "9b07fc4cf6": 180, "9b5aa49509": 96, "9b5abb8108": 91, "9be210e984": 150, "9c3c28740e": 180, "9cace717c5": 180, "9d3ff7c1c1": 91, "9d8c66d92c": 150, "9eaa2f1fcc": 91, "9f1967f60f": 96, "9fa359e1cb": 150, "9fca469ddd": 96, "9ff11b620a": 180, "9ff655b9a3": 180, "a029b21901": 180, "a0c7eedeb8": 144, "a15e70486b": 180, "a35bef8bbf": 180, "a4309379a2": 91, "a51335af59": 96, "a5690fb3bf": 180, "a5b71f76fb": 86, "a5c8b1f945": 150, "a635426233": 150, "a73cc75b81": 144, "a7863d3903": 180, 
"a88f1fd4e3": 144, "aa2e90aa98": 144, "aab5ecf878": 91, "aafc5edf08": 96, "ab49400ffe": 180, "acd7b890f6": 91, "ad3ee9b86b": 180, "ad5fda372c": 144, "adb2040e5f": 91, "ae30aed29d": 180, "ae57b941a0": 180, "aeb9de8f66": 41, "af658a277c": 91, "af881cd801": 150, "b016a85236": 180, "b0313efe37": 96, "b19d6e149a": 120, "b19f091836": 180, "b2304e81df": 144, "b2d23dcf3a": 150, "b3cee57f31": 36, "b41a7ebfc6": 180, "b455f801b5": 46, "b47336c07b": 96, "b499ce791f": 180, "b52d26ddf9": 96, "b5c525cb08": 180, "b5d3b9be03": 91, "b6386bc3ce": 96, "b748b0f3be": 180, "b75e9ea782": 180, "b8237af453": 180, "b8a2104720": 96, "b8d6f92a65": 96, "b8f93a4094": 180, "bb0a1708ea": 180, "bb2245ab94": 180, "bb4ae8019f": 180, "bbdc38baa0": 76, "bbfe438d63": 96, "bc2be9fdc8": 96, "bcc00265f4": 96, "bd42cc48e4": 150, "bd43315417": 180, "bd85b04982": 51, "bda3146a46": 96, "be2b40d82a": 150, "c0f856e4de": 96, "c1bfacba4a": 91, "c1dcd30fb2": 96, "c285ede7f3": 180, "c2a6163d39": 150, "c3517ebed5": 86, "c3aabac30c": 180, "c3bb62a2f7": 144, "c454f19e90": 150, "c4c410ccd7": 180, "c5b94822e3": 180, "c64e9d1f7e": 91, "c682d1748f": 150, "c6d04b1ca3": 180, "c6dda81d86": 180, "c71623ab0c": 180, "c7db88a9db": 144, "c80ecb97d6": 150, "c8dd4de705": 180, "c915c8cbba": 150, "cb25a994d8": 144, "cba3e31e88": 91, "cc43a853e2": 180, "cc6c653874": 180, "cc718c7746": 180, "cc7e050f7f": 144, "cd14ed8653": 144, "cd5e4efaad": 46, "cddf78284d": 86, "cde37afe57": 144, "ce358eaf23": 150, "ce45145721": 91, "ce7d4af66d": 180, "ce9fb4bd8e": 91, "cec4db17a0": 180, "cecdd82d3c": 180, "ceea39e735": 180, "cf3e28c92a": 180, "cf8c671dab": 150, "cfd1e8166f": 96, "cfe7d98e50": 150, "cff0bbcba8": 96, "d1219663b7": 180, "d18ea7cd51": 180, "d1ed509b94": 91, "d22c5d5908": 81, "d2c6c7d8f6": 96, "d380084b7c": 91, "d3a2586e34": 180, "d3b1039c67": 180, "d3b25a44b3": 180, "d3f1d615b1": 180, "d7203fdab6": 96, "d76e963754": 96, "d7b3892660": 66, "d8b3e257da": 150, "d8b93e6bb1": 180, "d949468ad6": 180, "da553b619f": 180, "daac20af89": 180, 
"db8bf2430a": 180, "dbd729449a": 180, "dc0928b157": 91, "dc9aa0b8c0": 180, "dcc0637430": 180, "dcd3e1b53e": 86, "de1854f657": 101, "deb31e46cf": 96, "debccf2743": 150, "decf924833": 150, "e08b241b91": 180, "e0daa3b339": 180, "e1a52251b7": 180, "e1fc6d5237": 91, "e228ce16fd": 96, "e36dbb2ab7": 91, "e3dcf7a45e": 180, "e411e957af": 180, "e412e6a76b": 180, "e45a003b97": 179, "e60826ddf9": 91, "e6295c843b": 96, "e62c23b62b": 150, "e6b7a8fe73": 180, "e6f0e3131c": 180, "e7a3f8884e": 180, "e7c176739c": 180, "e965cd989b": 86, "e989440f7b": 150, "e98d115b9c": 81, "ea5f8c74d6": 180, "ea8a5b5a78": 96, "eaad295e8c": 150, "eaf4947f74": 180, "eb65451f4b": 92, "eb79c39e8e": 180, "eb92c92912": 96, "ebbb88e5f5": 180, "ec9b46eb6c": 180, "eca0be379d": 180, "ed33e8efb7": 66, "eda3a7bbb1": 150, "ee3ff10184": 180, "eec8403cc8": 91, "eee2db8829": 150, "ef22b8a227": 91, "ef8737ca22": 180, "eff7c1c098": 180, "f00dc892b2": 96, "f019c9ff98": 96, "f01edcbffb": 179, "f0866da89c": 180, "f12eb5256e": 180, "f1df2ea2dc": 180, "f29119c644": 180, "f3419f3a62": 150, "f35029f76d": 180, "f39dc2240d": 180, "f3aa63fa74": 150, "f3f3c201bd": 180, "f4865471b4": 96, "f505ae958c": 91, "f7605e73cd": 150, "f7917687d6": 180, "f7d310e219": 180, "f7e25f87b2": 180, "f94cd39525": 91, "f9f9aa431c": 180, "fa666fcc95": 66, "fb10740465": 180, "fb25b14e48": 91, "fb28ec1ba3": 150, "fbdda5ec7b": 96, "fbdf2180ee": 150, "fc0db37221": 91, "fd237cf4fb": 180, "fe36582e18": 180, "fef14bb2f2": 180, "ffe59ed1c1": 150}
--------------------------------------------------------------------------------
/datasets/zip_dir.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Compress each video directory under $folder into <name>.zip, then delete
# the original directory. Plain files directly under $folder are skipped.

# Choose one path to compress videos
folder='./datasets/davis/JPEGImages'
# folder='./datasets/youtube-vos/JPEGImages'

# BUG FIX: '-f' tests for a regular file, so the check always failed for a
# directory and the script printed the error even when $folder existed;
# '-d' is the correct test. Variables are also quoted to survive spaces.
if [ -d "$folder" ];then
    for file in "$folder"/*
    do
        if test -f "$file"
        then
            echo "$file" is file
        else
            echo compressing \""$file"\" ...
            zip -q -r -j "$file.zip" "$file"/
            rm -rf "$file"/
        fi
    done
else
    echo '['"$folder"']' 'does not exist. Please check the directory.'
fi

echo 'Done!'
24 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: e2fgvi
2 | channels:
3 | - pytorch
4 | - defaults
5 | dependencies:
6 | - _libgcc_mutex=0.1=main
7 | - _openmp_mutex=4.5=1_gnu
8 | - absl-py=0.15.0=pyhd3eb1b0_0
9 | - aiohttp=3.8.1=py37h7f8727e_1
10 | - aiosignal=1.2.0=pyhd3eb1b0_0
11 | - async-timeout=4.0.1=pyhd3eb1b0_0
12 | - asynctest=0.13.0=py_0
13 | - attrs=21.4.0=pyhd3eb1b0_0
14 | - blas=1.0=mkl
15 | - blinker=1.4=py37h06a4308_0
16 | - brotli=1.0.9=he6710b0_2
17 | - brotlipy=0.7.0=py37h27cfd23_1003
18 | - c-ares=1.18.1=h7f8727e_0
19 | - ca-certificates=2022.3.29=h06a4308_0
20 | - cachetools=4.2.2=pyhd3eb1b0_0
21 | - certifi=2021.10.8=py37h06a4308_2
22 | - cffi=1.15.0=py37hd667e15_1
23 | - charset-normalizer=2.0.4=pyhd3eb1b0_0
24 | - click=8.0.4=py37h06a4308_0
25 | - cloudpickle=2.0.0=pyhd3eb1b0_0
26 | - cryptography=3.4.8=py37hd23ed53_0
27 | - cudatoolkit=10.1.243=h6bb024c_0
28 | - cycler=0.11.0=pyhd3eb1b0_0
29 | - cytoolz=0.11.0=py37h7b6447c_0
30 | - dask-core=2021.10.0=pyhd3eb1b0_0
31 | - dataclasses=0.8=pyh6d0b6a4_7
32 | - dbus=1.13.18=hb2f20db_0
33 | - expat=2.4.4=h295c915_0
34 | - fontconfig=2.13.1=h6c09931_0
35 | - fonttools=4.25.0=pyhd3eb1b0_0
36 | - freetype=2.11.0=h70c0345_0
37 | - frozenlist=1.2.0=py37h7f8727e_0
38 | - fsspec=2022.2.0=pyhd3eb1b0_0
39 | - giflib=5.2.1=h7b6447c_0
40 | - glib=2.69.1=h4ff587b_1
41 | - google-auth=2.6.0=pyhd3eb1b0_0
42 | - google-auth-oauthlib=0.4.1=py_2
43 | - grpcio=1.42.0=py37hce63b2e_0
44 | - gst-plugins-base=1.14.0=h8213a91_2
45 | - gstreamer=1.14.0=h28cd5cc_2
46 | - icu=58.2=he6710b0_3
47 | - idna=3.3=pyhd3eb1b0_0
48 | - imageio=2.9.0=pyhd3eb1b0_0
49 | - importlib-metadata=4.11.3=py37h06a4308_0
50 | - intel-openmp=2021.4.0=h06a4308_3561
51 | - jpeg=9d=h7f8727e_0
52 | - kiwisolver=1.3.2=py37h295c915_0
53 | - lcms2=2.12=h3be6417_0
54 | - ld_impl_linux-64=2.35.1=h7274673_9
55 | - libffi=3.3=he6710b0_2
56 | - libgcc-ng=9.3.0=h5101ec6_17
57 | - libgfortran-ng=7.5.0=ha8ba4b0_17
58 | - libgfortran4=7.5.0=ha8ba4b0_17
59 | - libgomp=9.3.0=h5101ec6_17
60 | - libpng=1.6.37=hbc83047_0
61 | - libprotobuf=3.19.1=h4ff587b_0
62 | - libstdcxx-ng=9.3.0=hd4cf53a_17
63 | - libtiff=4.2.0=h85742a9_0
64 | - libuuid=1.0.3=h7f8727e_2
65 | - libwebp=1.2.2=h55f646e_0
66 | - libwebp-base=1.2.2=h7f8727e_0
67 | - libxcb=1.14=h7b6447c_0
68 | - libxml2=2.9.12=h03d6c58_0
69 | - locket=0.2.1=py37h06a4308_2
70 | - lz4-c=1.9.3=h295c915_1
71 | - markdown=3.3.4=py37h06a4308_0
72 | - matplotlib=3.5.1=py37h06a4308_1
73 | - matplotlib-base=3.5.1=py37ha18d171_1
74 | - mkl=2021.4.0=h06a4308_640
75 | - mkl-service=2.4.0=py37h7f8727e_0
76 | - mkl_fft=1.3.1=py37hd3c417c_0
77 | - mkl_random=1.2.2=py37h51133e4_0
78 | - multidict=5.2.0=py37h7f8727e_2
79 | - munkres=1.1.4=py_0
80 | - ncurses=6.3=h7f8727e_2
81 | - networkx=2.6.3=pyhd3eb1b0_0
82 | - ninja=1.10.2=py37hd09550d_3
83 | - numpy=1.21.2=py37h20f2e39_0
84 | - numpy-base=1.21.2=py37h79a1101_0
85 | - oauthlib=3.2.0=pyhd3eb1b0_0
86 | - openssl=1.1.1n=h7f8727e_0
87 | - packaging=21.3=pyhd3eb1b0_0
88 | - partd=1.2.0=pyhd3eb1b0_1
89 | - pcre=8.45=h295c915_0
90 | - pillow=9.0.1=py37h22f2fdc_0
91 | - pip=21.2.2=py37h06a4308_0
92 | - protobuf=3.19.1=py37h295c915_0
93 | - pyasn1=0.4.8=pyhd3eb1b0_0
94 | - pyasn1-modules=0.2.8=py_0
95 | - pycparser=2.21=pyhd3eb1b0_0
96 | - pyjwt=2.1.0=py37h06a4308_0
97 | - pyopenssl=21.0.0=pyhd3eb1b0_1
98 | - pyqt=5.9.2=py37h05f1152_2
99 | - pysocks=1.7.1=py37_1
100 | - python=3.7.13=h12debd9_0
101 | - python-dateutil=2.8.2=pyhd3eb1b0_0
102 | - pytorch=1.5.1=py3.7_cuda10.1.243_cudnn7.6.3_0
103 | - pywavelets=1.3.0=py37h7f8727e_0
104 | - qt=5.9.7=h5867ecd_1
105 | - readline=8.1.2=h7f8727e_1
106 | - requests=2.27.1=pyhd3eb1b0_0
107 | - requests-oauthlib=1.3.0=py_0
108 | - rsa=4.7.2=pyhd3eb1b0_1
109 | - scikit-image=0.16.2=py37h0573a6f_0
110 | - scipy=1.7.3=py37hc147768_0
111 | - setuptools=58.0.4=py37h06a4308_0
112 | - sip=4.19.8=py37hf484d3e_0
113 | - six=1.16.0=pyhd3eb1b0_1
114 | - sqlite=3.38.2=hc218d9a_0
115 | - tensorboard=2.6.0=py_1
116 | - tensorboard-data-server=0.6.0=py37hca6d32c_0
117 | - tensorboard-plugin-wit=1.6.0=py_0
118 | - tk=8.6.11=h1ccaba5_0
119 | - toolz=0.11.2=pyhd3eb1b0_0
120 | - torchvision=0.6.1=py37_cu101
121 | - tornado=6.1=py37h27cfd23_0
122 | - typing-extensions=4.1.1=hd3eb1b0_0
123 | - typing_extensions=4.1.1=pyh06a4308_0
124 | - urllib3=1.26.8=pyhd3eb1b0_0
125 | - werkzeug=2.0.3=pyhd3eb1b0_0
126 | - wheel=0.37.1=pyhd3eb1b0_0
127 | - xz=5.2.5=h7b6447c_0
128 | - yaml=0.2.5=h7b6447c_0
129 | - yarl=1.6.3=py37h27cfd23_0
130 | - zipp=3.7.0=pyhd3eb1b0_0
131 | - zlib=1.2.11=h7f8727e_4
132 | - zstd=1.4.9=haebb681_0
133 | - pip:
134 | - addict==2.4.0
135 | - mmcv-full==1.4.8
136 | - opencv-python==4.5.5.64
137 | - pyparsing==3.0.8
138 | - pyyaml==6.0
139 | - tqdm==4.64.0
140 | - yapf==0.32.0
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import cv2
3 | import numpy as np
4 | import importlib
5 | import os
6 | import argparse
7 | from PIL import Image
8 |
9 | import torch
10 | from torch.utils.data import DataLoader
11 |
12 | from core.dataset import TestDataset
13 | from core.metrics import calc_psnr_and_ssim, calculate_i3d_activations, calculate_vfid, init_i3d_model
14 |
# global variables
w, h = 432, 240  # evaluation frame size (width, height) in pixels
ref_length = 10  # sample a non-local reference frame every `ref_length` frames
neighbor_stride = 5  # half-width/stride of the local temporal window
default_fps = 24  # NOTE(review): not used in this file — confirm before removal
20 |
21 |
# sample reference frames from the whole video
def get_ref_index(neighbor_ids, length, ref_stride=None):
    """Return indices of reference frames sampled every `ref_stride` frames.

    Indices already present in `neighbor_ids` are skipped. `ref_stride`
    defaults to the module-level `ref_length`, so existing callers are
    unchanged; passing it explicitly generalizes the sampling interval.
    """
    stride = ref_length if ref_stride is None else ref_stride
    return [i for i in range(0, length, stride) if i not in neighbor_ids]
29 |
30 |
def main_worker(args):
    """Evaluate a video-inpainting model over a whole test dataset.

    Loads the generator named by `args.model` from checkpoint `args.ckpt`,
    runs sliding-window inpainting over every video, and reports per-video
    and dataset-average PSNR/SSIM plus a dataset-level VFID score. A metrics
    summary (and optionally the completed frames) is written under
    `results/<model>_<dataset>/`.
    """
    # all frames are evaluated at the global (w, h) = (432, 240) resolution
    args.size = (w, h)
    # set up datasets and data loader
    assert (args.dataset == 'davis') or args.dataset == 'youtube-vos', \
        f"{args.dataset} dataset is not supported"
    test_dataset = TestDataset(args)

    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=args.num_workers)

    # set up models
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = importlib.import_module('model.' + args.model)
    model = net.InpaintGenerator().to(device)
    data = torch.load(args.ckpt, map_location=device)
    model.load_state_dict(data)
    print(f'Loading from: {args.ckpt}')
    model.eval()

    # per-frame metrics accumulated across the whole dataset
    total_frame_psnr = []
    total_frame_ssim = []

    # I3D activations collected per video for the VFID computation
    output_i3d_activations = []
    real_i3d_activations = []

    print('Start evaluation...')

    # create results directory
    result_path = os.path.join('results', f'{args.model}_{args.dataset}')
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    eval_summary = open(
        os.path.join(result_path, f"{args.model}_{args.dataset}_metrics.txt"),
        "w")

    i3d_model = init_i3d_model()

    for index, items in enumerate(test_loader):
        frames, masks, video_name, frames_PIL = items

        video_length = frames.size(1)
        frames, masks = frames.to(device), masks.to(device)
        ori_frames = frames_PIL
        # batch size is 1, so squeeze() drops the batch dimension per frame
        ori_frames = [
            ori_frames[i].squeeze().cpu().numpy() for i in range(video_length)
        ]
        comp_frames = [None] * video_length

        # complete holes by our model
        for f in range(0, video_length, neighbor_stride):
            # local temporal window centered on frame f ...
            neighbor_ids = [
                i for i in range(max(0, f - neighbor_stride),
                                 min(video_length, f + neighbor_stride + 1))
            ]
            # ... plus sparse reference frames from the rest of the video
            ref_ids = get_ref_index(neighbor_ids, video_length)
            selected_imgs = frames[:1, neighbor_ids + ref_ids, :, :, :]
            selected_masks = masks[:1, neighbor_ids + ref_ids, :, :, :]
            with torch.no_grad():
                # zero out the masked (hole) regions before inference
                masked_frames = selected_imgs * (1 - selected_masks)
                pred_img, _ = model(masked_frames, len(neighbor_ids))

            # map predictions (assumed in [-1, 1]) to the [0, 255] image range
            pred_img = (pred_img + 1) / 2
            pred_img = pred_img.cpu().permute(0, 2, 3, 1).numpy() * 255
            binary_masks = masks[0, neighbor_ids, :, :, :].cpu().permute(
                0, 2, 3, 1).numpy().astype(np.uint8)
            for i in range(len(neighbor_ids)):
                idx = neighbor_ids[i]
                # composite: model output inside the hole, original elsewhere
                img = np.array(pred_img[i]).astype(np.uint8) * binary_masks[i] \
                    + ori_frames[idx] * (1 - binary_masks[i])
                if comp_frames[idx] is None:
                    comp_frames[idx] = img
                else:
                    # frame predicted by two overlapping windows: average them
                    comp_frames[idx] = comp_frames[idx].astype(
                        np.float32) * 0.5 + img.astype(np.float32) * 0.5

        # calculate metrics
        cur_video_psnr = []
        cur_video_ssim = []
        comp_PIL = []  # to calculate VFID
        frames_PIL = []
        for ori, comp in zip(ori_frames, comp_frames):
            psnr, ssim = calc_psnr_and_ssim(ori, comp)

            cur_video_psnr.append(psnr)
            cur_video_ssim.append(ssim)

            total_frame_psnr.append(psnr)
            total_frame_ssim.append(ssim)

            frames_PIL.append(Image.fromarray(ori.astype(np.uint8)))
            comp_PIL.append(Image.fromarray(comp.astype(np.uint8)))
        cur_psnr = sum(cur_video_psnr) / len(cur_video_psnr)
        cur_ssim = sum(cur_video_ssim) / len(cur_video_ssim)

        # saving i3d activations
        frames_i3d, comp_i3d = calculate_i3d_activations(frames_PIL,
                                                         comp_PIL,
                                                         i3d_model,
                                                         device=device)
        real_i3d_activations.append(frames_i3d)
        output_i3d_activations.append(comp_i3d)

        print(
            f'[{index+1:3}/{len(test_loader)}] Name: {str(video_name):25} | PSNR/SSIM: {cur_psnr:.4f}/{cur_ssim:.4f}'
        )
        eval_summary.write(
            f'[{index+1:3}/{len(test_loader)}] Name: {str(video_name):25} | PSNR/SSIM: {cur_psnr:.4f}/{cur_ssim:.4f}\n'
        )

        # saving images for evaluating warpping errors
        if args.save_results:
            save_frame_path = os.path.join(result_path, video_name[0])
            # exist_ok=False: fail loudly rather than overwrite prior results
            os.makedirs(save_frame_path, exist_ok=False)

            for i, frame in enumerate(comp_frames):
                cv2.imwrite(
                    os.path.join(save_frame_path,
                                 str(i).zfill(5) + '.png'),
                    cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2BGR))

    avg_frame_psnr = sum(total_frame_psnr) / len(total_frame_psnr)
    avg_frame_ssim = sum(total_frame_ssim) / len(total_frame_ssim)

    fid_score = calculate_vfid(real_i3d_activations, output_i3d_activations)
    print('Finish evaluation... Average Frame PSNR/SSIM/VFID: '
          f'{avg_frame_psnr:.2f}/{avg_frame_ssim:.4f}/{fid_score:.3f}')
    eval_summary.write(
        'Finish evaluation... Average Frame PSNR/SSIM/VFID: '
        f'{avg_frame_psnr:.2f}/{avg_frame_ssim:.4f}/{fid_score:.3f}')
    eval_summary.close()
163 |
164 |
if __name__ == '__main__':
    # command-line entry point: parse arguments and launch the evaluation
    parser = argparse.ArgumentParser(description='E2FGVI')
    parser.add_argument('--dataset',
                        type=str,
                        choices=['davis', 'youtube-vos'])
    parser.add_argument('--data_root', type=str, required=True)
    parser.add_argument('--model', type=str, choices=['e2fgvi', 'e2fgvi_hq'])
    parser.add_argument('--ckpt', type=str, required=True)
    parser.add_argument('--save_results', action='store_true', default=False)
    parser.add_argument('--num_workers', type=int, default=4)
    args = parser.parse_args()
    main_worker(args)
177 |
--------------------------------------------------------------------------------
/examples/schoolgirls.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls.mp4
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00000.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00001.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00002.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00003.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00004.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00005.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00006.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00007.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00008.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00009.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00010.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00011.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00012.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00013.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00014.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00015.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00016.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00017.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00018.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00019.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00020.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00021.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00022.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00023.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00024.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00025.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00026.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00027.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00028.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00029.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00030.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00031.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00032.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00033.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00034.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00035.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00036.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00037.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00038.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00039.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00040.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00041.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00042.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00043.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00044.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00045.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00046.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00047.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00048.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00049.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00050.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00051.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00052.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00053.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00054.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00055.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00056.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00057.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00058.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00059.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00060.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00060.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00061.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00061.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00062.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00062.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00063.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00063.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00064.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00064.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00065.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00065.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00066.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00066.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00067.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00067.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00068.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00068.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00069.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00069.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00070.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00070.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00071.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00071.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00072.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00072.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00073.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00073.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00074.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00074.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00075.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00075.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00076.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00076.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00077.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00077.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00078.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00078.png
--------------------------------------------------------------------------------
/examples/schoolgirls_mask/00079.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/schoolgirls_mask/00079.png
--------------------------------------------------------------------------------
/examples/tennis/00000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00000.png
--------------------------------------------------------------------------------
/examples/tennis/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00001.png
--------------------------------------------------------------------------------
/examples/tennis/00002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00002.png
--------------------------------------------------------------------------------
/examples/tennis/00003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00003.png
--------------------------------------------------------------------------------
/examples/tennis/00004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00004.png
--------------------------------------------------------------------------------
/examples/tennis/00005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00005.png
--------------------------------------------------------------------------------
/examples/tennis/00006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00006.png
--------------------------------------------------------------------------------
/examples/tennis/00007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00007.png
--------------------------------------------------------------------------------
/examples/tennis/00008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00008.png
--------------------------------------------------------------------------------
/examples/tennis/00009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00009.png
--------------------------------------------------------------------------------
/examples/tennis/00010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00010.png
--------------------------------------------------------------------------------
/examples/tennis/00011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00011.png
--------------------------------------------------------------------------------
/examples/tennis/00012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00012.png
--------------------------------------------------------------------------------
/examples/tennis/00013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00013.png
--------------------------------------------------------------------------------
/examples/tennis/00014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00014.png
--------------------------------------------------------------------------------
/examples/tennis/00015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00015.png
--------------------------------------------------------------------------------
/examples/tennis/00016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00016.png
--------------------------------------------------------------------------------
/examples/tennis/00017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00017.png
--------------------------------------------------------------------------------
/examples/tennis/00018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00018.png
--------------------------------------------------------------------------------
/examples/tennis/00019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00019.png
--------------------------------------------------------------------------------
/examples/tennis/00020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00020.png
--------------------------------------------------------------------------------
/examples/tennis/00021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00021.png
--------------------------------------------------------------------------------
/examples/tennis/00022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00022.png
--------------------------------------------------------------------------------
/examples/tennis/00023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00023.png
--------------------------------------------------------------------------------
/examples/tennis/00024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00024.png
--------------------------------------------------------------------------------
/examples/tennis/00025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00025.png
--------------------------------------------------------------------------------
/examples/tennis/00026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00026.png
--------------------------------------------------------------------------------
/examples/tennis/00027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00027.png
--------------------------------------------------------------------------------
/examples/tennis/00028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00028.png
--------------------------------------------------------------------------------
/examples/tennis/00029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00029.png
--------------------------------------------------------------------------------
/examples/tennis/00030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00030.png
--------------------------------------------------------------------------------
/examples/tennis/00031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00031.png
--------------------------------------------------------------------------------
/examples/tennis/00032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00032.png
--------------------------------------------------------------------------------
/examples/tennis/00033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00033.png
--------------------------------------------------------------------------------
/examples/tennis/00034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00034.png
--------------------------------------------------------------------------------
/examples/tennis/00035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00035.png
--------------------------------------------------------------------------------
/examples/tennis/00036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00036.png
--------------------------------------------------------------------------------
/examples/tennis/00037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00037.png
--------------------------------------------------------------------------------
/examples/tennis/00038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00038.png
--------------------------------------------------------------------------------
/examples/tennis/00039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00039.png
--------------------------------------------------------------------------------
/examples/tennis/00040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00040.png
--------------------------------------------------------------------------------
/examples/tennis/00041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00041.png
--------------------------------------------------------------------------------
/examples/tennis/00042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00042.png
--------------------------------------------------------------------------------
/examples/tennis/00043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00043.png
--------------------------------------------------------------------------------
/examples/tennis/00044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00044.png
--------------------------------------------------------------------------------
/examples/tennis/00045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00045.png
--------------------------------------------------------------------------------
/examples/tennis/00046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00046.png
--------------------------------------------------------------------------------
/examples/tennis/00047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00047.png
--------------------------------------------------------------------------------
/examples/tennis/00048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00048.png
--------------------------------------------------------------------------------
/examples/tennis/00049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00049.png
--------------------------------------------------------------------------------
/examples/tennis/00050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00050.png
--------------------------------------------------------------------------------
/examples/tennis/00051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00051.png
--------------------------------------------------------------------------------
/examples/tennis/00052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00052.png
--------------------------------------------------------------------------------
/examples/tennis/00053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00053.png
--------------------------------------------------------------------------------
/examples/tennis/00054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00054.png
--------------------------------------------------------------------------------
/examples/tennis/00055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00055.png
--------------------------------------------------------------------------------
/examples/tennis/00056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00056.png
--------------------------------------------------------------------------------
/examples/tennis/00057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00057.png
--------------------------------------------------------------------------------
/examples/tennis/00058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00058.png
--------------------------------------------------------------------------------
/examples/tennis/00059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00059.png
--------------------------------------------------------------------------------
/examples/tennis/00060.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00060.png
--------------------------------------------------------------------------------
/examples/tennis/00061.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00061.png
--------------------------------------------------------------------------------
/examples/tennis/00062.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00062.png
--------------------------------------------------------------------------------
/examples/tennis/00063.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00063.png
--------------------------------------------------------------------------------
/examples/tennis/00064.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00064.png
--------------------------------------------------------------------------------
/examples/tennis/00065.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00065.png
--------------------------------------------------------------------------------
/examples/tennis/00066.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00066.png
--------------------------------------------------------------------------------
/examples/tennis/00067.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00067.png
--------------------------------------------------------------------------------
/examples/tennis/00068.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00068.png
--------------------------------------------------------------------------------
/examples/tennis/00069.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis/00069.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00000.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00001.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00002.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00003.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00004.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00005.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00006.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00007.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00008.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00009.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00010.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00011.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00012.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00013.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00014.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00015.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00016.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00017.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00018.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00019.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00020.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00021.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00022.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00023.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00024.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00025.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00026.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00027.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00028.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00029.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00030.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00031.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00032.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00033.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00034.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00035.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00036.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00037.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00038.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00039.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00040.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00041.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00042.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00043.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00044.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00045.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00046.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00047.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00048.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00049.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00050.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00051.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00052.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00053.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00054.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00055.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00056.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00057.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00058.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00059.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00060.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00060.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00061.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00061.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00062.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00062.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00063.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00063.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00064.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00064.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00065.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00065.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00066.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00066.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00067.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00067.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00068.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00068.png
--------------------------------------------------------------------------------
/examples/tennis_mask/00069.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/examples/tennis_mask/00069.png
--------------------------------------------------------------------------------
/figs/demo_coco.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/figs/demo_coco.gif
--------------------------------------------------------------------------------
/figs/demo_tennis.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/figs/demo_tennis.gif
--------------------------------------------------------------------------------
/figs/framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/figs/framework.png
--------------------------------------------------------------------------------
/figs/quantitative_results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/figs/quantitative_results.png
--------------------------------------------------------------------------------
/figs/teaser.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MCG-NKU/E2FGVI/709cbe319edc21b8a365a28e14cba595a93d62cf/figs/teaser.gif
--------------------------------------------------------------------------------
/model/e2fgvi.py:
--------------------------------------------------------------------------------
1 | ''' Towards An End-to-End Framework for Video Inpainting
2 | '''
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 | from model.modules.flow_comp import SPyNet
9 | from model.modules.feat_prop import BidirectionalPropagation, SecondOrderDeformableAlignment
10 | from model.modules.tfocal_transformer import TemporalFocalTransformerBlock, SoftSplit, SoftComp
11 | from model.modules.spectral_norm import spectral_norm as _spectral_norm
12 |
13 |
class BaseNetwork(nn.Module):
    """Common base class: parameter counting and weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        """Report the network's class name and its parameter count."""
        # Some call sites hand in a list of networks; report on the first.
        if isinstance(self, list):
            self = self[0]
        num_params = sum(param.numel() for param in self.parameters())
        print(
            'Network [%s] was created. Total number of parameters: %.1f million. '
            'To see the architecture, do print(network).' %
            (type(self).__name__, num_params / 1000000))

    def init_weights(self, init_type='normal', gain=0.02):
        '''
        initialize network's weights
        init_type: normal | xavier | kaiming | orthogonal
        https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
        '''
        def init_func(m):
            classname = m.__class__.__name__
            # Instance-norm layers: affine scale -> 1, shift -> 0.
            if classname.find('InstanceNorm2d') != -1:
                if hasattr(m, 'weight') and m.weight is not None:
                    nn.init.constant_(m.weight.data, 1.0)
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
                return
            # Everything else is only touched if it is a conv/linear layer
            # that actually owns a weight tensor.
            is_conv_or_linear = (classname.find('Conv') != -1
                                 or classname.find('Linear') != -1)
            if not (hasattr(m, 'weight') and is_conv_or_linear):
                return
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, gain=1.0)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight.data, gain=gain)
            elif init_type == 'none':  # uses pytorch's default init method
                m.reset_parameters()
            else:
                raise NotImplementedError(
                    'initialization method [%s] is not implemented' %
                    init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)

        self.apply(init_func)

        # propagate to children
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)
69 |
70 |
class Encoder(nn.Module):
    """Frame encoder: two stride-2 convs (down to 1/4 resolution) followed
    by grouped convolutions with dense skips from the feature map saved at
    layer index 8.
    """
    def __init__(self):
        super(Encoder, self).__init__()
        # group counts for the successive concat-and-grouped-conv stages
        self.group = [1, 2, 4, 8, 1]
        self.layers = nn.ModuleList([
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1, groups=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(640, 512, kernel_size=3, stride=1, padding=1, groups=2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(768, 384, kernel_size=3, stride=1, padding=1, groups=4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(640, 256, kernel_size=3, stride=1, padding=1, groups=8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1, groups=1),
            nn.LeakyReLU(0.2, inplace=True)
        ])

    def forward(self, x):
        """Encode (bt, 3, H, W) frames into (bt, 128, ~H/4, ~W/4) features."""
        bt, c, _, _ = x.size()
        out = x
        for i, layer in enumerate(self.layers):
            if i == 8:
                x0 = out
                # Read the actual spatial size of the saved feature map
                # instead of assuming the input is divisible by 4; this
                # fixes view() failures on odd-sized inputs and matches the
                # e2fgvi_hq.py Encoder.
                _, _, h, w = x0.size()
            if i > 8 and i % 2 == 0:
                # Concatenate the saved features with the current ones
                # group-wise before each grouped conv (dense skip).
                g = self.group[(i - 8) // 2]
                x = x0.view(bt, g, -1, h, w)
                o = out.view(bt, g, -1, h, w)
                out = torch.cat([x, o], 2).view(bt, -1, h, w)
            out = layer(out)
        return out
110 |
111 |
class deconv(nn.Module):
    """2x bilinear upsampling followed by a stride-1 convolution."""

    def __init__(self,
                 input_channel,
                 output_channel,
                 kernel_size=3,
                 padding=0):
        super().__init__()
        self.conv = nn.Conv2d(input_channel,
                              output_channel,
                              kernel_size=kernel_size,
                              stride=1,
                              padding=padding)

    def forward(self, x):
        upsampled = F.interpolate(x,
                                  scale_factor=2,
                                  mode='bilinear',
                                  align_corners=True)
        return self.conv(upsampled)
131 |
132 |
class InpaintGenerator(BaseNetwork):
    """E2FGVI inpainting generator.

    Pipeline: encode masked frames -> flow-guided bidirectional feature
    propagation over the local frames -> temporal focal transformer
    blocks -> decode back to frames.

    NOTE(review): SoftSplit/SoftComp are built with a fixed feature-map
    output_size of (60, 108), i.e. 240x432 input frames — confirm the
    input resolution upstream (the _hq variant lifts this restriction).
    """
    def __init__(self, init_weights=True):
        super(InpaintGenerator, self).__init__()
        channel = 256
        hidden = 512

        # encoder
        self.encoder = Encoder()

        # decoder
        # two deconv (2x each) stages: upsamples the 1/4-resolution
        # features back to frame resolution, ending in 3 channels
        self.decoder = nn.Sequential(
            deconv(channel // 2, 128, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            deconv(64, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1))

        # feature propagation module
        self.feat_prop_module = BidirectionalPropagation(channel // 2)

        # soft split and soft composition
        kernel_size = (7, 7)
        padding = (3, 3)
        stride = (3, 3)
        output_size = (60, 108)
        t2t_params = {
            'kernel_size': kernel_size,
            'stride': stride,
            'padding': padding,
            'output_size': output_size
        }
        self.ss = SoftSplit(channel // 2,
                            hidden,
                            kernel_size,
                            stride,
                            padding,
                            t2t_param=t2t_params)
        self.sc = SoftComp(channel // 2, hidden, output_size, kernel_size,
                           stride, padding)

        # number of token vectors produced by unfolding output_size with
        # the kernel/stride/padding above (standard unfold arithmetic)
        n_vecs = 1
        for i, d in enumerate(kernel_size):
            n_vecs *= int((output_size[i] + 2 * padding[i] -
                           (d - 1) - 1) / stride[i] + 1)

        blocks = []
        depths = 8
        num_heads = [4] * depths
        window_size = [(5, 9)] * depths
        focal_windows = [(5, 9)] * depths
        focal_levels = [2] * depths
        pool_method = "fc"

        for i in range(depths):
            blocks.append(
                TemporalFocalTransformerBlock(dim=hidden,
                                              num_heads=num_heads[i],
                                              window_size=window_size[i],
                                              focal_level=focal_levels[i],
                                              focal_window=focal_windows[i],
                                              n_vecs=n_vecs,
                                              t2t_params=t2t_params,
                                              pool_method=pool_method))
        self.transformer = nn.Sequential(*blocks)

        if init_weights:
            self.init_weights()
            # Need to initial the weights of MSDeformAttn specifically
            for m in self.modules():
                if isinstance(m, SecondOrderDeformableAlignment):
                    m.init_offset()

        # flow completion network
        self.update_spynet = SPyNet()

    def forward_bidirect_flow(self, masked_local_frames):
        """Estimate flows between neighbouring masked local frames.

        masked_local_frames: (b, l_t, c, h, w), values rescaled to [0, 1]
        by the caller. Returns a (forward, backward) pair of flow tensors,
        each of shape (b, l_t - 1, 2, h // 4, w // 4).

        NOTE(review): "forward" here means SPyNet(frame_t, frame_t+1) —
        confirm against SPyNet's argument convention.
        """
        b, l_t, c, h, w = masked_local_frames.size()

        # compute forward and backward flows of masked frames
        # (flows are estimated at 1/4 resolution to match encoder features)
        masked_local_frames = F.interpolate(masked_local_frames.view(
            -1, c, h, w),
                                            scale_factor=1 / 4,
                                            mode='bilinear',
                                            align_corners=True,
                                            recompute_scale_factor=True)
        masked_local_frames = masked_local_frames.view(b, l_t, c, h // 4,
                                                       w // 4)
        # frame pairs: mlf_1 = frames[0..t-2], mlf_2 = frames[1..t-1]
        mlf_1 = masked_local_frames[:, :-1, :, :, :].reshape(
            -1, c, h // 4, w // 4)
        mlf_2 = masked_local_frames[:, 1:, :, :, :].reshape(
            -1, c, h // 4, w // 4)
        pred_flows_forward = self.update_spynet(mlf_1, mlf_2)
        pred_flows_backward = self.update_spynet(mlf_2, mlf_1)

        pred_flows_forward = pred_flows_forward.view(b, l_t - 1, 2, h // 4,
                                                     w // 4)
        pred_flows_backward = pred_flows_backward.view(b, l_t - 1, 2, h // 4,
                                                       w // 4)

        return pred_flows_forward, pred_flows_backward

    def forward(self, masked_frames, num_local_frames):
        """Inpaint a batch of frame sequences.

        masked_frames: (b, t, c, h, w); expected in [-1, 1] (the flow
            branch rescales to [0, 1] below). The first ``num_local_frames``
            along t are local frames; the rest are non-local references.
        Returns (output, pred_flows): output is (b * t, 3, h, w) squashed
            by tanh to [-1, 1]; pred_flows is the (forward, backward) pair.
        """
        l_t = num_local_frames
        b, t, ori_c, ori_h, ori_w = masked_frames.size()

        # normalization before feeding into the flow completion module
        masked_local_frames = (masked_frames[:, :l_t, ...] + 1) / 2
        pred_flows = self.forward_bidirect_flow(masked_local_frames)

        # extracting features and performing the feature propagation on local features
        enc_feat = self.encoder(masked_frames.view(b * t, ori_c, ori_h, ori_w))
        _, c, h, w = enc_feat.size()
        local_feat = enc_feat.view(b, t, c, h, w)[:, :l_t, ...]
        ref_feat = enc_feat.view(b, t, c, h, w)[:, l_t:, ...]
        # NOTE(review): pred_flows[0] (forward) is passed into the
        # parameter named flows_backward of BidirectionalPropagation —
        # confirm this argument order is intended.
        local_feat = self.feat_prop_module(local_feat, pred_flows[0],
                                           pred_flows[1])
        enc_feat = torch.cat((local_feat, ref_feat), dim=1)

        # content hallucination through stacking multiple temporal focal transformer blocks
        trans_feat = self.ss(enc_feat.view(-1, c, h, w), b)
        trans_feat = self.transformer(trans_feat)
        trans_feat = self.sc(trans_feat, t)
        trans_feat = trans_feat.view(b, t, -1, h, w)
        # residual connection around the transformer stack
        enc_feat = enc_feat + trans_feat

        # decode frames from features
        output = self.decoder(enc_feat.view(b * t, c, h, w))
        output = torch.tanh(output)
        return output, pred_flows
264 |
265 |
266 | # ######################################################################
267 | # Discriminator for Temporal Patch GAN
268 | # ######################################################################
269 |
270 |
class Discriminator(BaseNetwork):
    """3D convolutional discriminator for the Temporal Patch GAN."""

    def __init__(self,
                 in_channels=3,
                 use_sigmoid=False,
                 use_spectral_norm=True,
                 init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        nf = 32

        # (in_channels, out_channels, padding) for the five
        # spectrally-normalized conv stages; the first stage keeps the
        # original scalar padding of 1.
        stages = [
            (in_channels, nf * 1, 1),
            (nf * 1, nf * 2, (1, 2, 2)),
            (nf * 2, nf * 4, (1, 2, 2)),
            (nf * 4, nf * 4, (1, 2, 2)),
            (nf * 4, nf * 4, (1, 2, 2)),
        ]
        layers = []
        for c_in, c_out, pad in stages:
            layers.append(
                spectral_norm(
                    nn.Conv3d(in_channels=c_in,
                              out_channels=c_out,
                              kernel_size=(3, 5, 5),
                              stride=(1, 2, 2),
                              padding=pad,
                              bias=not use_spectral_norm), use_spectral_norm))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        # final projection: no normalization, no activation
        layers.append(
            nn.Conv3d(nf * 4,
                      nf * 4,
                      kernel_size=(3, 5, 5),
                      stride=(1, 2, 2),
                      padding=(1, 2, 2)))
        self.conv = nn.Sequential(*layers)

        if init_weights:
            self.init_weights()

    def forward(self, xs):
        """Score a clip. Input and output are (B, T, C, H, W)."""
        # Conv3d expects (B, C, T, H, W), so swap time and channels in,
        # then back out.
        features = self.conv(torch.transpose(xs, 1, 2))
        if self.use_sigmoid:
            features = torch.sigmoid(features)
        return torch.transpose(features, 1, 2)
345 |
346 |
def spectral_norm(module, mode=True):
    """Wrap ``module`` with spectral normalization when ``mode`` is truthy;
    otherwise return it untouched."""
    return _spectral_norm(module) if mode else module
351 |
--------------------------------------------------------------------------------
/model/e2fgvi_hq.py:
--------------------------------------------------------------------------------
1 | ''' Towards An End-to-End Framework for Video Inpainting
2 | '''
3 |
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 |
8 | from model.modules.flow_comp import SPyNet
9 | from model.modules.feat_prop import BidirectionalPropagation, SecondOrderDeformableAlignment
10 | from model.modules.tfocal_transformer_hq import TemporalFocalTransformerBlock, SoftSplit, SoftComp
11 | from model.modules.spectral_norm import spectral_norm as _spectral_norm
12 |
13 |
class BaseNetwork(nn.Module):
    """Common base class: parameter counting and weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        """Report the network's class name and its parameter count."""
        # Some call sites hand in a list of networks; report on the first.
        if isinstance(self, list):
            self = self[0]
        num_params = sum(param.numel() for param in self.parameters())
        print(
            'Network [%s] was created. Total number of parameters: %.1f million. '
            'To see the architecture, do print(network).' %
            (type(self).__name__, num_params / 1000000))

    def init_weights(self, init_type='normal', gain=0.02):
        '''
        initialize network's weights
        init_type: normal | xavier | kaiming | orthogonal
        https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
        '''
        def init_func(m):
            classname = m.__class__.__name__
            # Instance-norm layers: affine scale -> 1, shift -> 0.
            if classname.find('InstanceNorm2d') != -1:
                if hasattr(m, 'weight') and m.weight is not None:
                    nn.init.constant_(m.weight.data, 1.0)
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
                return
            # Everything else is only touched if it is a conv/linear layer
            # that actually owns a weight tensor.
            is_conv_or_linear = (classname.find('Conv') != -1
                                 or classname.find('Linear') != -1)
            if not (hasattr(m, 'weight') and is_conv_or_linear):
                return
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, gain=1.0)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight.data, gain=gain)
            elif init_type == 'none':  # uses pytorch's default init method
                m.reset_parameters()
            else:
                raise NotImplementedError(
                    'initialization method [%s] is not implemented' %
                    init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)

        self.apply(init_func)

        # propagate to children
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)
69 |
70 |
class Encoder(nn.Module):
    """Frame encoder: two stride-2 convs (down to 1/4 resolution) followed
    by grouped convolutions with dense skips from the feature map saved at
    layer index 8. Handles arbitrary input sizes by reading the saved
    feature map's actual spatial extent.
    """
    def __init__(self):
        super(Encoder, self).__init__()
        # group counts for the successive concat-and-grouped-conv stages
        self.group = [1, 2, 4, 8, 1]
        self.layers = nn.ModuleList([
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1, groups=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(640, 512, kernel_size=3, stride=1, padding=1, groups=2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(768, 384, kernel_size=3, stride=1, padding=1, groups=4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(640, 256, kernel_size=3, stride=1, padding=1, groups=8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1, groups=1),
            nn.LeakyReLU(0.2, inplace=True)
        ])

    def forward(self, x):
        """Encode (bt, 3, H, W) frames into (bt, 128, ~H/4, ~W/4) features."""
        batch_t = x.size(0)
        feat = x
        skip = None
        h = w = 0
        for idx, layer in enumerate(self.layers):
            if idx == 8:
                # remember the pre-layer-8 feature map and its true size
                skip = feat
                h, w = skip.size(2), skip.size(3)
            if idx > 8 and idx % 2 == 0:
                # dense skip: concatenate saved and current features
                # group-wise ahead of each grouped conv
                n_groups = self.group[(idx - 8) // 2]
                grouped_skip = skip.view(batch_t, n_groups, -1, h, w)
                grouped_feat = feat.view(batch_t, n_groups, -1, h, w)
                feat = torch.cat([grouped_skip, grouped_feat],
                                 2).view(batch_t, -1, h, w)
            feat = layer(feat)
        return feat
111 |
112 |
class deconv(nn.Module):
    """2x bilinear upsampling followed by a stride-1 convolution."""

    def __init__(self,
                 input_channel,
                 output_channel,
                 kernel_size=3,
                 padding=0):
        super().__init__()
        self.conv = nn.Conv2d(input_channel,
                              output_channel,
                              kernel_size=kernel_size,
                              stride=1,
                              padding=padding)

    def forward(self, x):
        upsampled = F.interpolate(x,
                                  scale_factor=2,
                                  mode='bilinear',
                                  align_corners=True)
        return self.conv(upsampled)
132 |
133 |
class InpaintGenerator(BaseNetwork):
    """E2FGVI-HQ inpainting generator.

    Same pipeline as the base model (encode -> flow-guided propagation ->
    temporal focal transformer -> decode) but resolution-agnostic: the
    actual encoder feature size is passed at runtime as fold_output_size,
    so SoftSplit/SoftComp are not tied to a fixed (60, 108) map.
    """
    def __init__(self, init_weights=True):
        super(InpaintGenerator, self).__init__()
        channel = 256
        hidden = 512

        # encoder
        self.encoder = Encoder()

        # decoder
        # two deconv (2x each) stages: upsamples the 1/4-resolution
        # features back to frame resolution, ending in 3 channels
        self.decoder = nn.Sequential(
            deconv(channel // 2, 128, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            deconv(64, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1))

        # feature propagation module
        self.feat_prop_module = BidirectionalPropagation(channel // 2)

        # soft split and soft composition
        kernel_size = (7, 7)
        padding = (3, 3)
        stride = (3, 3)
        # output_size is only used below to size n_vecs; at runtime the
        # real feature size is supplied to ss/sc/transformer instead
        output_size = (60, 108)
        t2t_params = {
            'kernel_size': kernel_size,
            'stride': stride,
            'padding': padding
        }
        self.ss = SoftSplit(channel // 2,
                            hidden,
                            kernel_size,
                            stride,
                            padding,
                            t2t_param=t2t_params)
        self.sc = SoftComp(channel // 2, hidden, kernel_size, stride, padding)

        # number of token vectors produced by unfolding output_size with
        # the kernel/stride/padding above (standard unfold arithmetic)
        n_vecs = 1
        for i, d in enumerate(kernel_size):
            n_vecs *= int((output_size[i] + 2 * padding[i] -
                           (d - 1) - 1) / stride[i] + 1)

        blocks = []
        depths = 8
        num_heads = [4] * depths
        window_size = [(5, 9)] * depths
        focal_windows = [(5, 9)] * depths
        focal_levels = [2] * depths
        pool_method = "fc"

        for i in range(depths):
            blocks.append(
                TemporalFocalTransformerBlock(dim=hidden,
                                              num_heads=num_heads[i],
                                              window_size=window_size[i],
                                              focal_level=focal_levels[i],
                                              focal_window=focal_windows[i],
                                              n_vecs=n_vecs,
                                              t2t_params=t2t_params,
                                              pool_method=pool_method))
        self.transformer = nn.Sequential(*blocks)

        if init_weights:
            self.init_weights()
            # Need to initial the weights of MSDeformAttn specifically
            for m in self.modules():
                if isinstance(m, SecondOrderDeformableAlignment):
                    m.init_offset()

        # flow completion network
        self.update_spynet = SPyNet()

    def forward_bidirect_flow(self, masked_local_frames):
        """Estimate flows between neighbouring masked local frames.

        masked_local_frames: (b, l_t, c, h, w), values rescaled to [0, 1]
        by the caller. Returns a (forward, backward) pair of flow tensors,
        each of shape (b, l_t - 1, 2, h // 4, w // 4).

        NOTE(review): "forward" here means SPyNet(frame_t, frame_t+1) —
        confirm against SPyNet's argument convention.
        """
        b, l_t, c, h, w = masked_local_frames.size()

        # compute forward and backward flows of masked frames
        # (flows are estimated at 1/4 resolution to match encoder features)
        masked_local_frames = F.interpolate(masked_local_frames.view(
            -1, c, h, w),
                                            scale_factor=1 / 4,
                                            mode='bilinear',
                                            align_corners=True,
                                            recompute_scale_factor=True)
        masked_local_frames = masked_local_frames.view(b, l_t, c, h // 4,
                                                       w // 4)
        # frame pairs: mlf_1 = frames[0..t-2], mlf_2 = frames[1..t-1]
        mlf_1 = masked_local_frames[:, :-1, :, :, :].reshape(
            -1, c, h // 4, w // 4)
        mlf_2 = masked_local_frames[:, 1:, :, :, :].reshape(
            -1, c, h // 4, w // 4)
        pred_flows_forward = self.update_spynet(mlf_1, mlf_2)
        pred_flows_backward = self.update_spynet(mlf_2, mlf_1)

        pred_flows_forward = pred_flows_forward.view(b, l_t - 1, 2, h // 4,
                                                     w // 4)
        pred_flows_backward = pred_flows_backward.view(b, l_t - 1, 2, h // 4,
                                                       w // 4)

        return pred_flows_forward, pred_flows_backward

    def forward(self, masked_frames, num_local_frames):
        """Inpaint a batch of frame sequences.

        masked_frames: (b, t, c, h, w); expected in [-1, 1] (the flow
            branch rescales to [0, 1] below). The first ``num_local_frames``
            along t are local frames; the rest are non-local references.
        Returns (output, pred_flows): output is (b * t, 3, h, w) squashed
            by tanh to [-1, 1]; pred_flows is the (forward, backward) pair.
        """
        l_t = num_local_frames
        b, t, ori_c, ori_h, ori_w = masked_frames.size()

        # normalization before feeding into the flow completion module
        masked_local_frames = (masked_frames[:, :l_t, ...] + 1) / 2
        pred_flows = self.forward_bidirect_flow(masked_local_frames)

        # extracting features and performing the feature propagation on local features
        enc_feat = self.encoder(masked_frames.view(b * t, ori_c, ori_h, ori_w))
        _, c, h, w = enc_feat.size()
        # runtime feature size forwarded to soft split/comp and transformer
        fold_output_size = (h, w)
        local_feat = enc_feat.view(b, t, c, h, w)[:, :l_t, ...]
        ref_feat = enc_feat.view(b, t, c, h, w)[:, l_t:, ...]
        # NOTE(review): pred_flows[0] (forward) is passed into the
        # parameter named flows_backward of BidirectionalPropagation —
        # confirm this argument order is intended.
        local_feat = self.feat_prop_module(local_feat, pred_flows[0],
                                           pred_flows[1])
        enc_feat = torch.cat((local_feat, ref_feat), dim=1)

        # content hallucination through stacking multiple temporal focal transformer blocks
        trans_feat = self.ss(enc_feat.view(-1, c, h, w), b, fold_output_size)
        trans_feat = self.transformer([trans_feat, fold_output_size])
        trans_feat = self.sc(trans_feat[0], t, fold_output_size)
        trans_feat = trans_feat.view(b, t, -1, h, w)
        # residual connection around the transformer stack
        enc_feat = enc_feat + trans_feat

        # decode frames from features
        output = self.decoder(enc_feat.view(b * t, c, h, w))
        output = torch.tanh(output)
        return output, pred_flows
264 |
265 |
266 | # ######################################################################
267 | # Discriminator for Temporal Patch GAN
268 | # ######################################################################
269 |
270 |
class Discriminator(BaseNetwork):
    """3D convolutional discriminator for the Temporal Patch GAN."""

    def __init__(self,
                 in_channels=3,
                 use_sigmoid=False,
                 use_spectral_norm=True,
                 init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        nf = 32

        # (in_channels, out_channels, padding) for the five
        # spectrally-normalized conv stages; the first stage keeps the
        # original scalar padding of 1.
        stages = [
            (in_channels, nf * 1, 1),
            (nf * 1, nf * 2, (1, 2, 2)),
            (nf * 2, nf * 4, (1, 2, 2)),
            (nf * 4, nf * 4, (1, 2, 2)),
            (nf * 4, nf * 4, (1, 2, 2)),
        ]
        layers = []
        for c_in, c_out, pad in stages:
            layers.append(
                spectral_norm(
                    nn.Conv3d(in_channels=c_in,
                              out_channels=c_out,
                              kernel_size=(3, 5, 5),
                              stride=(1, 2, 2),
                              padding=pad,
                              bias=not use_spectral_norm), use_spectral_norm))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        # final projection: no normalization, no activation
        layers.append(
            nn.Conv3d(nf * 4,
                      nf * 4,
                      kernel_size=(3, 5, 5),
                      stride=(1, 2, 2),
                      padding=(1, 2, 2)))
        self.conv = nn.Sequential(*layers)

        if init_weights:
            self.init_weights()

    def forward(self, xs):
        """Score a clip. Input and output are (B, T, C, H, W)."""
        # Conv3d expects (B, C, T, H, W), so swap time and channels in,
        # then back out.
        features = self.conv(torch.transpose(xs, 1, 2))
        if self.use_sigmoid:
            features = torch.sigmoid(features)
        return torch.transpose(features, 1, 2)
345 |
346 |
def spectral_norm(module, mode=True):
    """Wrap ``module`` with spectral normalization when ``mode`` is truthy;
    otherwise return it untouched."""
    return _spectral_norm(module) if mode else module
351 |
--------------------------------------------------------------------------------
/model/modules/feat_prop.py:
--------------------------------------------------------------------------------
1 | """
2 | BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation and Alignment, CVPR 2022
3 | """
4 | import torch
5 | import torch.nn as nn
6 |
7 | from mmcv.ops import ModulatedDeformConv2d, modulated_deform_conv2d
8 | from mmcv.cnn import constant_init
9 |
10 | from model.modules.flow_comp import flow_warp
11 |
12 |
class SecondOrderDeformableAlignment(ModulatedDeformConv2d):
    """Second-order deformable alignment module.

    Predicts deformable-conv offsets and modulation masks from current
    features plus two guiding optical flows, then applies a modulated
    deformable convolution. Offsets are regressed as bounded residuals
    added on top of the flows.
    """
    def __init__(self, *args, **kwargs):
        # cap on the learned offset residual added on top of the flows
        self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)

        super(SecondOrderDeformableAlignment, self).__init__(*args, **kwargs)

        # offset/mask head: input is concat(extra_feat, flow_1, flow_2)
        # -> 3*C + 4 channels; output is 27 * deform_groups, split below
        # into two 9-tap offset maps (o1, o2) and one 9-tap mask
        self.conv_offset = nn.Sequential(
            nn.Conv2d(3 * self.out_channels + 4, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, 27 * self.deform_groups, 3, 1, 1),
        )

        self.init_offset()

    def init_offset(self):
        # zero the last layer so training starts with zero offset/mask
        # residuals (i.e. sampling is initially driven by the flows alone)
        constant_init(self.conv_offset[-1], val=0, bias=0)

    def forward(self, x, extra_feat, flow_1, flow_2):
        """Apply flow-guided modulated deformable convolution.

        x: features to align; extra_feat: conditioning features;
        flow_1 / flow_2: first- and second-order flows (2 channels each).
        """
        extra_feat = torch.cat([extra_feat, flow_1, flow_2], dim=1)
        out = self.conv_offset(extra_feat)
        o1, o2, mask = torch.chunk(out, 3, dim=1)

        # offset
        # tanh bounds the residual to [-max_residue_magnitude, +...]
        offset = self.max_residue_magnitude * torch.tanh(
            torch.cat((o1, o2), dim=1))
        offset_1, offset_2 = torch.chunk(offset, 2, dim=1)
        # flip(1) swaps the two flow channels — presumably to match the
        # deform-conv's (y, x) offset ordering; TODO confirm
        offset_1 = offset_1 + flow_1.flip(1).repeat(1,
                                                    offset_1.size(1) // 2, 1,
                                                    1)
        offset_2 = offset_2 + flow_2.flip(1).repeat(1,
                                                    offset_2.size(1) // 2, 1,
                                                    1)
        offset = torch.cat([offset_1, offset_2], dim=1)

        # mask
        mask = torch.sigmoid(mask)

        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
                                       self.stride, self.padding,
                                       self.dilation, self.groups,
                                       self.deform_groups)
59 |
60 |
class BidirectionalPropagation(nn.Module):
    """Bidirectional, flow-guided, second-order feature propagation
    (after BasicVSR++): a backward and a forward pass over time, each
    aligning propagated features via deformable convolution, followed by
    a 1x1 fusion conv with a global residual connection.
    """
    def __init__(self, channel):
        super(BidirectionalPropagation, self).__init__()
        modules = ['backward_', 'forward_']
        self.deform_align = nn.ModuleDict()
        self.backbone = nn.ModuleDict()
        self.channel = channel

        for i, module in enumerate(modules):
            # aligns concat(feat_prop, feat_n2) (2*channel in) guided by
            # the first- and second-order flows
            self.deform_align[module] = SecondOrderDeformableAlignment(
                2 * channel, channel, 3, padding=1, deform_groups=16)

            # refinement over [current, (earlier branch,) propagated]
            # features; the forward_ branch (i == 1) also sees the
            # backward_ branch output, hence (2 + i) * channel inputs
            self.backbone[module] = nn.Sequential(
                nn.Conv2d((2 + i) * channel, channel, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.1, inplace=True),
                nn.Conv2d(channel, channel, 3, 1, 1),
            )

        # fuses the backward and forward branch outputs per frame
        self.fusion = nn.Conv2d(2 * channel, channel, 1, 1, 0)

    def forward(self, x, flows_backward, flows_forward):
        """
        x shape : [b, t, c, h, w]
        return [b, t, c, h, w]
        """
        b, t, c, h, w = x.shape
        feats = {}
        # per-frame input features, kept for conditioning and fusion
        feats['spatial'] = [x[:, i, :, :, :] for i in range(0, t)]

        for module_name in ['backward_', 'forward_']:

            feats[module_name] = []

            frame_idx = range(0, t)
            flow_idx = range(-1, t - 1)
            mapping_idx = list(range(0, len(feats['spatial'])))
            mapping_idx += mapping_idx[::-1]

            if 'backward' in module_name:
                # traverse frames in reverse order for the backward pass
                frame_idx = frame_idx[::-1]
                flows = flows_backward
            else:
                flows = flows_forward

            feat_prop = x.new_zeros(b, self.channel, h, w)
            for i, idx in enumerate(frame_idx):
                feat_current = feats['spatial'][mapping_idx[idx]]

                if i > 0:
                    # first-order term: warp the previously propagated
                    # features with the flow to the current frame
                    flow_n1 = flows[:, flow_idx[i], :, :, :]
                    cond_n1 = flow_warp(feat_prop, flow_n1.permute(0, 2, 3, 1))

                    # initialize second-order features
                    feat_n2 = torch.zeros_like(feat_prop)
                    flow_n2 = torch.zeros_like(flow_n1)
                    cond_n2 = torch.zeros_like(cond_n1)
                    if i > 1:
                        # second-order term: features from two steps back,
                        # with the two flows composed by warping
                        feat_n2 = feats[module_name][-2]
                        flow_n2 = flows[:, flow_idx[i - 1], :, :, :]
                        flow_n2 = flow_n1 + flow_warp(
                            flow_n2, flow_n1.permute(0, 2, 3, 1))
                        cond_n2 = flow_warp(feat_n2,
                                            flow_n2.permute(0, 2, 3, 1))

                    # cond conditions the offset prediction; feat_prop is
                    # the stacked first/second-order input to be aligned
                    cond = torch.cat([cond_n1, feat_current, cond_n2], dim=1)
                    feat_prop = torch.cat([feat_prop, feat_n2], dim=1)
                    feat_prop = self.deform_align[module_name](feat_prop, cond,
                                                               flow_n1,
                                                               flow_n2)

                feat = [feat_current] + [
                    feats[k][idx]
                    for k in feats if k not in ['spatial', module_name]
                ] + [feat_prop]

                feat = torch.cat(feat, dim=1)
                # residual refinement of the propagated features
                feat_prop = feat_prop + self.backbone[module_name](feat)
                feats[module_name].append(feat_prop)

            if 'backward' in module_name:
                # restore chronological order
                feats[module_name] = feats[module_name][::-1]

        outputs = []
        for i in range(0, t):
            align_feats = [feats[k].pop(0) for k in feats if k != 'spatial']
            align_feats = torch.cat(align_feats, dim=1)
            outputs.append(self.fusion(align_feats))

        # global residual connection over the input features
        return torch.stack(outputs, dim=1) + x
150 |
--------------------------------------------------------------------------------
/model/modules/spectral_norm.py:
--------------------------------------------------------------------------------
1 | """
2 | Spectral Normalization from https://arxiv.org/abs/1802.05957
3 | """
4 | import torch
5 | from torch.nn.functional import normalize
6 |
7 |
class SpectralNorm(object):
    """Forward pre-hook object that rescales a parameter by its spectral norm.

    Attached to a module by :meth:`apply`; on every forward call it
    recomputes ``weight = weight_orig / sigma`` where ``sigma`` is estimated
    with power iteration on buffers ``u`` and ``v``.
    """
    # Invariant before and after each forward call:
    #   u = normalize(W @ v)
    # NB: At initialization, this invariant is not enforced

    _version = 1

    # At version 1:
    #   made `W` not a buffer,
    #   added `v` as a buffer, and
    #   made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.

    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim
        if n_power_iterations <= 0:
            raise ValueError(
                'Expected n_power_iterations to be positive, but '
                'got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps

    def reshape_weight_to_matrix(self, weight):
        """Flatten `weight` into a 2-D matrix with `self.dim` as the rows."""
        weight_mat = weight
        if self.dim != 0:
            # permute dim to front
            weight_mat = weight_mat.permute(
                self.dim,
                *[d for d in range(weight_mat.dim()) if d != self.dim])
        height = weight_mat.size(0)
        return weight_mat.reshape(height, -1)

    def compute_weight(self, module, do_power_iteration):
        """Return ``weight_orig / sigma``, optionally refreshing u/v first."""
        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
        # updated in power iteration **in-place**. This is very important
        # because in `DataParallel` forward, the vectors (being buffers) are
        # broadcast from the parallelized module to each module replica,
        # which is a new module object created on the fly. And each replica
        # runs its own spectral norm power iteration. So simply assigning
        # the updated vectors to the module this function runs on will cause
        # the update to be lost forever. And the next time the parallelized
        # module is replicated, the same randomly initialized vectors are
        # broadcast and used!
        #
        # Therefore, to make the change propagate back, we rely on two
        # important behaviors (also enforced via tests):
        #   1. `DataParallel` doesn't clone storage if the broadcast tensor
        #      is already on correct device; and it makes sure that the
        #      parallelized module is already on `device[0]`.
        #   2. If the out tensor in `out=` kwarg has correct shape, it will
        #      just fill in the values.
        # Therefore, since the same power iteration is performed on all
        # devices, simply updating the tensors in-place will make sure that
        # the module replica on `device[0]` will update the _u vector on the
        # parallized module (by shared storage).
        #
        # However, after we update `u` and `v` in-place, we need to **clone**
        # them before using them to normalize the weight. This is to support
        # backproping through two forward passes, e.g., the common pattern in
        # GAN training: loss = D(real) - D(fake). Otherwise, engine will
        # complain that variables needed to do backward for the first forward
        # (i.e., the `u` and `v` vectors) are changed in the second forward.
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        v = getattr(module, self.name + '_v')
        weight_mat = self.reshape_weight_to_matrix(weight)

        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
                    # are the first left and right singular vectors.
                    # This power iteration produces approximations of `u` and `v`.
                    v = normalize(torch.mv(weight_mat.t(), u),
                                  dim=0,
                                  eps=self.eps,
                                  out=v)
                    u = normalize(torch.mv(weight_mat, v),
                                  dim=0,
                                  eps=self.eps,
                                  out=u)
                # Always true given the __init__ validation; kept as an
                # explicit guard around the clones.
                if self.n_power_iterations > 0:
                    # See above on why we need to clone
                    u = u.clone()
                    v = v.clone()

        sigma = torch.dot(u, torch.mv(weight_mat, v))
        weight = weight / sigma
        return weight

    def remove(self, module):
        """Detach the hook's state and restore a plain `name` parameter."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_v')
        delattr(module, self.name + '_orig')
        module.register_parameter(self.name,
                                  torch.nn.Parameter(weight.detach()))

    def __call__(self, module, inputs):
        # Forward pre-hook: refresh the normalized weight before each forward
        # (power iteration only while the module is in training mode).
        setattr(
            module, self.name,
            self.compute_weight(module, do_power_iteration=module.training))

    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Tries to returns a vector `v` s.t. `u = normalize(W @ v)`
        # (the invariant at top of this class) and `u @ W @ v = sigma`.
        # This uses pinverse in case W^T W is not invertible.
        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(),
                               weight_mat.t(), u.unsqueeze(1)).squeeze(1)
        return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))

    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        """Register a SpectralNorm hook on `module`'s parameter `name`."""
        for k, hook in module._forward_pre_hooks.items():
            if isinstance(hook, SpectralNorm) and hook.name == name:
                raise RuntimeError(
                    "Cannot register two spectral_norm hooks on "
                    "the same parameter {}".format(name))

        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]

        with torch.no_grad():
            weight_mat = fn.reshape_weight_to_matrix(weight)

            h, w = weight_mat.size()
            # randomly initialize `u` and `v`
            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)

        delattr(module, fn.name)
        module.register_parameter(fn.name + "_orig", weight)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a plain
        # attribute.
        setattr(module, fn.name, weight.data)
        module.register_buffer(fn.name + "_u", u)
        module.register_buffer(fn.name + "_v", v)

        module.register_forward_pre_hook(fn)

        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
        module._register_load_state_dict_pre_hook(
            SpectralNormLoadStateDictPreHook(fn))
        return fn
157 |
158 |
# Top-level (rather than nested) class: Py2 pickle cannot serialize inner
# classes or instance methods.
class SpectralNormLoadStateDictPreHook(object):
    """Load-state-dict pre-hook handling legacy spectral_norm checkpoints.

    See the docstring of ``SpectralNorm._version`` for the format changes.
    For a version-None state_dict (assuming it went through at least one
    training forward), the stored tensors satisfy ``u = normalize(W_orig @ v)``
    and ``W = W_orig / sigma`` with ``sigma = u @ W_orig @ v``; the code to
    re-derive ``v`` from that is intentionally disabled in this fork.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, state_dict, prefix, local_metadata, strict,
                 missing_keys, unexpected_keys, error_msgs):
        fn = self.fn
        sn_meta = local_metadata.get('spectral_norm', {})
        version = sn_meta.get(fn.name + '.version', None)
        # Checkpoints at version >= 1 already store everything we need.
        if version is not None and version >= 1:
            return
        with torch.no_grad():
            # Legacy path: touch the stored tensors (raises KeyError when a
            # required entry is missing) but do not reconstruct `v`.
            weight_orig = state_dict[prefix + fn.name + '_orig']
            weight_mat = fn.reshape_weight_to_matrix(weight_orig)
            u = state_dict[prefix + fn.name + '_u']
188 |
189 |
# Top-level (rather than nested) class: Py2 pickle cannot serialize inner
# classes or instance methods.
class SpectralNormStateDictHook(object):
    """State-dict hook that stamps the spectral_norm serialization version.

    See the docstring of ``SpectralNorm._version`` for what each version means.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, module, state_dict, prefix, local_metadata):
        sn_meta = local_metadata.setdefault('spectral_norm', {})
        key = self.fn.name + '.version'
        if key in sn_meta:
            raise RuntimeError(
                "Unexpected key in metadata['spectral_norm']: {}".format(key))
        sn_meta[key] = self.fn._version
205 |
206 |
def spectral_norm(module,
                  name='weight',
                  n_power_iterations=1,
                  eps=1e-12,
                  dim=None):
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    Spectral normalization stabilizes the training of discriminators
    (critics) in GANs by dividing the weight tensor by its spectral norm
    :math:`\sigma`, estimated via power iteration (tensors with more than
    two dimensions are flattened to 2-D for the estimate). The rescaling
    happens in a hook that runs before every :meth:`~Module.forward` call.

    See `Spectral Normalization for Generative Adversarial Networks`_ .

    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm
        eps (float, optional): epsilon for numerical stability in
            calculating norms
        dim (int, optional): dimension corresponding to number of outputs;
            defaults to ``0``, or ``1`` for ConvTranspose{1,2,3}d modules

    Returns:
        The original module with the spectral norm hook

    Example::

        >>> m = spectral_norm(nn.Linear(20, 40))
        >>> m
        Linear(in_features=20, out_features=40, bias=True)
        >>> m.weight_u.size()
        torch.Size([40])

    """
    if dim is None:
        # Transposed convolutions store weights as (in, out, ...), so the
        # output dimension is 1 rather than 0.
        transposed_convs = (torch.nn.ConvTranspose1d,
                            torch.nn.ConvTranspose2d,
                            torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed_convs) else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
262 |
263 |
def remove_spectral_norm(module, name='weight'):
    r"""Removes the spectral normalization reparameterization from a module.

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter

    Raises:
        ValueError: if no spectral_norm hook for `name` is registered.

    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    for hook_id, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            # Restore the plain parameter, then drop the hook itself.
            hook.remove(module)
            del module._forward_pre_hooks[hook_id]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(
        name, module))
283 |
284 |
def use_spectral_norm(module, use_sn=False):
    """Return `module`, wrapped with spectral normalization iff `use_sn`."""
    return spectral_norm(module) if use_sn else module
--------------------------------------------------------------------------------
/release_model/README.md:
--------------------------------------------------------------------------------
1 | Place the downloaded model here.
2 |
3 | :link: **Download Links:** [[Google Drive](https://drive.google.com/file/d/1tNJMTJ2gmWdIXJoHVi5-H504uImUiJW9/view?usp=sharing)] [[Baidu Disk](https://pan.baidu.com/s/1qXAErbilY_n_Fh9KB8UF7w?pwd=lsjw)]
4 |
5 | The directory structure will be arranged as:
6 | ```
7 | release_model
8 | |- E2FGVI-CVPR22.pth
9 | |- i3d_rgb_imagenet.pt (for evaluating VFID metric)
10 | |- README.md
11 | ```
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import cv2
3 | from PIL import Image
4 | import numpy as np
5 | import importlib
6 | import os
7 | import argparse
8 | from tqdm import tqdm
9 | import matplotlib.pyplot as plt
10 | from matplotlib import animation
11 | import torch
12 |
13 | from core.utils import to_tensors
14 |
# Command-line interface for single-video inference.
parser = argparse.ArgumentParser(description="E2FGVI")
parser.add_argument("-v", "--video", type=str, required=True)  # .mp4 file or frame directory
parser.add_argument("-c", "--ckpt", type=str, required=True)   # pretrained checkpoint path
parser.add_argument("-m", "--mask", type=str, required=True)   # directory of per-frame masks
# NOTE(review): --model has no default and is not required; if omitted,
# args.model is None and importlib.import_module('model.' + args.model)
# below will fail — confirm whether it should be required=True.
parser.add_argument("--model", type=str, choices=['e2fgvi', 'e2fgvi_hq'])
parser.add_argument("--step", type=int, default=10)            # stride between sampled reference frames
parser.add_argument("--num_ref", type=int, default=-1)         # cap on reference frames (-1 = unlimited)
parser.add_argument("--neighbor_stride", type=int, default=5)  # local temporal window radius/stride
parser.add_argument("--savefps", type=int, default=24)         # fps of the saved result video

# args for e2fgvi_hq (which can handle videos with arbitrary resolution)
parser.add_argument("--set_size", action='store_true', default=False)
parser.add_argument("--width", type=int)
parser.add_argument("--height", type=int)

args = parser.parse_args()

# Module-level aliases consumed by the helper functions below.
ref_length = args.step  # ref_step
num_ref = args.num_ref
neighbor_stride = args.neighbor_stride
default_fps = args.savefps
36 |
37 |
# sample reference frames from the whole video
def get_ref_index(f, neighbor_ids, length):
    """Pick non-local reference frame indices for window centered at `f`.

    Uses the module-level `num_ref` and `ref_length` settings; frames
    already in `neighbor_ids` are excluded.
    """
    if num_ref == -1:
        # Unlimited: every ref_length-th frame outside the local window.
        return [
            i for i in range(0, length, ref_length) if i not in neighbor_ids
        ]

    ref_index = []
    half_span = ref_length * (num_ref // 2)
    lo = max(0, f - half_span)
    hi = min(length, f + half_span)
    for i in range(lo, hi + 1, ref_length):
        if i in neighbor_ids:
            continue
        if len(ref_index) > num_ref:
            break
        ref_index.append(i)
    return ref_index
54 |
55 |
# read frame-wise masks
def read_mask(mpath, size):
    """Load every mask image in `mpath`, binarize and dilate it.

    Returns a list of PIL images with values 0/255, resized to `size`
    (a (width, height) tuple) with nearest-neighbor interpolation.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    masks = []
    for fname in sorted(os.listdir(mpath)):
        mask = Image.open(os.path.join(mpath, fname))
        mask = mask.resize(size, Image.NEAREST)
        # any non-zero grayscale pixel counts as masked
        mask = (np.array(mask.convert('L')) > 0).astype(np.uint8)
        # grow the mask slightly so inpainting covers mask borders
        mask = cv2.dilate(mask, kernel, iterations=4)
        masks.append(Image.fromarray(mask * 255))
    return masks
71 |
72 |
# read frames from video
def read_frame_from_videos(args):
    """Decode all frames of `args.video` as RGB PIL images.

    `args.use_mp4` selects between reading an .mp4 container and reading
    a directory of image files (sorted by filename).
    """
    vname = args.video
    frames = []
    if args.use_mp4:
        vidcap = cv2.VideoCapture(vname)
        ok, bgr = vidcap.read()
        while ok:
            frames.append(Image.fromarray(cv2.cvtColor(bgr,
                                                       cv2.COLOR_BGR2RGB)))
            ok, bgr = vidcap.read()
    else:
        for fname in sorted(os.listdir(vname)):
            bgr = cv2.imread(vname + '/' + fname)
            frames.append(Image.fromarray(cv2.cvtColor(bgr,
                                                       cv2.COLOR_BGR2RGB)))
    return frames
95 |
96 |
# resize frames
def resize_frames(frames, size=None):
    """Resize all frames to `size`; with size=None keep them unchanged.

    Returns (frames, size) where `size` is the effective (width, height).
    """
    if size is None:
        return frames, frames[0].size
    return [frame.resize(size) for frame in frames], size
104 |
105 |
def main_worker():
    """Run E2FGVI inference on one video: load model, inpaint, save, show."""
    # set up models
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # e2fgvi only supports a fixed 432x240 resolution; e2fgvi_hq accepts an
    # arbitrary (or user-specified) size.
    if args.model == "e2fgvi":
        size = (432, 240)
    elif args.set_size:
        size = (args.width, args.height)
    else:
        size = None

    net = importlib.import_module('model.' + args.model)
    model = net.InpaintGenerator().to(device)
    data = torch.load(args.ckpt, map_location=device)
    model.load_state_dict(data)
    print(f'Loading model from: {args.ckpt}')
    model.eval()

    # prepare dataset
    args.use_mp4 = True if args.video.endswith('.mp4') else False
    print(
        f'Loading videos and masks from: {args.video} | INPUT MP4 format: {args.use_mp4}'
    )
    frames = read_frame_from_videos(args)
    frames, size = resize_frames(frames, size)
    h, w = size[1], size[0]
    video_length = len(frames)
    # normalize frames to [-1, 1], shape [1, t, c, h, w]
    imgs = to_tensors()(frames).unsqueeze(0) * 2 - 1
    frames = [np.array(f).astype(np.uint8) for f in frames]

    masks = read_mask(args.mask, size)
    # per-frame 0/1 masks of shape (h, w, 1) for numpy-side compositing
    binary_masks = [
        np.expand_dims((np.array(m) != 0).astype(np.uint8), 2) for m in masks
    ]
    masks = to_tensors()(masks).unsqueeze(0)
    imgs, masks = imgs.to(device), masks.to(device)
    comp_frames = [None] * video_length

    # completing holes by e2fgvi
    print(f'Start test...')
    # slide a local window over the video; overlapping predictions are
    # averaged below
    for f in tqdm(range(0, video_length, neighbor_stride)):
        neighbor_ids = [
            i for i in range(max(0, f - neighbor_stride),
                             min(video_length, f + neighbor_stride + 1))
        ]
        ref_ids = get_ref_index(f, neighbor_ids, video_length)
        selected_imgs = imgs[:1, neighbor_ids + ref_ids, :, :, :]
        selected_masks = masks[:1, neighbor_ids + ref_ids, :, :, :]
        with torch.no_grad():
            # zero out the masked (hole) regions before feeding the model
            masked_imgs = selected_imgs * (1 - selected_masks)
            # pad H/W by mirror-reflection to multiples of 60/108
            # (presumably the model's stride requirement — TODO confirm)
            mod_size_h = 60
            mod_size_w = 108
            h_pad = (mod_size_h - h % mod_size_h) % mod_size_h
            w_pad = (mod_size_w - w % mod_size_w) % mod_size_w
            masked_imgs = torch.cat(
                [masked_imgs, torch.flip(masked_imgs, [3])],
                3)[:, :, :, :h + h_pad, :]
            masked_imgs = torch.cat(
                [masked_imgs, torch.flip(masked_imgs, [4])],
                4)[:, :, :, :, :w + w_pad]
            pred_imgs, _ = model(masked_imgs, len(neighbor_ids))
            # crop the padding back off and map [-1, 1] -> [0, 255]
            pred_imgs = pred_imgs[:, :, :h, :w]
            pred_imgs = (pred_imgs + 1) / 2
            pred_imgs = pred_imgs.cpu().permute(0, 2, 3, 1).numpy() * 255
            for i in range(len(neighbor_ids)):
                idx = neighbor_ids[i]
                # composite: prediction inside the hole, original outside
                img = np.array(pred_imgs[i]).astype(
                    np.uint8) * binary_masks[idx] + frames[idx] * (
                        1 - binary_masks[idx])
                if comp_frames[idx] is None:
                    comp_frames[idx] = img
                else:
                    # average with the prediction from the previous window
                    comp_frames[idx] = comp_frames[idx].astype(
                        np.float32) * 0.5 + img.astype(np.float32) * 0.5

    # saving videos
    print('Saving videos...')
    save_dir_name = 'results'
    ext_name = '_results.mp4'
    save_base_name = args.video.split('/')[-1]
    save_name = save_base_name.replace(
        '.mp4', ext_name) if args.use_mp4 else save_base_name + ext_name
    if not os.path.exists(save_dir_name):
        os.makedirs(save_dir_name)
    save_path = os.path.join(save_dir_name, save_name)
    writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"),
                             default_fps, size)
    for f in range(video_length):
        comp = comp_frames[f].astype(np.uint8)
        # frames are RGB in memory; swap channels for OpenCV's BGR writer
        writer.write(cv2.cvtColor(comp, cv2.COLOR_BGR2RGB))
    writer.release()
    print(f'Finish test! The result video is saved in: {save_path}.')

    # show results
    print('Let us enjoy the result!')
    fig = plt.figure('Let us enjoy the result')
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.axis('off')
    ax1.set_title('Original Video')
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.axis('off')
    ax2.set_title('Our Result')
    imdata1 = ax1.imshow(frames[0])
    imdata2 = ax2.imshow(comp_frames[0].astype(np.uint8))

    def update(idx):
        # animation callback: swap in frame `idx` on both panels
        imdata1.set_data(frames[idx])
        imdata2.set_data(comp_frames[idx].astype(np.uint8))

    fig.tight_layout()
    anim = animation.FuncAnimation(fig,
                                   update,
                                   frames=len(frames),
                                   interval=50)
    plt.show()
221 |
222 |
# Script entry point.
if __name__ == '__main__':
    main_worker()
225 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 | from shutil import copyfile
5 |
6 | import torch
7 | import torch.multiprocessing as mp
8 |
9 | from core.trainer import Trainer
10 | from core.dist import (
11 | get_world_size,
12 | get_local_rank,
13 | get_global_rank,
14 | get_master_ip,
15 | )
16 |
# Command-line interface: training config path and DDP rendezvous port.
parser = argparse.ArgumentParser(description='E2FGVI')
parser.add_argument('-c',
                    '--config',
                    default='configs/train_e2fgvi.json',
                    type=str)
parser.add_argument('-p', '--port', default='23455', type=str)
args = parser.parse_args()
24 |
25 |
def main_worker(rank, config):
    """Per-process training entry point.

    Args:
        rank: process index from ``mp.spawn`` (or -1 when processes were
            launched externally and ranks already sit in `config`).
        config: parsed JSON training configuration; mutated in place with
            runtime keys (ranks, device, save directories).
    """
    if 'local_rank' not in config:
        # locally spawned: local and global rank are both the spawn index
        config['local_rank'] = config['global_rank'] = rank
    if config['distributed']:
        torch.cuda.set_device(int(config['local_rank']))
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=config['init_method'],
                                             world_size=config['world_size'],
                                             rank=config['global_rank'],
                                             group_name='mtorch')
        print('using GPU {}-{} for training'.format(int(config['global_rank']),
                                                    int(config['local_rank'])))

    # checkpoints go to <save_dir>/<net>_<config-name>
    config['save_dir'] = os.path.join(
        config['save_dir'],
        '{}_{}'.format(config['model']['net'],
                       os.path.basename(args.config).split('.')[0]))

    # evaluation scores go to ./scores/<net>_<config-name>
    config['save_metric_dir'] = os.path.join(
        './scores',
        '{}_{}'.format(config['model']['net'],
                       os.path.basename(args.config).split('.')[0]))

    if torch.cuda.is_available():
        config['device'] = torch.device("cuda:{}".format(config['local_rank']))
    else:
        config['device'] = 'cpu'

    # only rank 0 (or the single process) creates folders and archives the
    # config file next to the checkpoints
    if (not config['distributed']) or config['global_rank'] == 0:
        os.makedirs(config['save_dir'], exist_ok=True)
        os.makedirs(config['save_metric_dir'], exist_ok=True)
        config_path = os.path.join(config['save_dir'],
                                   args.config.split('/')[-1])
        if not os.path.isfile(config_path):
            copyfile(args.config, config_path)
        print('[**] create folder {}'.format(config['save_dir']))

    trainer = Trainer(config)
    trainer.train()
65 |
66 |
if __name__ == "__main__":

    # let cudnn pick the fastest conv algorithms for fixed input sizes
    torch.backends.cudnn.benchmark = True

    # share tensors between workers via the filesystem instead of file
    # descriptors (avoids fd exhaustion with many DataLoader workers)
    mp.set_sharing_strategy('file_system')

    # loading configs
    config = json.load(open(args.config))

    # setting distributed configurations
    config['world_size'] = get_world_size()
    config['init_method'] = f"tcp://{get_master_ip()}:{args.port}"
    config['distributed'] = True if config['world_size'] > 1 else False
    print(config['world_size'])
    # setup distributed parallel training environments
    if get_master_ip() == "127.0.0.1":
        # manually launch distributed processes
        mp.spawn(main_worker, nprocs=config['world_size'], args=(config, ))
    else:
        # multiple processes have been launched by openmpi
        config['local_rank'] = get_local_rank()
        config['global_rank'] = get_global_rank()
        main_worker(-1, config)
90 |
--------------------------------------------------------------------------------