├── .gitignore
├── LICENSE
├── README.md
├── assets
│   └── images
│       ├── all-datasets.png
│       ├── motivation.gif
│       └── tartanair-ll.gif
├── config
│   ├── config.yaml
│   └── ll-config.yaml
├── data
│   ├── BAtest
│   │   ├── KF.txt
│   │   ├── MP.txt
│   │   └── Match.txt
│   └── office2 tiny
│       ├── depth_left
│       │   ├── 000000_left_depth.npy
│       │   └── 000001_left_depth.npy
│       ├── image_left
│       │   ├── 000000_left.png
│       │   └── 000001_left.png
│       └── pose_left.txt
├── datasets
│   ├── __init__.py
│   ├── augment.py
│   ├── base.py
│   ├── nordland.py
│   ├── robotcar.py
│   └── tartanair.py
├── losses
│   ├── __init__.py
│   ├── lifelong.py
│   └── loss.py
├── main.py
├── main_single.py
├── models
│   ├── __init__.py
│   ├── featurenet.py
│   └── memory.py
└── utils
    ├── __init__.py
    ├── evaluation.py
    ├── geometry.py
    ├── misc.py
    ├── utils.py
    └── visualization.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | .vscode/
132 | data/
133 | vidout
134 | logs
135 | unused
136 | playground
137 | saved_models
138 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, Chen Wang
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AirLoop
2 |
3 | This repo contains the source code for paper:
4 |
5 | [Dasong Gao](http://theairlab.org/team/dasongg/), [Chen Wang](https://chenwang.site), [Sebastian Scherer](http://theairlab.org/team/sebastian/). "[AirLoop: Lifelong Loop Closure Detection](https://arxiv.org/pdf/2109.08975)." International Conference on Robotics and Automation (ICRA), 2022.
6 |
7 |
8 | ![AirLoop motivation](assets/images/motivation.gif)
9 |
10 |
11 |
12 |
13 | ## Demo
14 |
15 | Examples of loop closure detection on each dataset. Note that our model is able to handle cross-environment loop closure detection despite being trained only on individual environments sequentially:
16 |
17 | ![Loop closure detection examples on all datasets](assets/images/all-datasets.png)
18 |
19 |
20 | Improved loop closure detection on TartanAir after extended training:
21 |
22 | ![Lifelong loop closure detection on TartanAir](assets/images/tartanair-ll.gif)
23 |
24 |
25 | ## Usage
26 | ### Dependencies
27 |
28 | - Python >= 3.5
29 | - PyTorch < 1.8
30 | - OpenCV >= 3.4
31 | - NumPy >= 1.19
32 | - Matplotlib
33 | - ConfigArgParse
34 | - PyYAML
35 | - tqdm
36 |
37 | ### Data
38 | We used the following subsets of datasets in our experiments:
39 | - [TartanAir](https://theairlab.org/tartanair-dataset/), download with [tartanair_tools](https://github.com/castacks/tartanair_tools)
40 | - Train/Test: `abandonedfactory_night`, `carwelding`, `neighborhood`, `office2`, `westerndesert`;
41 | - [RobotCar](https://robotcar-dataset.robots.ox.ac.uk/), download with [RobotCarDataset-Scraper](https://github.com/mttgdd/RobotCarDataset-Scraper)
42 | - Train: `2014-11-28-12-07-13`, `2014-12-10-18-10-50`, `2014-12-16-09-14-09`;
43 | - Test: `2014-06-24-14-47-45`, `2014-12-05-15-42-07`, `2014-12-16-18-44-24`;
44 | - [Nordland](https://webdiis.unizar.es/~jmfacil/pr-nordland/), download with [gdown](https://github.com/wkentaro/gdown) from [Google Drive](https://drive.google.com/drive/folders/1SmrDOeUgBnJbpW187VFWxGjS7XdbZK5t)
45 | - Train/Test: All four seasons with recommended splits.
46 |
47 | The datasets are arranged as follows:
48 | ```
49 | $DATASET_ROOT/
50 | ├── tartanair/
51 | │   ├── abandonedfactory_night/
52 | │   │   ├── Easy/
53 | │   │   │   └── ...
54 | │   │   └── Hard/
55 | │   │       └── ...
56 | │   └── ...
57 | ├── robotcar/
58 | │   ├── train/
59 | │   │   ├── 2014-11-28-12-07-13/
60 | │   │   └── ...
61 | │   └── test/
62 | │       ├── 2014-06-24-14-47-45/
63 | │       └── ...
64 | └── nordland/
65 |     ├── train/
66 |     │   ├── fall_images_train/
67 |     │   └── ...
68 |     └── test/
69 |         ├── fall_images_test/
70 |         └── ...
71 | ```
72 |
73 | > **Note**: For TartanAir, only `<environment>/<difficulty>/<image/depth>_left.zip` is required. After `unzip`ing the downloaded files, make sure to remove the duplicate `<environment>` directory level (`tartanair/abandonedfactory/abandonedfactory/Easy/...` -> `tartanair/abandonedfactory/Easy/...`).
74 |
75 | ### Configuration
76 | The following values in [`config/config.yaml`](config/config.yaml) need to be set:
77 | - `dataset-root`: The parent directory to all datasets (`$DATASET_ROOT` above);
78 | - `catalog-dir`: An (initially empty) directory for caching the processed dataset index;
79 | - `eval-gt-dir`: An (initially empty) directory for ground truth produced during evaluation.
80 |
81 | ### Commandline
82 | The following command trains the model with the specified method on TartanAir with the default configuration and evaluates the performance:
83 |
84 | ```sh
85 | $ python main.py --method <method>
86 | ```
87 |
88 | Extra options*:
89 | - `--dataset <dataset>`: dataset to use.
90 | - `--envs <envs>`: order of environments.**
91 | - `--epochs <epochs>`: number of epochs to train in each environment.**
92 | - `--eval-save <save_path>`: save path for predicted pairwise similarities generated during evaluation.
93 | - `--out-dir <out_dir>`: output directory for model checkpoints and importance weights.
94 | - `--log-dir <log_dir>`: TensorBoard `logdir`.
95 | - `--skip-train`: perform evaluation only.
96 | - `--skip-eval`: perform training only.
97 |
98 | \* See [`main_single.py`](main_single.py) for more settings.
99 | \** See [`main.py`](main.py) for defaults.
100 |
101 | Evaluation results (R@100P in each environment) will be logged to console. `--eval-save` can be specified to save the predicted similarities in `.npz` format.
102 |
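103 | For example, to train sequentially on TartanAir with the combined RMAS + RKD regularizers (the `airloop` preset defined in [`config/ll-config.yaml`](config/ll-config.yaml)) and then evaluate, with a placeholder output path:
104 |
105 | ```sh
106 | $ python main.py --method airloop --dataset tartanair
107 | $ python main.py --method airloop --skip-train --eval-save ./airloop-sim.npz  # evaluation only
108 | ```
109 |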
--------------------------------------------------------------------------------
/assets/images/all-datasets.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/assets/images/all-datasets.png
--------------------------------------------------------------------------------
/assets/images/motivation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/assets/images/motivation.gif
--------------------------------------------------------------------------------
/assets/images/tartanair-ll.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/assets/images/tartanair-ll.gif
--------------------------------------------------------------------------------
/config/config.yaml:
--------------------------------------------------------------------------------
1 | dataset-root: /data/datasets
2 | catalog-dir: /data/catalog
3 | eval-gt-dir: /data/gt/
4 | log-dir: ./logs
5 | ll-config: ./config/ll-config.yaml
6 | viz-start: 0
7 | viz-freq: 100
8 |
--------------------------------------------------------------------------------
/config/ll-config.yaml:
--------------------------------------------------------------------------------
1 | si:
2 |   ll-method: ['si']
3 |   ll-strength: [0.1]
4 |
5 | ewc:
6 |   ll-method: ['ewc']
7 |   ll-strength: [200]
8 |
9 | rmas:
10 |   ll-method: ['rmas']
11 |   ll-strength: [100]
12 |
13 | rkd:
14 |   ll-method: ['rkd']
15 |   ll-strength: [2]
16 |
17 | airloop:
18 |   ll-method: ['rmas', 'rkd']
19 |   ll-strength: [100, 2]
20 |
--------------------------------------------------------------------------------
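The presets above evidently pair each entry of `ll-method` with the matching entry of `ll-strength`; the `airloop` preset, for instance, applies the RMAS and RKD regularizers with weights 100 and 2 respectively. A minimal sketch of reading a preset (assumes PyYAML, which the README lists as a dependency):

```python
import yaml

# load the lifelong-learning presets: each name maps to parallel method/strength lists
with open('config/ll-config.yaml') as f:
    presets = yaml.safe_load(f)

for method, strength in zip(presets['airloop']['ll-method'], presets['airloop']['ll-strength']):
    print(method, strength)  # -> rmas 100, then rkd 2
```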
/data/BAtest/KF.txt:
--------------------------------------------------------------------------------
1 | 16 0.811594 0.00233575 0.584217 0.576966 -0.000128627 0.999993 -0.00381936 0.12653 -0.584222 0.00302462 0.811588 -0.341099 0 0 0 1
2 | 14 0.79396 0.00206879 0.607967 0.523258 0.000362615 0.999992 -0.00387632 0.136354 -0.60797 0.0032981 0.793953 -0.28544 0 0 0 1
3 | 8 0.916927 0.00099024 0.399053 0.266238 -0.00029577 0.999998 -0.00180186 0.0742108 -0.399054 0.00153415 0.916926 -0.045619 0 0 0 1
4 | 2 0.989029 -0.000786339 0.14772 0.051101 0.000957559 0.999999 -0.00108798 -0.00288055 -0.147719 0.00121749 0.989029 0.0381547 0 0 0 1
5 | 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1
6 | 1 0.993764 -0.000789647 0.111499 0.0184998 0.000915543 0.999999 -0.00107793 -0.00909869 -0.111498 0.00117329 0.993764 0.0287297 0 0 0 1
7 | 7 0.93101 0.000793972 0.364992 0.236363 -0.000131226 0.999998 -0.00184058 0.0638614 -0.364993 0.0016657 0.931009 -0.0239727 0 0 0 1
8 | 11 0.826548 0.000446937 0.562866 0.414706 0.000320615 0.999999 -0.00126485 0.110815 -0.562866 0.00122592 0.826547 -0.190093 0 0 0 1
9 | 3 0.982795 -0.000149927 0.184702 0.0908425 0.000501897 0.999998 -0.00185885 0.0097209 -0.184702 0.00191957 0.982793 0.039484 0 0 0 1
10 | 167 -0.58446 -0.00362973 -0.811414 -0.017011 0.00116652 0.999985 -0.00531351 0.126348 0.811422 -0.00405207 -0.584447 -2.97805 0 0 0 1
11 | 5 0.966165 0.000680542 0.257924 0.17274 -0.000213489 0.999998 -0.00183881 0.0390201 -0.257925 0.00172153 0.966163 0.0190489 0 0 0 1
12 | 13 0.782245 0.000963703 0.622971 0.473322 0.00139338 0.999994 -0.00329656 0.128897 -0.62297 0.00344674 0.782238 -0.262109 0 0 0 1
13 | 10 0.866044 0.00103566 0.499967 0.353605 -8.45731e-05 0.999998 -0.00192496 0.100243 -0.499968 0.00162481 0.866042 -0.125841 0 0 0 1
14 | 6 0.955905 0.000657791 0.293676 0.198194 -0.00025194 0.999999 -0.00141979 0.0480791 -0.293677 0.0012832 0.955904 0.00554862 0 0 0 1
15 | 4 0.975077 0.000423212 0.221864 0.131422 -7.10839e-06 0.999998 -0.00187628 0.0237689 -0.221865 0.00182794 0.975076 0.0324542 0 0 0 1
16 | 12 0.804998 0.00054926 0.593277 0.444945 0.00111351 0.999996 -0.00243669 0.119719 -0.593276 0.00262215 0.804995 -0.22369 0 0 0 1
17 | 9 0.883954 0.00186191 0.467571 0.323334 -0.000663521 0.999996 -0.00272768 0.093852 -0.467574 0.0021009 0.883951 -0.0966325 0 0 0 1
18 | 23 0.88074 0.00122496 0.473598 0.6838 -5.45718e-05 0.999997 -0.00248501 0.169045 -0.4736 0.00216281 0.880738 -0.479867 0 0 0 1
19 | 22 0.870803 0.00223873 0.491628 0.66706 -0.000389072 0.999992 -0.00386453 0.165606 -0.491633 0.00317396 0.870797 -0.472944 0 0 0 1
20 | 18 0.828816 0.0031345 0.559512 0.605663 -0.000904829 0.999991 -0.0042618 0.12443 -0.55952 0.00302599 0.828811 -0.429098 0 0 0 1
21 | 19 0.839883 0.00208533 0.542764 0.626209 -0.000550072 0.999995 -0.00299085 0.147466 -0.542767 0.0022134 0.83988 -0.465157 0 0 0 1
22 | 24 0.869564 0.00114338 0.493819 0.680975 1.54119e-05 0.999997 -0.00234251 0.173266 -0.49382 0.00204457 0.869562 -0.526077 0 0 0 1
23 | 27 0.75757 0.00191618 0.652752 0.585331 -0.000405992 0.999997 -0.00246434 0.176806 -0.652754 0.0016019 0.757568 -0.804761 0 0 0 1
24 | 25 0.8358 0.00268256 0.549027 0.652338 -0.000670267 0.999992 -0.00386562 0.176972 -0.549033 0.00286289 0.835796 -0.592775 0 0 0 1
25 | 26 0.798201 0.0020325 0.602388 0.624585 -0.000185725 0.999995 -0.00312797 0.179306 -0.602391 0.00238487 0.798197 -0.705865 0 0 0 1
26 | 32 0.482198 0.00164445 0.87606 0.201263 -0.00144747 0.999998 -0.00108038 0.151422 -0.876061 -0.000747117 0.4822 -1.24751 0 0 0 1
27 | 30 0.591539 0.00214254 0.806274 0.372569 -0.000830844 0.999998 -0.00204777 0.163937 -0.806276 0.000541448 0.591539 -1.09226 0 0 0 1
28 | 28 0.690188 0.00229201 0.723626 0.50439 -0.000373417 0.999996 -0.00281122 0.176036 -0.72363 0.00167005 0.690186 -0.890081 0 0 0 1
29 | 41 -0.126932 -0.00120122 0.991911 -0.857417 -0.000489624 0.999999 0.00114836 0.116179 -0.991911 -0.0003399 -0.126932 -1.34663 0 0 0 1
30 | 33 0.421716 0.00142153 0.906727 0.102635 -0.00149749 0.999999 -0.000871282 0.147112 -0.906727 -0.000990377 0.421718 -1.30276 0 0 0 1
31 | 29 0.642553 0.00225542 0.766238 0.444493 -0.000442165 0.999997 -0.00257269 0.171566 -0.766241 0.00131429 0.642552 -0.99179 0 0 0 1
32 | 31 0.537899 0.0019831 0.843007 0.291014 -0.0011434 0.999998 -0.00162284 0.157597 -0.843009 -9.09712e-05 0.5379 -1.17703 0 0 0 1
33 | 38 0.0835143 0.00183281 0.996505 -0.422207 -0.00174035 0.999997 -0.00169338 0.135375 -0.996505 -0.00159285 0.0835172 -1.34157 0 0 0 1
34 | 39 0.0132616 0.000602186 0.999912 -0.550861 -0.000892318 0.999999 -0.000590404 0.131806 -0.999912 -0.00088441 0.0132621 -1.34335 0 0 0 1
35 | 44 -0.27826 -0.00115115 0.960505 -1.18298 -0.000808104 0.999999 0.000964379 0.0987704 -0.960505 -0.00050784 -0.278261 -1.26738 0 0 0 1
36 | 35 0.290989 0.00126602 0.956726 -0.110882 -0.00123365 0.999999 -0.000948061 0.140859 -0.956726 -0.000904392 0.29099 -1.36733 0 0 0 1
37 | 50 -0.466287 -0.0010063 0.884633 -1.64731 -0.000402319 0.999999 0.000925476 0.0496564 -0.884633 7.56328e-05 -0.466288 -0.984557 0 0 0 1
38 | 45 -0.310683 -0.000939867 0.950513 -1.28113 -0.000368874 1 0.000868229 0.0936547 -0.950513 -8.08757e-05 -0.310683 -1.25227 0 0 0 1
39 | 42 -0.195348 -0.00156977 0.980733 -1.00624 -0.000902647 0.999999 0.00142081 0.106321 -0.980734 -0.000607703 -0.195349 -1.32413 0 0 0 1
40 | 40 -0.0567775 0.000121735 0.998387 -0.698242 -0.000899592 1 -0.00017309 0.123648 -0.998386 -0.000907968 -0.0567774 -1.3502 0 0 0 1
41 | 49 -0.435831 -0.00147805 0.900027 -1.60249 -0.000891282 0.999999 0.00121063 0.0694875 -0.900028 -0.000274547 -0.435832 -1.08627 0 0 0 1
42 | 54 -0.509147 -0.000899795 0.860679 -1.50432 -0.000225831 1 0.000911854 0.0244191 -0.86068 0.000269899 -0.509147 -0.920699 0 0 0 1
43 | 57 -0.522319 -0.00109272 0.852749 -1.42213 -0.000224025 0.999999 0.00114418 0.0176084 -0.85275 0.000406592 -0.522319 -0.915848 0 0 0 1
44 | 52 -0.50033 -0.00103442 0.865834 -1.58117 0.000484627 0.999999 0.00147475 0.0335034 -0.865835 0.00115747 -0.500329 -0.911353 0 0 0 1
45 | 51 -0.496467 -0.0006272 0.868056 -1.62855 0.000412336 0.999999 0.000958361 0.0389227 -0.868056 0.000833724 -0.496466 -0.911928 0 0 0 1
46 | 56 -0.51784 -0.000461208 0.855477 -1.44437 -1.85031e-05 1 0.000527923 0.0201283 -0.855478 0.000257551 -0.51784 -0.921086 0 0 0 1
47 | 55 -0.513599 -0.000978197 0.85803 -1.47225 -0.000372098 1 0.000917319 0.0212276 -0.85803 0.000151863 -0.513599 -0.921828 0 0 0 1
48 | 61 -0.572423 -0.00248579 0.819955 -1.40066 -0.00135576 0.999997 0.00208514 0.00800051 -0.819958 8.19209e-05 -0.572424 -0.827708 0 0 0 1
49 | 58 -0.526626 -0.000700667 0.850097 -1.40308 -6.88836e-06 1 0.000819953 0.0155465 -0.850097 0.000425953 -0.526626 -0.909174 0 0 0 1
50 | 65 -0.862255 -0.00212669 0.506469 -1.44284 -0.00123108 0.999997 0.00210314 0.00889606 -0.506472 0.00118994 -0.862255 -0.204989 0 0 0 1
51 | 64 -0.762302 -0.00232417 0.647218 -1.45001 -0.000871692 0.999996 0.00256431 0.00188156 -0.647222 0.00139061 -0.762301 -0.445586 0 0 0 1
52 | 59 -0.530296 -0.00117865 0.847812 -1.39161 -0.000232814 0.999999 0.0012446 0.0134902 -0.847812 0.000462627 -0.530296 -0.903311 0 0 0 1
53 | 62 -0.640694 -0.00283829 0.767791 -1.44072 -0.00159181 0.999996 0.00236838 0.00452781 -0.767794 0.000295231 -0.640696 -0.701705 0 0 0 1
54 | 71 -0.997335 7.57558e-05 0.0729632 -1.0923 0.000183524 0.999999 0.00147031 0.0924233 -0.072963 0.00147979 -0.997334 0.169432 0 0 0 1
55 | 67 -0.936396 -0.00103395 0.350943 -1.34641 -0.000483146 0.999999 0.00165705 0.0543375 -0.350944 0.0013821 -0.936395 -0.0415264 0 0 0 1
56 | 63 -0.703955 -0.00282986 0.710239 -1.46662 -0.00142434 0.999996 0.00257262 0.00244027 -0.710243 0.000799383 -0.703956 -0.572946 0 0 0 1
57 | 69 -0.990753 -0.00102448 0.135671 -1.24407 -0.000775676 0.999998 0.00188675 0.089082 -0.135672 0.00176407 -0.990752 0.168486 0 0 0 1
58 | 60 -0.534996 -0.00174216 0.844853 -1.38167 -0.000534929 0.999998 0.00172334 0.0114167 -0.844855 0.000470044 -0.534996 -0.893372 0 0 0 1
59 | 72 -0.999369 -0.000145263 0.0355293 -1.05459 -9.2281e-05 0.999999 0.00149286 0.0920417 -0.0355294 0.00148864 -0.999368 0.19649 0 0 0 1
60 | 68 -0.973691 -0.00173605 0.227867 -1.29567 -0.00131214 0.999997 0.00201182 0.0767523 -0.22787 0.00165989 -0.97369 0.0844545 0 0 0 1
61 | 75 -0.999205 -0.00105723 -0.039847 -0.928331 -0.00114088 0.999997 0.00207661 0.0881874 0.0398446 0.00212042 -0.999204 0.215463 0 0 0 1
62 | 70 -0.995165 -0.000358769 0.0982158 -1.16147 -0.000171029 0.999998 0.00191993 0.0930439 -0.0982163 0.00189385 -0.995163 0.170515 0 0 0 1
63 | 82 -0.994369 0.000371024 0.105974 -0.677897 0.000399941 1 0.000251623 0.0463973 -0.105974 0.00029259 -0.994369 -0.0470717 0 0 0 1
64 | 80 -0.997575 0.000133249 0.069607 -0.739995 0.000172248 1 0.000554269 0.0667525 -0.0696069 0.000564915 -0.997574 0.0144239 0 0 0 1
65 | 81 -0.996158 0.000546029 0.0875734 -0.699736 0.000522667 1 -0.000289702 0.0544799 -0.0875736 -0.000242817 -0.996158 -0.021637 0 0 0 1
66 | 77 -0.999907 -0.00156085 0.0135266 -0.914259 -0.00152494 0.999995 0.00266431 0.0870359 -0.0135307 0.00264344 -0.999905 0.152558 0 0 0 1
67 | 76 -0.999079 -0.00100201 -0.0428914 -0.925302 -0.00109576 0.999997 0.00216228 0.088385 0.0428892 0.00220729 -0.999077 0.216687 0 0 0 1
68 | 86 -0.985478 0.00247228 0.169784 -0.496942 0.00204173 0.999994 -0.00271044 -0.0248455 -0.16979 -0.00232442 -0.985478 -0.236131 0 0 0 1
69 | 78 -0.999512 -0.000412834 0.0312478 -0.8731 -0.000354663 0.999998 0.00186713 0.0853056 -0.0312485 0.00185514 -0.99951 0.114006 0 0 0 1
70 | 89 -0.976741 0.00172273 0.214414 -0.364168 0.00134392 0.999997 -0.00191247 -0.0124192 -0.214416 -0.00157983 -0.976741 -0.34527 0 0 0 1
71 | 87 -0.98359 0.00310951 0.18039 -0.450803 0.00255383 0.999991 -0.00331259 -0.025199 -0.180398 -0.00279754 -0.98359 -0.277968 0 0 0 1
72 | 91 -0.983026 0.000604098 0.183465 -0.312564 0.000559135 1 -0.000296806 0.00561355 -0.183465 -0.000189186 -0.983026 -0.371026 0 0 0 1
73 | 96 -0.951815 -0.00189047 -0.306666 -0.311101 -0.00153972 0.999998 -0.00138565 0.0430722 0.306668 -0.000846705 -0.951816 -0.394512 0 0 0 1
74 | 90 -0.974222 0.00207368 0.225581 -0.344509 0.00165769 0.999997 -0.00203351 -0.00880286 -0.225585 -0.00160714 -0.974222 -0.358872 0 0 0 1
75 | 85 -0.987281 0.00109367 0.158981 -0.549916 0.0010643 0.999999 -0.000269859 -0.0137977 -0.158981 -9.72229e-05 -0.987282 -0.182182 0 0 0 1
76 | 84 -0.989845 -0.00106701 0.142144 -0.577753 -0.00100452 0.999999 0.000511393 -0.00665977 -0.142145 0.000363414 -0.989846 -0.150066 0 0 0 1
77 | 88 -0.979065 0.00212244 0.203534 -0.409368 0.0015222 0.999994 -0.00310557 -0.022186 -0.20354 -0.00273074 -0.979063 -0.313611 0 0 0 1
78 | 83 -0.991422 0.000681067 0.130696 -0.630015 0.000707643 1 0.000156898 0.023021 -0.130696 0.000248038 -0.991422 -0.0952834 0 0 0 1
79 | 92 -0.996404 0.000157927 0.0847245 -0.320926 0.00010308 1 -0.000651729 0.0115623 -0.0847245 -0.000640652 -0.996404 -0.356049 0 0 0 1
80 | 94 -0.993502 -0.00118345 -0.113806 -0.303427 -0.00110947 0.999999 -0.00071339 0.0300872 0.113806 -0.000582492 -0.993503 -0.370505 0 0 0 1
81 | 93 -0.999898 0.000477643 -0.0142617 -0.308185 0.000505793 0.999998 -0.0019703 0.01943 0.0142607 -0.00197732 -0.999896 -0.362996 0 0 0 1
82 | 97 -0.916781 -0.00159209 -0.399386 -0.31621 -0.00124711 0.999999 -0.00112363 0.0511563 0.399387 -0.000532048 -0.916782 -0.428891 0 0 0 1
83 | 95 -0.977555 -0.00273336 -0.210663 -0.30805 -0.00250011 0.999996 -0.0013735 0.0364232 0.210666 -0.000815987 -0.977558 -0.375526 0 0 0 1
84 | 99 -0.847588 -0.00161439 -0.530652 -0.351678 -0.00101882 0.999998 -0.00141495 0.0577852 0.530653 -0.000658654 -0.847589 -0.453399 0 0 0 1
85 | 98 -0.895868 -0.000908729 -0.444319 -0.321071 -0.000534506 0.999999 -0.000967508 0.0550165 0.44432 -0.000629268 -0.895868 -0.451086 0 0 0 1
86 | 106 -0.658957 -0.00108708 -0.75218 -0.466853 -0.000594377 0.999999 -0.000924533 0.058381 0.75218 -0.00016215 -0.658958 -0.335851 0 0 0 1
87 | 108 -0.591946 -0.00122675 -0.805977 -0.496765 -0.000628839 0.999999 -0.00106022 0.060394 0.805978 -0.000120764 -0.591946 -0.32621 0 0 0 1
88 | 107 -0.63712 -0.000961191 -0.770764 -0.476843 -0.00126759 0.999999 -0.000199259 0.0604503 0.770764 0.000850063 -0.637121 -0.328823 0 0 0 1
89 | 110 -0.544814 -0.000993722 -0.838556 -0.52025 -4.62443e-05 0.999999 -0.00115499 0.0629908 0.838557 -0.000590478 -0.544814 -0.335783 0 0 0 1
90 | 118 -0.313488 -0.00177015 -0.949591 -0.68513 -0.0002195 0.999998 -0.00179165 0.0775969 0.949592 -0.000353226 -0.313488 -0.419607 0 0 0 1
91 | 111 -0.520595 -0.000701754 -0.853803 -0.533822 -0.000193787 1 -0.000703756 0.0655202 0.853804 -0.000200916 -0.520595 -0.345812 0 0 0 1
92 | 113 -0.471693 -0.00223837 -0.88176 -0.565553 -0.000847456 0.999997 -0.00208518 0.0689806 0.881763 -0.000236311 -0.471693 -0.374702 0 0 0 1
93 | 116 -0.367955 -0.00231038 -0.929841 -0.635813 -0.00113629 0.999997 -0.00203504 0.0749034 0.929843 0.000307761 -0.367957 -0.394393 0 0 0 1
94 | 112 -0.496737 -0.00176971 -0.867899 -0.548018 -0.00125903 0.999998 -0.00131847 0.0678707 0.8679 0.00043778 -0.496739 -0.358854 0 0 0 1
95 | 115 -0.420271 -0.00187047 -0.907397 -0.603221 -0.000893305 0.999998 -0.00164761 0.0730866 0.907398 0.000118138 -0.420272 -0.399151 0 0 0 1
96 | 109 -0.56857 -0.00123668 -0.822634 -0.507937 -0.000883975 0.999999 -0.000892347 0.0623546 0.822634 0.000219826 -0.568571 -0.329432 0 0 0 1
97 | 123 -0.296521 -0.00185507 -0.955025 -0.806168 0.000479036 0.999998 -0.00209116 0.0873676 0.955026 -0.00107756 -0.296519 -0.671895 0 0 0 1
98 | 119 -0.311116 -0.0013338 -0.950371 -0.714951 0.00032794 0.999999 -0.0015108 0.0807151 0.950372 -0.000781699 -0.311115 -0.488793 0 0 0 1
99 | 127 -0.28357 -0.00133114 -0.958951 -0.902453 0.00190887 0.999996 -0.00195259 0.0904344 0.95895 -0.00238421 -0.283566 -0.857147 0 0 0 1
100 | 124 -0.293434 -0.00187426 -0.955978 -0.825272 0.000611106 0.999997 -0.00214815 0.0883031 0.955979 -0.00121454 -0.293432 -0.708278 0 0 0 1
101 | 120 -0.307728 -0.00214222 -0.951472 -0.749861 -0.000147046 0.999998 -0.00220392 0.0836587 0.951474 -0.000538297 -0.307727 -0.56307 0 0 0 1
102 | 121 -0.304953 -0.00154652 -0.952366 -0.765268 0.000502005 0.999998 -0.00178461 0.0853289 0.952367 -0.00102232 -0.304952 -0.594328 0 0 0 1
103 | 117 -0.341109 -0.00204878 -0.940022 -0.659139 -0.00055114 0.999998 -0.00197951 0.0760399 0.940024 -0.000157144 -0.341109 -0.406814 0 0 0 1
104 | 125 -0.29017 -0.00164872 -0.956974 -0.848363 0.00151843 0.999996 -0.00218325 0.0880233 0.956974 -0.00208661 -0.290167 -0.752695 0 0 0 1
105 | 126 -0.287307 -0.00143445 -0.957837 -0.873004 0.00142719 0.999997 -0.00192568 0.0896384 0.957837 -0.00192028 -0.287304 -0.802564 0 0 0 1
106 | 122 -0.300668 -0.00205253 -0.953727 -0.78278 0.000255336 0.999997 -0.00223261 0.0857666 0.953729 -0.000914796 -0.300667 -0.625917 0 0 0 1
107 | 129 -0.273855 -0.00118386 -0.96177 -0.951086 0.00222919 0.999996 -0.00186566 0.0915018 0.961768 -0.00265489 -0.273851 -0.935913 0 0 0 1
108 | 131 -0.266473 -0.00189971 -0.96384 -1.00381 0.00226583 0.999994 -0.0025974 0.0923814 0.96384 -0.00287604 -0.266467 -1.02822 0 0 0 1
109 | 135 -0.265854 -0.00121369 -0.964013 -1.08714 0.0034243 0.999992 -0.00220334 0.09257 0.964007 -0.00388684 -0.265847 -1.20951 0 0 0 1
110 | 130 -0.27061 -0.00105155 -0.962689 -0.972809 0.00288882 0.999994 -0.00190435 0.0913213 0.962685 -0.00329637 -0.270605 -0.975528 0 0 0 1
111 | 132 -0.262961 -0.00211013 -0.964804 -1.04086 0.00228613 0.999993 -0.00281018 0.0928823 0.964804 -0.00294463 -0.262955 -1.09602 0 0 0 1
112 | 133 -0.259302 -0.00135176 -0.965795 -1.07086 0.00291173 0.999993 -0.00218138 0.0936094 0.965792 -0.00337777 -0.259297 -1.14748 0 0 0 1
113 | 136 -0.296943 -0.00116439 -0.954894 -1.06941 0.00356435 0.999991 -0.00232778 0.0927849 0.954889 -0.0040948 -0.296936 -1.29558 0 0 0 1
114 | 139 -0.38868 -0.00150514 -0.921372 -1.00145 0.00329294 0.99999 -0.00302269 0.0946735 0.921367 -0.00420889 -0.388671 -1.60283 0 0 0 1
115 | 143 -0.50575 -0.00139993 -0.862679 -0.815646 0.00331335 0.999988 -0.00356522 0.0948007 0.862674 -0.00466147 -0.505739 -1.9346 0 0 0 1
116 | 134 -0.253626 -0.00148953 -0.967301 -1.09281 0.00297093 0.999993 -0.00231885 0.0934992 0.967298 -0.0034619 -0.25362 -1.17349 0 0 0 1
117 | 150 -0.467496 -0.00268522 -0.883991 -0.973117 0.00243017 0.999988 -0.00432276 0.113773 0.883992 -0.00416912 -0.467483 -2.21031 0 0 0 1
118 | 149 -0.482754 -0.00230327 -0.875753 -0.925495 0.00309397 0.999986 -0.00433554 0.103674 0.875751 -0.00480256 -0.48274 -2.16036 0 0 0 1
119 | 141 -0.447827 -0.00159538 -0.894119 -0.921685 0.00308502 0.99999 -0.00332945 0.0960335 0.894115 -0.00424939 -0.447818 -1.78144 0 0 0 1
120 | 138 -0.35814 -0.00157502 -0.933667 -1.03219 0.00350647 0.999989 -0.00303193 0.0936168 0.933661 -0.00435973 -0.35813 -1.5046 0 0 0 1
121 | 137 -0.326983 -0.00108787 -0.94503 -1.05655 0.00389357 0.999989 -0.00249832 0.0927435 0.945022 -0.00449645 -0.326975 -1.40237 0 0 0 1
122 | 140 -0.417946 -0.00152481 -0.908471 -0.966124 0.00392155 0.999986 -0.00348253 0.0932943 0.908463 -0.00501812 -0.417934 -1.69495 0 0 0 1
123 | 142 -0.476559 -0.000951948 -0.879142 -0.873076 0.00393848 0.999987 -0.00321774 0.0944978 0.879134 -0.00499592 -0.476549 -1.8614 0 0 0 1
124 | 145 -0.52354 -0.00133236 -0.852 -0.782744 0.00346288 0.999987 -0.00369167 0.0901349 0.851994 -0.00488311 -0.523529 -2.02918 0 0 0 1
125 | 144 -0.533953 -0.00130869 -0.845513 -0.755048 0.00280788 0.999991 -0.00332101 0.0951723 0.845509 -0.00414737 -0.533945 -2.00269 0 0 0 1
126 | 146 -0.51304 -0.00136294 -0.858364 -0.811277 0.00376313 0.999986 -0.00383702 0.0874513 0.858356 -0.00519868 -0.513028 -2.0487 0 0 0 1
127 | 148 -0.492527 -0.00152184 -0.870296 -0.891216 0.00328329 0.999988 -0.00360674 0.0985421 0.870291 -0.00463384 -0.492516 -2.12784 0 0 0 1
128 | 154 -0.43737 -0.00499376 -0.899268 -1.06415 0.000437803 0.999983 -0.00576598 0.13338 0.899282 -0.00291557 -0.43736 -2.37333 0 0 0 1
129 | 155 -0.438911 -0.00437025 -0.89852 -1.06019 0.000563058 0.999987 -0.00513881 0.139421 0.89853 -0.0027614 -0.438903 -2.43824 0 0 0 1
130 | 147 -0.502811 -0.0014648 -0.864395 -0.855987 0.00343614 0.999987 -0.00369334 0.0930094 0.864389 -0.00482724 -0.5028 -2.10662 0 0 0 1
131 | 158 -0.449595 -0.00497091 -0.893219 -0.94095 -0.000426696 0.999986 -0.00535031 0.147853 0.893233 -0.00202434 -0.44959 -2.68225 0 0 0 1
132 | 164 -0.520369 -0.00473376 -0.853929 -0.380692 2.13797e-05 0.999985 -0.00555645 0.139668 0.853942 -0.00290966 -0.520361 -2.92581 0 0 0 1
133 | 165 -0.527671 -0.00428111 -0.849438 -0.313508 0.000629643 0.999985 -0.00543099 0.135613 0.849449 -0.00340062 -0.527661 -2.9441 0 0 0 1
134 | 159 -0.460585 -0.00446465 -0.887605 -0.866017 -0.000575825 0.999989 -0.00473114 0.150136 0.887616 -0.00166798 -0.460582 -2.72583 0 0 0 1
135 | 162 -0.501715 -0.00525867 -0.865017 -0.524505 -0.000629285 0.999983 -0.00571418 0.145445 0.865033 -0.00232254 -0.50171 -2.88279 0 0 0 1
136 | 161 -0.480149 -0.00497885 -0.877173 -0.657969 -7.0956e-05 0.999984 -0.00563709 0.145563 0.877187 -0.0026444 -0.480142 -2.83445 0 0 0 1
137 | 168 -0.656327 -0.0033453 -0.754469 0.267735 0.000862667 0.999986 -0.00518437 0.125937 0.754476 -0.0040535 -0.656315 -2.97021 0 0 0 1
138 | 157 -0.440905 -0.00614891 -0.897533 -1.02359 -0.000779024 0.999979 -0.00646807 0.145438 0.897554 -0.0021526 -0.4409 -2.62014 0 0 0 1
139 | 152 -0.436255 -0.00266058 -0.899819 -1.05528 0.00261477 0.999988 -0.00422446 0.118315 0.899819 -0.00419576 -0.436242 -2.217 0 0 0 1
140 | 156 -0.440048 -0.00539695 -0.897958 -1.05721 -0.000622438 0.999984 -0.00570512 0.146739 0.897974 -0.0019516 -0.440044 -2.54617 0 0 0 1
141 | 160 -0.472385 -0.00593899 -0.881372 -0.73385 -0.00169888 0.999982 -0.00582768 0.151704 0.881391 -0.00125556 -0.472386 -2.79894 0 0 0 1
142 | 166 -0.533163 -0.00372449 -0.846004 -0.250741 0.00106267 0.999987 -0.0050721 0.131952 0.846012 -0.00360328 -0.533152 -2.95859 0 0 0 1
143 | 174 -0.88431 -0.00168743 -0.466897 1.57589 0.000145866 0.999992 -0.00389037 0.0903188 0.4669 -0.0035084 -0.884303 -2.54475 0 0 0 1
144 | 172 -0.820931 -0.0027142 -0.571022 1.18114 -2.50701e-05 0.999989 -0.00471714 0.103989 0.571028 -0.00385813 -0.820921 -2.75917 0 0 0 1
145 | 173 -0.85441 -0.0023153 -0.519594 1.39731 0.000195967 0.999989 -0.00477818 0.0932471 0.519599 -0.00418435 -0.8544 -2.65089 0 0 0 1
146 | 177 -0.981854 -0.000405073 -0.189639 2.28249 0.00031786 0.999993 -0.00378172 0.0478075 0.189639 -0.00377337 -0.981847 -1.79689 0 0 0 1
147 | 169 -0.701377 -0.00326285 -0.712783 0.473667 0.000778835 0.999985 -0.00534392 0.123848 0.71279 -0.00430324 -0.701364 -2.9497 0 0 0 1
148 | 175 -0.924662 -0.00138557 -0.380788 1.81855 0.000261072 0.999991 -0.00427263 0.0863348 0.38079 -0.00405015 -0.924653 -2.37565 0 0 0 1
149 | 176 -0.958236 -0.000667316 -0.285977 2.08233 0.000480138 0.999992 -0.00394227 0.0703647 0.285977 -0.00391493 -0.958229 -2.11274 0 0 0 1
150 | 179 -0.999975 0.00164543 0.00688316 2.49145 0.00161736 0.99999 -0.00408151 0.00122963 -0.00688981 -0.00407027 -0.999968 -1.15445 0 0 0 1
151 | 198 -0.981966 0.000335244 0.189058 1.72545 0.000116558 0.999999 -0.00116783 0.177618 -0.189059 -0.00112473 -0.981965 -0.269612 0 0 0 1
152 | 185 -0.961232 0.000455083 0.275741 2.41699 -0.00026471 0.999997 -0.00257317 -0.0362566 -0.275741 -0.00254641 -0.961228 -0.268806 0 0 0 1
153 | 181 -0.984995 0.00162088 0.172572 2.52769 0.00109577 0.999994 -0.00313802 -0.023287 -0.172576 -0.00290184 -0.984992 -0.637336 0 0 0 1
154 | 178 -0.995843 0.000257454 -0.0910895 2.41548 0.000565869 0.999994 -0.00336004 0.0259939 0.0910881 -0.00339762 -0.995837 -1.4723 0 0 0 1
155 | 188 -0.947285 0.00277447 0.32038 2.32862 0.00222123 0.999995 -0.00209227 -0.0322137 -0.320385 -0.00127034 -0.947287 -0.0657931 0 0 0 1
156 | 183 -0.97295 8.90052e-05 0.231017 2.46251 -0.000808853 0.999992 -0.00379183 -0.0349163 -0.231016 -0.00387612 -0.972942 -0.425815 0 0 0 1
157 | 191 -0.928639 0.00189631 0.370979 2.23176 0.00195384 0.999998 -0.000220758 -0.00293461 -0.370979 0.000519828 -0.928641 0.109745 0 0 0 1
158 | 190 -0.932375 0.00173378 0.361489 2.25696 0.00191784 0.999998 0.000150385 -0.0104483 -0.361488 0.000833492 -0.932377 0.076839 0 0 0 1
159 | 182 -0.977094 0.00211602 0.212799 2.50189 0.00144222 0.999993 -0.00332155 -0.0335515 -0.212804 -0.00293857 -0.97709 -0.503439 0 0 0 1
160 | 200 -0.933791 0.000975695 0.357817 1.61028 0.000747667 0.999999 -0.000775621 0.18531 -0.357818 -0.00045674 -0.933791 -0.0570947 0 0 0 1
161 | 180 -0.994304 0.00183537 0.106564 2.52604 0.00149888 0.999994 -0.00323761 -0.0137883 -0.106569 -0.00305944 -0.994301 -0.841785 0 0 0 1
162 | 186 -0.955941 0.000737403 0.293559 2.38558 -7.96291e-05 0.999996 -0.00277123 -0.035551 -0.29356 -0.00267251 -0.955937 -0.190107 0 0 0 1
163 | 187 -0.952979 0.000433834 0.303035 2.36067 -9.81074e-05 0.999999 -0.00174015 -0.0318621 -0.303036 -0.00168806 -0.952978 -0.13884 0 0 0 1
164 | 184 -0.968411 3.58591e-05 0.249358 2.42397 -0.000729179 0.999995 -0.00297566 -0.0357699 -0.249357 -0.00306349 -0.968407 -0.346902 0 0 0 1
165 | 189 -0.938768 0.00240606 0.344543 2.30236 0.00221921 0.999997 -0.000936707 -0.0246878 -0.344544 -0.000114738 -0.93877 0.0116999 0 0 0 1
166 | 192 -0.936607 0.00189685 0.350377 2.18115 0.00144419 0.999998 -0.00155319 0.0153054 -0.350379 -0.000948721 -0.936608 0.0792481 0 0 0 1
167 | 194 -0.947926 0.00302306 0.318477 2.09382 0.00215873 0.999993 -0.00306684 0.0476512 -0.318484 -0.00221963 -0.947926 0.0305685 0 0 0 1
168 | 196 -0.961164 0.00227163 0.275969 1.95713 0.00164248 0.999995 -0.00251089 0.110285 -0.275973 -0.00196011 -0.961163 -0.046009 0 0 0 1
169 | 193 -0.940157 0.00128153 0.34074 2.15433 0.000533577 0.999997 -0.0022888 0.0270659 -0.340742 -0.00197002 -0.940155 0.0648794 0 0 0 1
170 | 209 -0.935402 -4.06129e-06 0.353585 0.927618 -0.000266247 1 -0.000692865 0.192596 -0.353585 -0.000742248 -0.935402 -0.150567 0 0 0 1
171 | 197 -0.972484 0.00169763 0.232962 1.82252 0.00123843 0.999997 -0.00211738 0.16266 -0.232965 -0.00177061 -0.972484 -0.162083 0 0 0 1
172 | 201 -0.90478 -1.00451e-06 0.42588 1.54676 -0.000438575 0.999999 -0.000929391 0.191261 -0.42588 -0.00102767 -0.904779 0.00693102 0 0 0 1
173 | 195 -0.954848 0.00196734 0.297087 2.0284 0.00112431 0.999995 -0.0030085 0.0777235 -0.297092 -0.00253864 -0.954846 -0.00532187 0 0 0 1
174 | 204 -0.815893 0.000553691 0.578203 1.25583 7.29864e-05 1 -0.000854617 0.193506 -0.578203 -0.000655075 -0.815893 0.154259 0 0 0 1
175 | 203 -0.831065 0.00117871 0.556174 1.36397 0.000680258 0.999999 -0.00110284 0.19507 -0.556175 -0.000538186 -0.831065 0.14548 0 0 0 1
176 | 199 -0.950249 -0.000482921 0.311489 1.64773 -0.000549298 1 -0.000125364 0.184755 -0.311489 -0.000290228 -0.95025 -0.10592 0 0 0 1
177 | 212 -0.986958 0.00124323 0.160972 0.757561 0.000941119 0.999998 -0.00195303 0.188989 -0.160974 -0.00177607 -0.986957 -0.291058 0 0 0 1
178 | 211 -0.980378 0.00061682 0.197124 0.788001 0.000459597 1 -0.000843334 0.190325 -0.197124 -0.000736189 -0.980378 -0.268132 0 0 0 1
179 | 206 -0.865416 0.000308907 0.501054 1.12457 -0.000492995 0.999999 -0.00146801 0.191277 -0.501054 -0.00151746 -0.865415 0.0075685 0 0 0 1
180 | 202 -0.870474 0.000799464 0.492213 1.46617 0.000267636 0.999999 -0.00115091 0.194559 -0.492214 -0.000870103 -0.870474 0.0704 0 0 0 1
181 | 208 -0.908765 -5.14659e-05 0.417307 1.00108 -0.000650206 0.999999 -0.00129262 0.192164 -0.417307 -0.00144602 -0.908765 -0.0925594 0 0 0 1
182 | 205 -0.841804 0.00148518 0.539781 1.19121 0.000931367 0.999999 -0.00129895 0.191306 -0.539782 -0.000590725 -0.841805 0.0744685 0 0 0 1
183 | 207 -0.88821 0.000106147 0.459437 1.05919 -0.00013768 1 -0.000497208 0.192299 -0.459437 -0.000504881 -0.88821 -0.0466206 0 0 0 1
184 | 213 -0.993844 0.00150336 0.110782 0.729263 0.0013175 0.999998 -0.00175085 0.189055 -0.110785 -0.00159411 -0.993843 -0.32355 0 0 0 1
185 | 214 -0.997301 0.00126251 0.0734056 0.70913 0.00111257 0.999997 -0.0020834 0.188807 -0.073408 -0.00199611 -0.9973 -0.346441 0 0 0 1
186 | 215 -0.999361 0.000499547 0.0357351 0.66788 0.000455019 0.999999 -0.00125417 0.190197 -0.0357357 -0.00123711 -0.999361 -0.359283 0 0 0 1
187 | 216 -0.999996 0.00160158 -0.00245079 0.61719 0.00160478 0.999998 -0.00130601 0.188714 0.00244869 -0.00130994 -0.999996 -0.365811 0 0 0 1
188 | 220 -0.990257 0.0022441 -0.139233 0.150119 0.00235346 0.999997 -0.000620819 0.131727 0.139231 -0.00094245 -0.99026 -0.183384 0 0 0 1
189 | 221 -0.984044 0.00172528 -0.177915 0.062191 0.00175439 0.999998 -6.30269e-06 0.0494858 0.177915 -0.000318335 -0.984046 -0.0823989 0 0 0 1
190 | 222 -0.988976 0.000848951 -0.148071 0.0367849 0.000772119 1 0.000576369 0.00273422 0.148071 0.000455687 -0.988977 -0.0448393 0 0 0 1
191 | 217 -0.999144 0.000842887 -0.0413637 0.562101 0.000900802 0.999999 -0.0013815 0.189354 0.0413625 -0.00141758 -0.999143 -0.367612 0 0 0 1
192 | 218 -0.997453 0.00215478 -0.0712955 0.436644 0.00227092 0.999996 -0.00154792 0.186398 0.0712919 -0.00170589 -0.997454 -0.331549 0 0 0 1
193 | 219 -0.994804 0.0015144 -0.1018 0.299297 0.0015259 0.999999 -3.50168e-05 0.177375 0.1018 -0.000190172 -0.994805 -0.290883 0 0 0 1
194 |
--------------------------------------------------------------------------------
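Each row of `KF.txt` above appears to hold a keyframe id followed by the 16 entries of a row-major 4x4 homogeneous camera pose (note that every row ends with `0 0 0 1`). A parsing sketch under that assumption:

```python
import numpy as np

# each row: keyframe id + flattened 4x4 pose, ending in the homogeneous row 0 0 0 1
data = np.loadtxt('data/BAtest/KF.txt')
kf_ids = data[:, 0].astype(int)          # keyframe ids (not necessarily contiguous)
poses = data[:, 1:].reshape(-1, 4, 4)    # (N, 4, 4) pose matrices
```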
/data/office2 tiny/depth_left/000000_left_depth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/data/office2 tiny/depth_left/000000_left_depth.npy
--------------------------------------------------------------------------------
/data/office2 tiny/depth_left/000001_left_depth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/data/office2 tiny/depth_left/000001_left_depth.npy
--------------------------------------------------------------------------------
/data/office2 tiny/image_left/000000_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/data/office2 tiny/image_left/000000_left.png
--------------------------------------------------------------------------------
/data/office2 tiny/image_left/000001_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sair-lab/AirLoop/512bc361f9aec0401d2b89bc5c33c1c1df13c579/data/office2 tiny/image_left/000001_left.png
--------------------------------------------------------------------------------
/data/office2 tiny/pose_left.txt:
--------------------------------------------------------------------------------
1 | 8.164753913879394531e+00 6.764795780181884766e+00 -1.301551759243011475e-01 -0.000000000000000000e+00 -0.000000000000000000e+00 9.677474498748779297e-01 2.519223093986511230e-01
2 | 8.136226654052734375e+00 6.695598125457763672e+00 -1.305037587881088257e-01 -2.794180531054735184e-03 6.599640473723411560e-03 9.666700363159179688e-01 2.559250593185424805e-01
--------------------------------------------------------------------------------
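Each row of `pose_left.txt` uses TartanAir's pose convention: translation `x y z` followed by an orientation quaternion `qx qy qz qw` (scalar-last), expressed in the NED frame (hence the `NED2EDN` constant in `datasets/tartanair.py`). A conversion sketch using SciPy (the repo itself uses `utils.geometry.pose2mat`, which is outside this section):

```python
import numpy as np
from scipy.spatial.transform import Rotation

rows = np.loadtxt('data/office2 tiny/pose_left.txt')
t, q = rows[0, :3], rows[0, 3:]                 # translation, scalar-last quaternion
T = np.eye(4)
T[:3, :3] = Rotation.from_quat(q).as_matrix()   # from_quat expects (x, y, z, w)
T[:3, 3] = t                                    # 4x4 homogeneous camera pose
```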
/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from torch.utils.data import DataLoader
4 |
5 | from .base import DefaultSampler
6 | from .tartanair import TartanAir
7 | from .nordland import Nordland
8 | from .robotcar import RobotCar
9 |
10 |
11 | def get_dataset(args):
12 |     if args.dataset == 'tartanair':
13 |         tartanair = TartanAir(args.dataset_root, args.scale, catalog_dir=args.catalog_dir)
14 |         train_data, eval_data = tartanair.rand_split(
15 |             [1 - args.eval_percentage, args.eval_percentage], args.eval_split_seed)
16 |         if 'train' in args.task:
17 |             data = train_data
18 |         elif 'eval' in args.task:
19 |             data = eval_data
20 |     else:
21 |         if args.dataset == 'nordland':
22 |             dataset_cls = Nordland
23 |         elif args.dataset == 'robotcar':
24 |             dataset_cls = RobotCar
25 |         else:
26 |             raise ValueError(f'Unrecognized dataset: {args.dataset}')
27 |
28 |         split = 'train' if 'train' in args.task else 'test'
29 |         data = dataset_cls(args.dataset_root, args.scale, split=split, catalog_dir=args.catalog_dir)
30 |
31 |     seq_merge, env_merge = 'cat', 'cat'
32 |     if 'joint' in args.task:
33 |         env_merge = 'rand_interleave'
34 |
35 |     data.include_exclude(args.include, args.exclude)
36 |     sampler = DefaultSampler(data, args.batch_size, seq_merge=seq_merge, env_merge=env_merge, overlap=False)
37 |     loader = DataLoader(data, batch_sampler=sampler, pin_memory=True, num_workers=args.num_workers)
38 |
39 |     return loader
40 |
--------------------------------------------------------------------------------
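A minimal sketch of driving `get_dataset` (the attribute names are read off the accesses above; the values here are hypothetical):

```python
from types import SimpleNamespace

from datasets import get_dataset

# stand-in for the parsed command-line arguments that get_dataset consumes
args = SimpleNamespace(
    dataset='tartanair', dataset_root='/data/datasets', catalog_dir='/data/catalog',
    scale=1, task='train-seq', eval_percentage=0.2, eval_split_seed=42,
    include=None, exclude=None, batch_size=8, num_workers=4)

loader = get_dataset(args)  # DataLoader batched by DefaultSampler
for batch in loader:
    ...  # each item also carries its (env, seq, idx) key (see datasets/base.py)
```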
/datasets/augment.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import torch
4 | import numpy as np
5 | import kornia as kn
6 | from torch import nn
7 | from PIL import Image
8 | from torchvision import transforms as T
9 | from torchvision.transforms import functional as F
10 |
11 |
12 | class AirAugment(nn.Module):
13 |     def __init__(self, scale=1, size=[480, 640], resize_only=False):
14 |         super().__init__()
15 |         self.img_size = (np.array(size) * scale).round().astype(np.int32)
16 |         self.resize_totensor = T.Compose([T.Resize(self.img_size.tolist()), np.array, T.ToTensor()])
17 |         self.rand_crop = T.RandomResizedCrop(self.img_size.tolist(), scale=(0.1, 1.0))
18 |         self.rand_rotate = T.RandomRotation(45, resample=Image.BILINEAR)
19 |         self.rand_color = T.ColorJitter(0.8, 0.8, 0.8)
20 |         self.p = [1, 0, 0, 0] if resize_only else [0.25]*4
21 |
22 |     def apply_affine(self, K, translation=[0, 0], center=[0, 0], scale=[1, 1], angle=0):
23 |         """Applies transformation to K in the order: (R, S), T. All coordinates are in (h, w) order.
24 |         Center is for both scale and rotation.
25 |         """
26 |         translation = torch.tensor(translation[::-1].copy(), dtype=torch.float32)
27 |         center = torch.tensor(center[::-1].copy(), dtype=torch.float32)
28 |         scale = torch.tensor(scale[::-1].copy(), dtype=torch.float32)
29 |         angle = torch.tensor([angle], dtype=torch.float32)
30 |
31 |         scaled_rotation = torch.block_diag(kn.angle_to_rotation_matrix(angle)[0] @ torch.diag(scale), torch.ones(1))
32 |         scaled_rotation[:2, 2] = center - scaled_rotation[:2, :2] @ center + translation
33 |
34 |         return scaled_rotation.to(K) @ K
35 |
36 |     def forward(self, image, K=None, depth=None):
37 |         if isinstance(image, Image.Image):
38 |             in_size = np.array(image.size[::-1])
39 |             image = self.resize_totensor(image)
40 |             depth = depth if depth is None else self.resize_totensor(depth)
41 |         elif isinstance(image, torch.Tensor):
42 |             in_size = np.array(image.shape[1:])
43 |             image = self.resize_totensor.transforms[0](image)
44 |             depth = depth if depth is None else self.resize_totensor.transforms[0](depth)
45 |
46 |         center, scale, angle = in_size/2, self.img_size/in_size, 0
47 |
48 |         transform = np.random.choice(np.arange(len(self.p)), p=self.p)
49 |         if transform == 1:
50 |             trans = self.rand_crop
51 |             i, j, h, w = T.RandomResizedCrop.get_params(image, trans.scale, trans.ratio)
52 |             center = np.array([i + h / 2, j + w / 2])
53 |             scale = self.img_size / np.array([h, w])
54 |             image = F.resized_crop(image, i, j, h, w, trans.size, trans.interpolation)
55 |             depth = depth if depth is None else F.resized_crop(depth, i, j, h, w, trans.size, trans.interpolation)
56 |
57 |         elif transform == 2:
58 |             trans = self.rand_rotate
59 |             angle = T.RandomRotation.get_params(trans.degrees)
60 |             cur_size = np.array(image.shape[1:])
61 |             # fill oob pix with reflection so that model can't detect rotation with boundary
62 |             image = F.pad(image, padding=tuple(cur_size // 2), padding_mode='reflect')
63 |             image = F.rotate(image, angle, trans.resample, trans.expand, trans.center, trans.fill)
64 |             image = F.center_crop(image, tuple(cur_size))
65 |             # fill oob depth with inf so that projector can mask them out
66 |             if depth is not None and isinstance(depth, torch.Tensor):
67 |                 # torch 1.7.1: F.rotate doesn't support fill for Tensor
68 |                 device = depth.device
69 |                 depth = F.to_pil_image(depth, mode='F')
70 |                 depth = F.rotate(depth, angle, trans.resample, trans.expand, trans.center, float('inf'))
71 |                 depth = self.resize_totensor(depth).to(device)
72 |
73 |         elif transform == 3:
74 |             image = self.rand_color(image)
75 |
76 |         ret = (image,)
77 |
78 |         if K is not None:
79 |             translation = self.img_size / 2 - center
80 |             K = self.apply_affine(K, translation, center, scale, angle)
81 |             ret = ret + (K,)
82 |         if depth is not None:
83 |             ret = ret + (depth,)
84 |
85 |         return ret
86 |
--------------------------------------------------------------------------------
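A small sanity check of `apply_affine` for the pure-resize case (no crop or rotation): scaling the image by 0.5 should halve the focal lengths and the principal point. The intrinsics below match TartanAir's; the call itself is hypothetical usage, not code from this repo:

```python
import torch

from datasets.augment import AirAugment

aug = AirAugment(scale=0.5, size=[480, 640], resize_only=True)   # img_size becomes [240, 320]
K = torch.FloatTensor([[320, 0, 320], [0, 320, 240], [0, 0, 1]])

# with center at the origin and zero translation, only the (R, S) part acts on K
K_new = aug.apply_affine(K, translation=[0, 0], center=[0, 0], scale=[0.5, 0.5])
# K_new == diag(0.5, 0.5, 1) @ K, i.e. fx = fy = 160, cx = 160, cy = 120
```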
/datasets/base.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import bz2
4 | import itertools
5 | import re
6 | import pathlib
7 | import pickle
8 | from copy import copy
9 |
10 | import numpy as np
11 |
12 | from torch.utils.data import Dataset, Sampler
13 |
14 |
15 | class DatasetBase(Dataset):
16 |     def __init__(self, root, name, catalog_dir=None):
17 |         self.path_prefix = pathlib.Path(root)
18 |         self._seqs = {}
19 |
20 |         # load state dict if available
21 |         if catalog_dir is None:
22 |             self._populate()
23 |         else:
24 |             catalog_path = pathlib.Path(catalog_dir) / ('%s.pbz2' % name)
25 |             if catalog_path.is_file():
26 |                 with bz2.BZ2File(catalog_path, 'rb') as f:
27 |                     state_dict = pickle.load(f)
28 |                 for k, v in state_dict.items():
29 |                     setattr(self, k, v)
30 |                 print('Loaded catalog %s' % catalog_path)
31 |             else:
32 |                 catalog_attr = ['_seqs'] + self._populate()
33 |                 state_dict = {attr: getattr(self, attr) for attr in catalog_attr}
34 |
35 |                 catalog_path.parent.mkdir(parents=True, exist_ok=True)
36 |                 with bz2.BZ2File(catalog_path, 'wb') as f:
37 |                     pickle.dump(state_dict, f)
38 |
39 |     @property
40 |     def envs(self):
41 |         return self._seqs.keys()
42 |
43 |     @property
44 |     def seqs(self):
45 |         return self._seqs
46 |
47 |     def _populate(self):
48 |         raise NotImplementedError()
49 |
50 |     def get_size(self, env, seq):
51 |         raise NotImplementedError()
52 |
53 |     def getitem_impl(self, env, seq, idx):
54 |         raise NotImplementedError()
55 |
56 |     def get_seq_id(self, env, seq):
57 |         env, seq = np.atleast_1d(env).tolist(), np.atleast_1d(seq).tolist()
58 |         return '_'.join(env + seq)
59 |
60 |     def get_env_seqs(self):
61 |         return [(env, seq) for env in self.envs for seq in self.seqs[env]]
62 |
63 |     def __getitem__(self, index):
64 |         env, seq, idx = index
65 |         assert env in self.envs, 'No such environment: %s' % env
66 |         assert seq in self.seqs[env], 'No such sequence in environment %s: %s' % (env, seq)
67 |         assert 0 <= idx < self.get_size(env, seq), 'Index out of bound for (%s:%s): %d' % (env, seq, idx)
68 |         item = self.getitem_impl(env, seq, idx)
69 |         item = (item,) if not isinstance(item, tuple) else item
70 |         return item + ((env, seq, idx),)
71 |
72 |     def include_exclude(self, include=None, exclude=None):
73 |         incl_pattern = re.compile(include) if include is not None else None
74 |         excl_pattern = re.compile(exclude) if exclude is not None else None
75 |         for env, seq in self.get_env_seqs():
76 |             seq_id = self.get_seq_id(env, seq)
77 |             if (incl_pattern and incl_pattern.search(seq_id) is None) or \
78 |                     (excl_pattern and excl_pattern.search(seq_id) is not None):
79 |                 self.seqs[env].remove(seq)
80 |                 if not self.seqs[env]:
81 |                     self.seqs.pop(env)
82 |
83 |     def rand_split(self, ratio, seed=42):
84 |         env_seqs = self.get_env_seqs()
85 |         total, ratio = len(env_seqs), np.array(ratio)
86 |         split_idx = np.cumsum(np.round(ratio / sum(ratio) * total), dtype=int)[:-1]  # np.int was removed in NumPy 1.24
87 |         subsets = []
88 |         for perm in np.split(np.random.default_rng(seed=seed).permutation(total), split_idx):
89 |             perm = sorted(perm)
90 |             subset = copy(self)
91 |             subset._seqs = {}
92 |             for env, seq in np.take(np.array(env_seqs, dtype=object), perm, axis=0).tolist():
93 |                 subset.seqs.setdefault(env, []).append(seq)
94 |             subsets.append(subset)
95 |         return subsets
96 |
97 |
98 | class DefaultSampler(Sampler):
99 |     def __init__(self, dataset: DatasetBase, batch_size, seq_merge='shuffle', env_merge='rand', shuffle_batch=False, overlap=True):
100 |         self.seq_sizes = [(env_seq, dataset.get_size(*env_seq)) for env_seq in dataset.get_env_seqs()]
101 |         self.bs = batch_size
102 |         self.batches = []
103 |         env_batches = {}
104 |         for env_seq, size in self.seq_sizes:
105 |             frame_idx = np.arange(0, size)
106 |             b_start = np.arange(0, size - self.bs, 1 if overlap else self.bs)
107 |             batch = [[env_seq + (idx,) for idx in frame_idx[st:st+self.bs]] for st in b_start]
108 |             if shuffle_batch:
109 |                 np.random.shuffle(batch)
110 |             env_batches.setdefault(env_seq[0], []).extend(batch)
111 |
112 |         if seq_merge == 'cat':
113 |             pass
114 |         elif seq_merge == 'rand_pick':
115 |             for env, batches in env_batches.copy().items():
116 |                 env_samples = list(itertools.chain(*batches))
117 |                 np.random.shuffle(env_samples)
118 |                 # slice back into chunks
119 |                 env_batches[env] = [list(batch) for batch in itertools.zip_longest(*([iter(env_samples)] * batch_size))]
120 |
121 |         if env_merge == 'cat':
122 |             # A1 A2 A3 B1 B2 B3
123 |             self.batches = list(itertools.chain(*env_batches.values()))
124 |         elif env_merge == 'rand_interleave':
125 |             # A1 B1 A2 B2 B3 A3
126 |             selection = sum([[env] * len(batch) for i, (env, batch) in enumerate(env_batches.items())], [])
127 |             np.random.shuffle(selection)
128 |             self.batches = [env_batches[env].pop() for env in selection]
129 |         elif env_merge == 'rand_pick':
130 |             # B1 B2 A2 A1 A3 B3
131 |             self.batches = list(itertools.chain(*env_batches.values()))
132 |             np.random.shuffle(self.batches)
133 |
134 |     def __iter__(self):
135 |         return iter(self.batches)
136 |
137 |     def __len__(self):
138 |         return len(self.batches)
139 |
--------------------------------------------------------------------------------
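A toy illustration of `DefaultSampler`'s batch layout with `seq_merge='cat'` and `env_merge='cat'` (the `Toy` class is hypothetical and defines just enough of the `DatasetBase` interface):

```python
from datasets.base import DatasetBase, DefaultSampler

class Toy(DatasetBase):
    def _populate(self):
        self._seqs = {'A': ['0'], 'B': ['0']}   # two environments, one sequence each
        return []

    def get_size(self, env, seq):
        return 6

    def getitem_impl(self, env, seq, idx):
        return idx

data = Toy('/tmp', 'toy')   # catalog_dir=None, so _populate() runs directly
sampler = DefaultSampler(data, batch_size=2, seq_merge='cat', env_merge='cat', overlap=False)
# env_merge='cat' concatenates per-environment batches: all 'A' batches, then all 'B'
for batch in sampler:
    print(batch)            # e.g. [('A', '0', 0), ('A', '0', 1)]
```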
/datasets/nordland.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import pathlib
4 |
5 | from PIL import Image
6 | import pandas as pd
7 | from torchvision.transforms import functional as F
8 |
9 | from .augment import AirAugment
10 | from .base import DatasetBase
11 |
12 | class Nordland(DatasetBase):
13 |
14 |     def __init__(self, root, scale=1, split='train', catalog_dir=None):
15 |         self.split = split
16 |         super().__init__(pathlib.Path(root) / 'nordland', f'nordland-{split}', catalog_dir)
17 |
18 |         self.augment = AirAugment(scale, size=[480, 640], resize_only=True)
19 |         self.trimmed_size = [1080, 1440]
20 |
21 |     def _populate(self):
22 |         seqs = ['section1', 'section2'] if self.split == 'train' else ['section1', 'section2', 'section3']
23 |         self.seqs.update({env: seqs.copy() for env in ['spring', 'summer', 'fall', 'winter']})
24 |
25 |         self.seq_lims = {}
26 |         for env_seq in self.get_env_seqs():
27 |             indices = [int(f.stem) for f in self._get_paths(*env_seq, '*')]
28 |             self.seq_lims[env_seq] = (min(indices), max(indices) + 1)
29 |
30 |         return ['seq_lims']
31 |
32 |     def get_size(self, env, seq):
33 |         lims = self.seq_lims[env, seq]
34 |         return lims[1] - lims[0]
35 |
36 |     def getitem_impl(self, env, seq, idx):
37 |         offset = self.seq_lims[env, seq][0] + idx
38 |         try:
39 |             image = F.center_crop(Image.open(list(self._get_paths(env, seq, offset))[0]), self.trimmed_size)
40 |         except Exception as e:
41 |             print('Bad image: %s:%s:%d: %s' % (env, seq, idx, str(e)))
42 |             image = Image.new('RGB', self.trimmed_size)
43 |         image = self.augment(image)[0]
44 |         return image, offset
45 |
46 |     def summary(self):
47 |         return pd.DataFrame(data=[
48 |             [env, seq, self.get_size(env, seq)] for env, seq in self.get_env_seqs()],
49 |             columns=['env', 'seq', 'size'])
50 |
51 |     def _get_paths(self, env, seq, idx):
52 |         return (self.path_prefix / self.split / ('%s_images_%s' % (env, self.split)) / seq).glob(str(idx) + '.png')
53 |
--------------------------------------------------------------------------------
/datasets/robotcar.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import pathlib
5 |
6 | from PIL import Image
7 | import numpy as np
8 | import pandas as pd
9 | from scipy import interpolate
10 | import torch
11 | from torchvision.transforms import functional as F
12 |
13 | from .augment import AirAugment
14 | from .base import DatasetBase
15 |
16 | class RobotCar(DatasetBase):
17 |     WEATHER_TAGS = ['sun', 'overcast', 'night']
18 |
19 |     def __init__(self, root, scale=1, split='train', catalog_dir=None):
20 |         self.gps = {}
21 |         self.img_ts = {}
22 |         # ! tmp
23 |         self.split = split
24 |         super().__init__(pathlib.Path(root) / 'robotcar' / split, f'robotcar-{split}', catalog_dir)
25 |
26 |         self.augment = AirAugment(scale, size=[480, 640], resize_only=True)
27 |         self.trimmed_size = [480, 640]
28 |
29 |     def _populate(self):
30 |         seqs = self.path_prefix.glob('*-*-*-*-*-*')
31 |         for seq in seqs:
32 |             tags = open(seq / 'tags.csv', 'r').readline().strip().split(',')
33 |             env = [t for t in tags if t in self.WEATHER_TAGS]
34 |             if len(env) == 1:
35 |                 self.seqs.setdefault(env[0], []).append(seq.name)
36 |
37 |         self.img_gps, self.img_ts, self.vel, self.heading = {}, {}, {}, {}
38 |         for env, seq in self.get_env_seqs():
39 |             seq_path = self.path_prefix / seq
40 |
41 |             # gps = np.loadtxt(seq_path / 'gps/gps.csv', delimiter=',', skiprows=1, usecols=(0, 8, 9),
42 |             gps = np.loadtxt(seq_path / 'gps/ins.csv', delimiter=',', skiprows=1, usecols=(0, 5, 6, 9, 10, 11, 14),
43 |                              dtype=[('ts', 'i8'), ('loc', '2f8'), ('vel', '3f8'), ('yaw', 'f8')])
44 |             gps = np.sort(gps, order=['ts'])
45 |             # deduplicate timestamp
46 |             selected = np.ones(len(gps), dtype=bool)
47 |             selected[1:] = gps['ts'][1:] != gps['ts'][:-1]
48 |             gps = gps[selected]
49 |
50 |             img_ts = np.array([int(p.split('.')[-2])
51 |                                for p in os.listdir(seq_path / 'stereo/centre') if p.endswith('.png')])
52 |             img_ts = np.sort(img_ts)
53 |
54 |             # prevent precision cutoff
55 |             offset = gps['ts'][0]
56 |             gps_interp = interpolate.interp1d(gps['ts'] - offset, gps['loc'], axis=0, bounds_error=False,
57 |                                               fill_value=np.nan, assume_sorted=True)
58 |             img_gps = gps_interp((img_ts - offset))
59 |
60 |             valid = np.isfinite(img_gps).all(axis=1)
61 |             if valid.sum() >= 1000:
62 |                 # ! tmp
63 |                 if self.split == 'test':
64 |                     valid[min(len(valid), 12000):] = False
65 |                 self.img_gps[env, seq], self.img_ts[env, seq] = img_gps[valid], img_ts[valid].astype(np.int64)
66 |                 # self.vel[env, seq] = np.sqrt(np.sum(gps['vel'] ** 2, 1))
67 |                 self.heading[env, seq] = gps['yaw']
68 |             else:
69 |                 self.seqs[env].remove(seq)
70 |                 if len(self.seqs[env]) == 0:
71 |                     self.seqs.pop(env)
72 |
73 |         return ['img_gps', 'img_ts', 'heading']
74 |
75 |     def get_size(self, env, seq):
76 |         return len(self.img_ts[env, seq])
77 |
78 |     def getitem_impl(self, env, seq, idx):
79 |         try:
80 |             img_path = self.path_prefix / seq / f'stereo/centre/{self.img_ts[env, seq][idx]}.png'
81 |             arr = np.array(Image.open(img_path)) / 255.0
82 |             g0, r, b, g1 = arr[0::2, 0::2], arr[1::2, 0::2], arr[0::2, 1::2], arr[1::2, 1::2]  # split the GBRG Bayer mosaic into color planes
83 |             g = (g0 + g1) / 2  # naive demosaicing: average the two green samples
84 |             image = torch.from_numpy(np.stack([r, g, b]).astype(np.float32))
85 |             image = F.center_crop(image, self.trimmed_size)
86 |         except Exception as e:
87 |             print('Bad image: %s:%s:%d: %s' % (env, seq, idx, str(e)))
88 |             image = Image.new('RGB', self.trimmed_size)
89 |         image = self.augment(image)[0]
90 |         aux = (self.img_gps[env, seq][idx], np.rad2deg(self.heading[env, seq][idx]))
91 |         return image, aux
92 |
93 |     def summary(self):
94 |         return pd.DataFrame(data=[
95 |             [env, seq, self.get_size(env, seq)] for env, seq in self.get_env_seqs()],
96 |             columns=['env', 'seq', 'size'])
97 |
98 |
--------------------------------------------------------------------------------
/datasets/tartanair.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import bz2
5 | import glob
6 | import torch
7 | import pickle
8 | import pathlib
9 | import numpy as np
10 | from os import path
11 | import pandas as pd
12 | from PIL import Image
13 | from torch.utils.data import Dataset
14 | from torchvision.transforms import functional as F
15 | from utils.geometry import pose2mat
16 |
17 | from .augment import AirAugment
18 | from .base import DatasetBase
19 |
20 |
21 | class TartanAir(DatasetBase):
22 |     NED2EDN = torch.FloatTensor([[0, 1, 0], [0, 0, 1], [1, 0, 0]])  # maps (north, east, down) to (east, down, north)
23 |
24 | def __init__(self, root, scale=1, catalog_dir=None):
25 | super().__init__(pathlib.Path(root) / 'tartanair', 'tartanair', catalog_dir)
26 | # Camera Intrinsics of TartanAir Dataset
27 | fx, fy, cx, cy = 320, 320, 320, 240
28 | self.K = torch.FloatTensor([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
29 |
30 | self.augment = AirAugment(scale, size=[480, 640], resize_only=True)
31 |
32 | def _populate(self):
33 | for env_path in sorted(self.path_prefix.glob('*')):
34 | env = env_path.stem
35 |             self.seqs[env] = [tuple(p.parts[-2:]) for p in sorted(env_path.glob('[EH]a[sr][yd]/*'))]  # Easy/* and Hard/*
36 |
37 | self.poses, self.size = {}, {}
38 | for env, seq in self.get_env_seqs():
39 | seq_path = self.path_prefix / env / seq[0] / seq[1]
40 |
41 | pose_q = np.loadtxt(seq_path / 'pose_left.txt', dtype=np.float32)
42 | self.poses[env, seq] = self.NED2EDN @ pose2mat(pose_q)
43 |
44 | self.size[env, seq] = len([p for p in os.listdir(seq_path / 'image_left/') if p.endswith('.png')])
45 |
46 | return ['poses', 'size']
47 |
48 | def get_size(self, env, seq):
49 | return self.size[env, seq]
50 |
51 | def getitem_impl(self, env, seq, idx):
52 | seq_path = self.path_prefix / env / seq[0] / seq[1]
53 |         image = Image.open(seq_path / 'image_left' / ('%06d_left.png' % idx))
54 |         depth = F.to_pil_image(np.load(seq_path / 'depth_left' / ('%06d_left_depth.npy' % idx)), mode='F')
55 | pose = self.poses[env, seq][idx]
56 | image, K, depth = self.augment(image, self.K, depth)
57 | return image, (depth, pose, K)
58 |
59 | def summary(self):
60 | return pd.DataFrame(data=[
61 | [env, seq[0], seq[1], self.get_size(env, seq)] for env, seq in self.get_env_seqs()],
62 | columns=['env', 'dif', 'id', 'size'])
63 |
--------------------------------------------------------------------------------
/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .loss import MemReplayLoss
2 |
--------------------------------------------------------------------------------
/losses/lifelong.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import copy
4 | import os
5 | from pathlib import Path
6 | from typing import Any, List, Tuple
7 |
8 | import numpy as np
9 | import torch
10 | import torch.autograd as ag
11 | import torch.nn as nn
12 | import torch.nn.functional as F
13 |
14 | from utils import PairwiseCosine
15 | from utils.misc import rectify_savepath
16 |
17 |
18 | class LifelongLoss():
19 | def __init__(self, name, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=1, post_backward=False):
20 | self.writer, self.counter = writer, counter
21 | self.viz, self.viz_start, self.viz_freq = viz, viz_start, viz_freq
22 | self.name = name
23 | self.initialized = False
24 | self.post_backward = post_backward
25 | self.lamb = lamb
26 |
27 | self._call_args, self._call_kwargs = None, None
28 |
29 | if args.ll_weight_dir is not None:
30 | self.weight_dir = Path(args.ll_weight_dir)
31 | self.load(args.ll_weight_load, args.devices[0])
32 | else:
33 | self.weight_dir = None
34 | self.args = args
35 |
36 | def init_loss(self, model: nn.Module) -> None:
37 | '''Called once upon first `__call__`.'''
38 |
39 | def calc_loss(self, *args, **kwargs) -> torch.Tensor:
40 | '''Called with arguments from `__call__`.'''
41 |
42 | def restore_states(self, state: List) -> List[int]:
43 | '''Called with loaded states.'''
44 | return list(range(len(state)))
45 |
46 | def get_states(self) -> Any:
47 | '''Called when saving.'''
48 | raise NotImplementedError()
49 |
50 | def _calc_log_loss(self, *args, **kwargs):
51 | '''Calculate loss and maybe log it.'''
52 | loss = self.lamb * self.calc_loss(*args, **kwargs)
53 |
54 | if self.writer is not None:
55 | n_iter = self.counter.steps if self.counter is not None else 0
56 | self.writer.add_scalars('Loss', {self.name.lower(): loss}, n_iter)
57 |
58 | return loss
59 |
60 | def __call__(self, *args, model: nn.Module = None, **kwargs):
61 |         '''This loss is called both before and after ``loss.backward()`` in case the method
62 |         requires gradient information. Specifically, ``model`` is ``None`` when called as the
63 |         ``closure`` after ``loss.backward()`` in ``optimizer.step(closure)``.'''
64 |
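        # Usage sketch (illustrative, not part of the API):
        #   l = ll_loss(model=net, gd=gd)      # pre-backward call
        #   total_loss.backward()
        #   optimizer.step(closure=ll_loss)    # closure re-invokes ll_loss() with model=None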
65 | if not self.initialized:
66 | self.init_loss(model)
67 | self.initialized = True
68 |
69 | if self.post_backward:
70 | if model is not None:
71 | # save context
72 | self._call_args, self._call_kwargs = args, kwargs
73 | return 0
74 | else:
75 | loss = self._calc_log_loss(*self._call_args, **self._call_kwargs)
76 | loss.backward()
77 | return loss
78 | elif model is not None:
79 | return self._calc_log_loss(*args, **kwargs)
80 | else:
81 | return 0
82 |
83 | def load(self, tasks, device):
84 | if self.weight_dir is None or tasks is None:
85 | return
86 | tasks = [tasks] if isinstance(tasks, str) else tasks
87 | paths = [str(self.weight_dir / f'{self.name.lower()}.{task}') for task in tasks]
88 | loaded_idx = self.restore_states([torch.load(path, map_location=device) for path in paths])
89 | print(f'Loaded {self.name} weights: {os.pathsep.join(np.take(paths, loaded_idx))}')
90 |
91 | def save(self, task=None, overwrite=True):
92 | if self.weight_dir is None:
93 | return
94 |         fname = self.name.lower() if task is None else f'{self.name.lower()}.{task}'
95 |         save_path = rectify_savepath(str(self.weight_dir / fname), overwrite=overwrite)
96 | torch.save(self.get_states(), save_path)
97 | print(f'Saved {self.name} weights: {save_path}')
98 |
99 |
100 | class L2RegLoss(LifelongLoss):
101 | def __init__(self, name, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=1, post_backward=False, avg_method='avg'):
102 | # used in super().load
103 | self.cur_imp, self.old_imp = None, None
104 | super().__init__(name, args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=lamb, post_backward=post_backward)
105 | # buffers
106 | self.cur_param, self.old_param = None, None
107 | self.model = None
108 | self.n_observed = 0
109 | self.avg_method = avg_method
110 |
111 | def init_loss_sub(self) -> None:
112 | '''Loss specific initialization'''
113 |
114 | def get_importance(self, model: nn.Module, gd: torch.Tensor) -> Tuple[List[torch.Tensor], int]:
115 | '''Returns per-parameter importance as well as nominal batch size.'''
116 | raise NotImplementedError()
117 |
118 | def init_loss(self, model: nn.Module) -> None:
119 | assert model is not None
120 | self.model = model
121 | # save params from last task
122 | self.cur_param = list(model.parameters())
123 | self.old_param = [t.data.clone() for t in self.cur_param]
124 | # 0 importance if not set, ensure same device if loaded
125 | self.cur_imp = [torch.zeros_like(t.data) for t in self.cur_param]
126 | if self.old_imp is None:
127 | self.old_imp = [torch.zeros_like(t.data) for t in self.cur_param]
128 | elif self.old_imp[0].device != self.old_param[0].device:
129 | self.old_imp = [ow.to(self.old_param[0].device) for ow in self.old_imp]
130 |
131 | self.init_loss_sub()
132 |
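    # Penalty shared by all subclasses (a summary of calc_loss below, not new behavior):
    #   loss = sum_i old_imp_i * (param_i - old_param_i) ** 2
    # where old_param is the snapshot taken in init_loss; subclasses differ only in
    # how get_importance estimates the per-parameter importance.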
133 | def calc_loss(self, *args, **kwargs) -> torch.Tensor:
134 | '''Collect weights for current task and penalize with those from previous tasks.'''
135 | gs, bs = self.get_importance(self.model, *args, **kwargs)
136 |
137 | loss = 0
138 | for imp, param, old_param, ow, w in zip(gs, self.cur_param, self.old_param, self.old_imp, self.cur_imp):
139 | if self.avg_method == 'avg':
140 | w.data = (w * self.n_observed + imp) / (self.n_observed + bs)
141 | elif self.avg_method == 'none':
142 | w.data = imp
143 | loss += (ow * (param - old_param) ** 2).sum()
144 | self.n_observed += bs
145 |
146 | return loss
147 |
148 | def restore_states(self, state: List) -> List[int]:
149 | self.old_imp = [torch.stack(ws).mean(dim=0) for ws in zip(*state)]
150 | return list(range(len(state)))
151 |
152 | def get_states(self) -> Any:
153 | return self.cur_imp
154 |
155 | class MASLoss(L2RegLoss):
156 | def __init__(self, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=100, relational=True):
157 | self.relational = relational
158 | name = 'RMAS' if relational else 'MAS'
159 | super().__init__(name, args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=lamb)
160 | self.cosine = PairwiseCosine()
161 |
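    # MAS importance (summary): imp_i = |d ||response|| / d theta_i|, where the response
    # is the raw descriptor batch (MAS) or its pairwise-cosine matrix (RMAS).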
162 | def get_importance(self, model, gd):
163 | if self.relational:
164 | pcos = self.cosine(gd[None], gd[None])[0]
165 | norm = pcos.square().sum().sqrt()
166 | else:
167 | norm = gd.square().sum(dim=1).sqrt().mean()
168 |
169 | gs = ag.grad(norm, model.parameters(), retain_graph=True)
170 | return [g.abs() for g in gs], len(gd)
171 |
172 |
173 | class EWCLoss(L2RegLoss):
174 | def __init__(self, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=100, ce=False):
175 | self.ce = ce
176 | post_backward = not ce
177 | super().__init__('CEWC' if ce else 'EWC', args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=lamb, post_backward=post_backward)
178 | self.cosine = PairwiseCosine()
179 |
180 | def get_importance(self, *args, **kwargs):
181 | if self.ce:
182 | return self.get_importance_ce(*args, **kwargs)
183 | else:
184 | return self.get_importance_grad(*args, **kwargs)
185 |
186 | def get_importance_grad(self, model, gd):
187 | b = len(gd)
188 | assert b % 3 == 0
189 | gs = [p.grad for p in self.cur_param]
190 |
191 |         return [g ** 2 for g in gs], b // 3 * 2  # squared gradients ~ diagonal Fisher
192 |
193 | def get_importance_ce(self, model, gd):
194 | b = len(gd)
195 | assert b % 3 == 0
196 |
197 | ank, pos, neg = torch.split(gd, b // 3)
198 | logit_p = F.softplus(F.cosine_similarity(ank, pos), beta=5, threshold=4.5).clamp(min=0, max=1)
199 | logit_n = F.softplus(F.cosine_similarity(ank, neg), beta=5, threshold=4.5).clamp(min=0, max=1)
200 |
201 | loss = (F.binary_cross_entropy(logit_p, torch.ones_like(logit_p)) +
202 | F.binary_cross_entropy(logit_n, torch.zeros_like(logit_n))) / 2
203 |
204 | gs = ag.grad(loss, model.parameters(), retain_graph=True)
205 | return [g ** 2 for g in gs], b // 3 * 2
206 |
207 |
208 | class SILoss(L2RegLoss):
209 | def __init__(self, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=100):
210 | super().__init__('SI', args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=lamb, post_backward=True, avg_method='none')
211 | self.cosine = PairwiseCosine()
212 | self.last_param = None
213 | self.w = None
214 | self.eps = 1e-1
215 |
216 | def init_loss_sub(self) -> None:
217 | self.last_param = [w.data.clone() for w in self.cur_param]
218 | self.w = [torch.zeros_like(p) for p in self.cur_param]
219 |
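    # SI importance (summary): accumulate w_i -= g_i * (theta_i - last_theta_i) along the
    # optimization path, then imp_i = w_i / ((theta_i - old_theta_i) ** 2 + eps).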
220 | @torch.no_grad()
221 | def get_importance(self, model, gd):
222 | gs = [p.grad for p in self.cur_param]
223 |
224 | # path integral
225 | cur_param = [p.data.clone() for p in self.cur_param]
226 | for w, g, cur_p, last_p in zip(self.w, gs, cur_param, self.last_param):
227 | w -= g * (cur_p - last_p)
228 | self.last_param = cur_param
229 |
230 | omega = [pt - p0 for pt, p0 in zip(cur_param, self.old_param)]
231 | return [w / (omg ** 2 + self.eps) for w, omg in zip(self.w, omega)], len(gd)
232 |
233 |
234 | class KDLoss(LifelongLoss):
235 | def __init__(self, args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None, lamb=100, relational=True, last_only=True):
236 | self._model_t_states: List[nn.Module.T_destination] = []
237 | self.last_only = last_only
238 | self.relational = relational
239 | name = 'RKD' if relational else 'KD'
240 | name = ('' if last_only else 'C') + name
241 | name = 'IFGIR' if name == 'CRKD' else name
242 | super().__init__(name, args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=lamb, post_backward=False)
243 | self.cosine = PairwiseCosine()
244 | self.model_s: nn.Module = None
245 | self.model_t: List[nn.Module] = []
246 |
247 | def init_loss(self, model: nn.Module) -> None:
248 | self.model_s = model
249 | self.model_t = []
250 | for model_t_state in self._model_t_states:
251 | model_t = copy.deepcopy(model).eval()
252 | (model_t.module if isinstance(model_t, nn.DataParallel) else model_t).load_state_dict(model_t_state)
253 | self.model_t.append(model_t)
254 |
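    # KD (summary): smooth-L1 between student and frozen-teacher responses; with
    # relational=True the response is the pairwise-cosine matrix (RKD-style).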
255 | def calc_loss(self, gd: torch.Tensor, img: torch.Tensor) -> torch.Tensor:
256 | gd_s = gd
257 | loss = 0
258 | # distill from each teacher
259 | for model_t in self.model_t:
260 | with torch.no_grad():
261 | gd_t = model_t(img=img)
262 |
263 | if self.relational:
264 | response_s = self.cosine(gd_s[None], gd_s[None])[0]
265 | response_t = self.cosine(gd_t[None], gd_t[None])[0]
266 | else:
267 | response_s = gd_s
268 | response_t = gd_t
269 |
270 | loss += F.smooth_l1_loss(response_s, response_t) / len(self.model_t)
271 |
272 | return loss
273 |
274 | def restore_states(self, state: List) -> List[int]:
275 | self._model_t_states = state.copy()
276 | if self.last_only:
277 | self._model_t_states = self._model_t_states[-1:]
278 | return [len(state) - 1]
279 | else:
280 | return list(range(len(state)))
281 |
282 | def get_states(self) -> nn.Module.T_destination:
283 | module = self.model_s.module if isinstance(self.model_s, nn.DataParallel) else self.model_s
284 | return module.state_dict()
285 |
286 |
287 | class CompoundLifelongLoss():
288 | def __init__(self, *losses: LifelongLoss):
289 | self.losses = losses
290 |
291 | def __call__(self, *args, model: nn.Module = None, **kwargs):
292 |         return sum(loss(*args, model=model, **kwargs) for loss in self.losses)
293 |
294 | def load(self, tasks, device):
295 | for loss in self.losses:
296 | loss.load(tasks, device)
297 |
298 | def save(self, task=None, overwrite=True):
299 | for loss in self.losses:
300 | loss.save(task, overwrite)
301 |
302 | def __iter__(self):
303 | return iter(self.losses)
304 |
305 |
306 | def get_ll_loss(args, writer=None, viz=None, viz_start=float('inf'), viz_freq=200, counter=None):
307 | if args.ll_method is None:
308 | return None
309 | assert len(args.ll_method) == len(args.ll_strength)
310 |
311 | # create each lifelong loss
312 | losses = []
313 |     for method, strength in zip(args.ll_method, args.ll_strength):
314 |         extra_kwargs = {}  # reset per method so kwargs don't leak across losses
315 |         method = method.lower()
316 | if method == 'mas':
317 | extra_kwargs['relational'] = False
318 | loss_class = MASLoss
319 | elif method == 'rmas':
320 | loss_class = MASLoss
321 | elif method == 'ewc':
322 | loss_class = EWCLoss
323 | elif method == 'si':
324 | loss_class = SILoss
325 | elif method == 'kd':
326 | extra_kwargs['relational'] = False
327 | loss_class = KDLoss
328 | elif method == 'rkd':
329 | loss_class = KDLoss
330 | elif method == 'ifgir':
331 | loss_class = KDLoss
332 | extra_kwargs['last_only'] = False
333 | else:
334 | raise ValueError(f'Unrecognized lifelong method: {method}')
335 |
336 | losses.append(loss_class(args=args, writer=writer, viz=viz, viz_start=viz_start, viz_freq=viz_freq, counter=counter, lamb=strength, **extra_kwargs))
337 |
338 | return CompoundLifelongLoss(*losses)
339 |
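# Illustrative mapping (assuming CLI flags as in main.py): --ll-method rkd ewc
# --ll-strength 100 100 yields CompoundLifelongLoss(KDLoss(relational=True, lamb=100),
# EWCLoss(lamb=100)).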
--------------------------------------------------------------------------------
/losses/loss.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from utils import Visualizer
7 | from models.memory import TartanAirMemory, NordlandMemory, RobotCarMemory
8 | from utils import Projector, PairwiseCosine, gen_probe
9 |
10 | from .lifelong import get_ll_loss
11 |
12 |
13 | class MemReplayLoss():
14 | def __init__(self, writer=None, viz_start=float('inf'), viz_freq=200, counter=None, args=None):
15 | super().__init__()
16 | self.args = args
17 | self.writer, self.counter, self.viz_start, self.viz_freq = writer, counter, viz_start, viz_freq
18 | self.viz = Visualizer('tensorboard', writer=self.writer)
19 |
20 | if args.dataset == 'tartanair':
21 | self.memory = TartanAirMemory(capacity=args.mem_size, n_probe=1200, out_device=args.device)
22 | elif args.dataset == 'nordland':
23 | self.memory = NordlandMemory(capacity=args.mem_size, out_device=args.device)
24 | elif args.dataset == 'robotcar':
25 | self.memory = RobotCarMemory(capacity=args.mem_size, dist_tol=20, head_tol=15, out_device=args.device)
26 |
27 | self.n_triplet, self.n_recent, self.n_pair = 4, 0, 1
28 | self.min_sample_size = 32
29 |
30 | self.gd_match = GlobalDescMatchLoss(n_triplet=self.n_triplet, n_pair=self.n_pair, writer=writer, viz=self.viz, viz_start=viz_start, viz_freq=viz_freq, counter=self.counter)
31 | self.ll_loss = get_ll_loss(args, writer=writer, viz=self.viz, viz_start=viz_start, viz_freq=viz_freq, counter=self.counter)
32 |
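    # Call sketch (mirrors train() in main_single.py): store the incoming batch in
    # memory, sample anchor/positive/negative frames, run the network on the
    # recombined batch, then add any lifelong-regularization terms.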
33 | def __call__(self, net, img, aux, env):
34 | device = img.device
35 | self.store_memory(img, aux, env)
36 |
37 | if len(self.memory) < self.min_sample_size:
38 | return torch.zeros(1).to(device)
39 |
40 | _, (ank_batch, pos_batch, neg_batch), (pos_rel, neg_rel) = \
41 | self.memory.sample_frames(self.n_triplet, self.n_recent, self.n_pair)
42 |
43 | # no suitable triplet
44 | if ank_batch is None:
45 | return torch.zeros(1).to(device)
46 |
47 | img = recombine('img', ank_batch, pos_batch, neg_batch)
48 |
49 | loss = 0
50 | gd = net(img=img)
51 | gd_match = self.gd_match(gd)
52 | loss += gd_match
53 |
54 | # forgetting prevention
55 | if self.ll_loss is not None:
56 | for ll_loss in self.ll_loss:
57 | loss_name = ll_loss.name.lower()
58 | if loss_name in ['mas', 'rmas', 'ewc', 'cewc', 'si']:
59 | loss += ll_loss(model=net, gd=gd)
60 | elif loss_name in ['kd', 'rkd', 'ifgir']:
61 | loss += ll_loss(model=net, gd=gd, img=img)
62 | else:
63 | raise ValueError(f'Unrecognized lifelong loss: {ll_loss}')
64 |
65 | # logging and visualization
66 | if self.writer is not None:
67 | n_iter = self.counter.steps if self.counter is not None else 0
68 | self.writer.add_scalars('Loss', {'global': gd_match}, n_iter)
69 | self.writer.add_histogram('Misc/RelN', neg_rel, n_iter)
70 | self.writer.add_histogram('Misc/RelP', pos_rel, n_iter)
71 | self.writer.add_scalars('Misc/MemoryUsage', {'len': len(self.memory)}, n_iter)
72 |
73 | # show triplets
74 | if self.viz is not None and n_iter >= self.viz_start and n_iter % self.viz_freq == 0:
75 | H, W = img.shape[2:]
76 | if isinstance(self.memory, TartanAirMemory):
77 | N = ank_batch['pos'].shape[1]
78 |
79 | # project points from pos to ank
80 | mem_pts_scr = Projector.world2pix(pos_batch['pos'].reshape(-1, N, 3), (H, W), ank_batch['pose'], ank_batch['K'], ank_batch['depth_map'])[0]
81 | B_total = self.n_triplet * (self.n_pair * 2 + 1)
82 | mem_pts_scr_ = mem_pts_scr.reshape(self.n_triplet, self.n_pair * N, 2)
83 | proj_pts_ = torch.cat([
84 | torch.zeros_like(mem_pts_scr_).fill_(np.nan),
85 | mem_pts_scr_,
86 | torch.zeros_like(mem_pts_scr_).fill_(np.nan)], 1).reshape(B_total, self.n_pair * N, 2)
87 |
88 | proj_pts_color = torch.arange(self.n_pair)[None, :, None].expand(B_total, self.n_pair, N) + 1
89 | proj_pts_color = proj_pts_color.reshape(B_total, self.n_pair * N).detach().cpu().numpy()
90 | else:
91 | proj_pts_ = proj_pts_color = None
92 |
93 | ank_img, pos_img, neg_img = img.split([self.n_triplet, self.n_triplet * self.n_pair, self.n_triplet * self.n_pair])
94 | self.viz.show(
95 | torch.cat([pos_img.reshape(self.n_triplet, self.n_pair, 3, H, W), ank_img[:, None], neg_img.reshape(self.n_triplet, self.n_pair, 3, H, W)], dim=1).reshape(-1, 3, H, W),
96 | proj_pts_, 'tab10', values=proj_pts_color, vmin=0, vmax=10, name='Misc/GlobalDesc/Triplet', step=n_iter,
97 | nrow=(self.n_pair * 2 + 1))
98 |
99 | return loss
100 |
101 | def store_memory(self, imgs, aux, env):
102 | self.memory.swap(env[0])
103 | if isinstance(self.memory, TartanAirMemory):
104 | depth_map, pose, K = aux
105 | points_w = Projector.pix2world(gen_probe(depth_map), depth_map, pose, K)
106 | self.memory.store_fifo(pos=points_w, img=imgs, depth_map=depth_map, pose=pose, K=K)
107 | elif isinstance(self.memory, NordlandMemory):
108 | offset = aux
109 | self.memory.store_fifo(img=imgs, offset=offset)
110 | elif isinstance(self.memory, RobotCarMemory):
111 | location, heading = aux
112 | self.memory.store_fifo(img=imgs, location=location, heading=heading)
113 |
114 |
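# recombine: concatenate batches along their shared trailing dims. E.g. (hypothetical
# shapes) 'img' tensors of (4, 3, H, W), (4, 2, 3, H, W) and (4, 2, 3, H, W) share
# (3, H, W), so the result is a (4 + 8 + 8, 3, H, W) tensor.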
115 | def recombine(key, *batches):
116 | tensors = [batch[key] for batch in batches]
117 | reversed_shapes = [list(reversed(tensor.shape[1:])) for tensor in tensors]
118 | common_shapes = []
119 | for shape in zip(*reversed_shapes):
120 | if all(s == shape[0] for s in shape):
121 | common_shapes.insert(0, shape[0])
122 | else:
123 | break
124 | return torch.cat([tensor.reshape(-1, *common_shapes) for tensor in tensors])
125 |
126 |
127 | class GlobalDescMatchLoss():
128 |
129 | def __init__(self, n_triplet=8, n_pair=1, writer=None,
130 | viz=None, viz_start=float('inf'), viz_freq=200, counter=None, debug=False):
131 | super().__init__()
132 | self.counter, self.writer = counter, writer
133 | self.viz, self.viz_start, self.viz_freq = viz, viz_start, viz_freq
134 | self.cosine = PairwiseCosine()
135 | self.n_triplet, self.n_pair = n_triplet, n_pair
136 |
137 | def __call__(self, gd):
138 | gd_a, gd_p, gd_n = gd.split([self.n_triplet, self.n_triplet * self.n_pair, self.n_triplet * self.n_pair])
139 | gd_a = gd_a[:, None]
140 | gd_p = gd_p.reshape(self.n_triplet, self.n_pair, *gd_p.shape[1:])
141 | gd_n = gd_n.reshape(self.n_triplet, self.n_pair, *gd_n.shape[1:])
142 |
143 | sim_ap = self.cosine(gd_a, gd_p)
144 | sim_an = self.cosine(gd_a, gd_n)
145 |         triplet_loss = (sim_an - sim_ap + 1).clamp(min=0)  # cosine triplet loss with margin 1
146 |
147 | # logging
148 |         n_iter = self.counter.steps if self.counter is not None else 0
149 | if self.writer is not None:
150 | self.writer.add_histogram('Misc/RelevanceLoss', triplet_loss, n_iter)
151 | self.writer.add_histogram('Misc/SimAP', sim_ap, n_iter)
152 | self.writer.add_histogram('Misc/SimAN', sim_an, n_iter)
153 | self.writer.add_scalars('Misc/GD', {'2-Norm': torch.norm(gd, dim=-1).mean()}, n_iter)
154 |
155 | return triplet_loss.mean()
156 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pathlib import Path
4 | import configargparse
5 |
6 | from main_single import run
7 |
8 |
9 | def main(args, extra_args):
10 | # default environment orders
11 | if args.envs is None:
12 | if args.dataset == "tartanair":
13 | args.envs = ["carwelding", "neighborhood", "office2", "abandonedfactory_night", "westerndesert"]
14 | args.epochs = [4, 1, 2, 2, 3] if args.epochs is None else args.epochs
15 | elif args.dataset == "nordland":
16 | args.envs = ["spring", "summer", "fall", "winter"]
17 | args.epochs = [3] if args.epochs is None else args.epochs
18 | elif args.dataset == "robotcar":
19 | args.envs = ["sun", "night", "overcast"]
20 | args.epochs = [3] if args.epochs is None else args.epochs
21 | args.epochs = [1] if args.epochs is None else args.epochs
22 |
23 | n_env = len(args.envs)
24 | if len(args.epochs) == 1:
25 | args.epochs = args.epochs * n_env
26 | else:
27 | assert args.method != 'joint' and len(args.epochs) == n_env
28 |
29 | out_dir = Path(args.out_dir)
30 | all_env_regex = '(' + '|'.join(args.envs) + ')'
31 |
32 | save_path = create_dir(out_dir / 'train') / 'model.pth'
33 | for i, (epoch, env) in enumerate(zip(args.epochs, args.envs)):
34 |
35 | if not args.skip_train:
36 | train_args = ['--task', 'train-joint' if args.method == 'joint' else 'train-seq']
37 | train_args += ['--dataset', args.dataset]
38 | train_args += ['--include', all_env_regex if args.method == 'joint' else env]
39 | train_args += ['--epoch', str(epoch)]
40 |
41 |             # load model saved from the previous run
42 | if i > 0:
43 | train_args += ['--load', str(save_path)]
44 | train_args += ['--save', str(save_path)]
45 |
46 | # weights loading for lifelong methods
47 | if args.method not in ['finetune', 'joint']:
48 | train_args += ['--ll-method', args.method]
49 | train_args += ['--ll-weight-dir', str(create_dir(out_dir / 'll-weights'))]
50 | if i > 0:
51 |                     train_args += ['--ll-weight-load'] + args.envs[:i]  # lifelong weights saved for all previous envs
52 |
53 | run(train_args + extra_args)
54 |
55 | if args.method == 'joint':
56 | save_path = save_path.parent / (save_path.name + f'.epoch{epoch - 1}')
57 | else:
58 | save_path = save_path.parent / (save_path.name + (f".{env}.{epoch - 1}" if epoch > 1 else f".{env}"))
59 |
60 | if not args.skip_eval:
61 | eval_args = ['--task', 'eval', '--dataset', args.dataset, '--include', all_env_regex, '--load', str(save_path)]
62 | run(eval_args + extra_args)
63 |
64 |
65 | def create_dir(directory: Path):
66 | directory.mkdir(parents=True, exist_ok=True)
67 | return directory
68 |
69 |
70 | if __name__ == '__main__':
71 | parser = configargparse.ArgParser()
72 | # meta
73 | parser.add_argument('--out-dir', type=str, default="./run/")
74 | parser.add_argument('--skip-eval', action='store_true')
75 | parser.add_argument('--skip-train', action='store_true')
76 | # launch
77 | parser.add_argument("--dataset", type=str, default='tartanair',
78 | choices=['tartanair', 'nordland', 'robotcar'], help="Dataset to use")
79 | parser.add_argument('--envs', type=str, nargs='+')
80 | parser.add_argument('--epochs', type=int, nargs='+')
81 | parser.add_argument('--method', type=str, required=True,
82 | choices=['finetune', 'si', 'ewc', 'kd', 'rkd', 'mas', 'rmas', 'airloop', 'joint'])
83 |
84 |     parsed_args, unknown_args = parser.parse_known_args()
85 | 
86 |     main(parsed_args, unknown_args)
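    # Example invocation (illustrative):
    #   python main.py --method rkd --dataset tartanair --out-dir ./run/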
87 |
--------------------------------------------------------------------------------
/main_single.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import tqdm
5 | import torch
6 | import random
7 | import numpy as np
8 | import torch.nn as nn
9 | import configargparse
10 | import torch.optim as optim
11 | from tensorboard import program
12 | from torch.utils.tensorboard import SummaryWriter
13 | import yaml
14 |
15 | from models import FeatureNet
16 | from datasets import get_dataset
17 | from losses import MemReplayLoss
18 | from utils.evaluation import RecognitionEvaluator
19 | from utils.misc import save_model, load_model, GlobalStepCounter, ProgressBarDescription
20 |
21 |
22 | @torch.no_grad()
23 | def evaluate(net, loader, counter, args, writer=None):
24 | net.eval()
25 |
26 | evaluator = RecognitionEvaluator(loader=loader, args=args)
27 |
28 | for images, aux, env_seq in tqdm.tqdm(loader):
29 | images = images.to(args.device)
30 |
31 | gd = net(images)
32 |
33 | evaluator.observe(gd, aux, images, env_seq)
34 |
35 | evaluator.report()
36 |
37 |
38 | def train(model, loader, optimizer, counter, args, writer=None):
39 | model.train()
40 |
41 | if 'train' in args.task:
42 | criterion = MemReplayLoss(writer=writer, viz_start=args.viz_start, viz_freq=args.viz_freq, counter=counter, args=args)
43 |
44 | last_env = None
45 |
46 | for epoch in range(args.epoch):
47 | enumerator = tqdm.tqdm(loader)
48 | pbd = ProgressBarDescription(enumerator)
49 | for images, aux, env_seq in enumerator:
50 | images = images.to(args.device)
51 |
52 | loss = criterion(model, images, aux, env_seq[0])
53 |
54 | # in case loss is manually set to 0 to skip batches
55 | if loss.requires_grad and not loss.isnan():
56 | loss.backward()
57 | optimizer.step(closure=criterion.ll_loss)
58 | optimizer.zero_grad()
59 |
60 | # save model on env change for env-incremental tasks
61 | if 'seq' in args.task and last_env != env_seq[0][0]:
62 | if last_env is not None:
63 | save_model(model, '%s.%s' % (args.save, last_env))
64 | last_env = env_seq[0][0]
65 |
66 | if (args.save_freq is not None and counter.steps % args.save_freq == 0) \
67 | or (args.save_steps is not None and counter.steps in args.save_steps):
68 | save_model(model, '%s.step%d' % (args.save, counter.steps))
69 |
70 | pbd.update(loss)
71 | counter.step()
72 |
73 | if 'seq' in args.task:
74 | if args.save is not None:
75 | save_model(model, '%s.%s' % (args.save, last_env))
76 | if args.ll_method is not None:
77 | criterion.ll_loss.save(task=last_env)
78 | else:
79 | save_model(model, '%s.epoch%d' % (args.save, epoch))
80 |
81 |
82 | def main(args):
83 | if args.deterministic >= 1:
84 | torch.manual_seed(args.seed)
85 | torch.cuda.manual_seed(args.seed)
86 | np.random.seed(args.seed)
87 | random.seed(args.seed)
88 | if args.deterministic >= 2:
89 | torch.backends.cudnn.benchmark = False
90 | if args.deterministic >= 3:
91 | torch.set_deterministic(True)
92 |
93 | loader = get_dataset(args)
94 | if args.devices is None:
95 | args.devices = ['cuda:%d' % i for i in range(torch.cuda.device_count())] if torch.cuda.is_available() else ['cpu']
96 | args.device = args.devices[0]
97 |
98 | model = FeatureNet(args.gd_dim).to(args.device)
99 | if args.load:
100 | load_model(model, args.load, device=args.device)
101 | if not args.no_parallel:
102 | model = nn.DataParallel(model, device_ids=args.devices)
103 |
104 | writer = None
105 | if args.log_dir is not None:
106 | log_dir = args.log_dir
107 | # timestamp runs into the same logdir
108 | if os.path.exists(log_dir) and os.path.isdir(log_dir):
109 | from datetime import datetime
110 | log_dir = os.path.join(log_dir, datetime.now().strftime('%b%d_%H-%M-%S'))
111 | writer = SummaryWriter(log_dir)
112 | tb = program.TensorBoard()
113 | tb.configure(argv=[None, '--logdir', log_dir, '--bind_all', '--samples_per_plugin=images=50'])
114 |         print('TensorBoard at %s\n' % tb.launch())
115 |
116 | step_counter = GlobalStepCounter(initial_step=1)
117 |
118 | if 'train' in args.task:
119 | optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.w_decay)
120 | train(model, loader, optimizer, step_counter, args, writer)
121 | if 'eval' in args.task:
122 | evaluate(model, loader, step_counter, args, writer)
123 |
124 |
125 | def run(args=None):
126 |     # Arguments
127 | parser = configargparse.ArgumentParser(description='Feature Graph Networks', default_config_files=['./config/config.yaml'])
128 | # general
129 | parser.add_argument("--config", is_config_file=True, help="Config file path")
130 | parser.add_argument("--task", type=str, choices=['train-seq', 'train-joint', 'eval'], default='train-seq', help="Task to perform")
131 | parser.add_argument("--catalog-dir", type=str, default='./.cache/catalog', help='Processed dataset catalog')
132 |     parser.add_argument("--no-parallel", action='store_true', help="Disable DataParallel")
133 | parser.add_argument("--devices", type=str, nargs='+', default=None, help="Available devices")
134 | parser.add_argument("--deterministic", type=int, default=3, help='Level of determinism.')
135 | parser.add_argument("--seed", type=int, default=0, help='Random seed.')
136 | parser.add_argument("--ll-config", type=str, help='Config file for lifelong losses')
137 | parser.add_argument("--print-configs", action='store_true', help='Print parsed configs to console')
138 | # dataset
139 | parser.add_argument("--dataset-root", type=str, default='/data/datasets/', help="Home for all datasets")
140 | parser.add_argument("--dataset", type=str, choices=['tartanair', 'nordland', 'robotcar'], default='tartanair', help="Dataset to use")
141 | parser.add_argument("--include", type=str, default=None, help="Regex for sequences to include")
142 | parser.add_argument("--exclude", type=str, default=None, help="Regex for sequences to exclude")
143 | parser.add_argument('--scale', type=float, default=0.5, help='Image scale')
144 | parser.add_argument("--num-workers", type=int, default=4, help="Number of workers in dataloader")
145 | # model
146 | parser.add_argument("--gd-dim", type=int, default=1024, help="Global descriptor dimension")
147 | # training
148 | parser.add_argument("--load", type=str, default=None, help="load pretrained model")
149 | parser.add_argument("--save", type=str, default=None, help="Model save path")
150 | parser.add_argument("--save-freq", type=int, help="Model saving frequency")
151 | parser.add_argument("--save-steps", type=int, nargs="+", help="Specific steps to save model")
152 | parser.add_argument("--ll-method", type=str, help="Lifelong learning method")
153 | parser.add_argument("--ll-weight-dir", type=str, default=None, help="Load directory for regularization weights")
154 | parser.add_argument("--ll-weight-load", type=str, nargs='+', help="Environment names for regularization weights")
155 | parser.add_argument("--ll-strength", type=float, nargs='+', help="Weights of lifelong losses")
156 | parser.add_argument("--batch-size", type=int, default=8, help="Minibatch size")
157 | parser.add_argument("--lr", type=float, default=2e-3, help="Learning rate")
158 | parser.add_argument("--w-decay", type=float, default=0, help="Weight decay of optim")
159 |     parser.add_argument("--epoch", type=int, default=15, help="Number of epochs")
160 | parser.add_argument("--mem-size", type=int, default=1000, help="Memory size")
161 | parser.add_argument("--log-dir", type=str, default=None, help="Tensorboard Log dir")
162 | parser.add_argument("--viz-start", type=int, default=np.inf, help='Visualize starting from iteration')
163 | parser.add_argument("--viz-freq", type=int, default=1, help='Visualize every * iteration(s)')
164 | # evaluation
165 | parser.add_argument("--eval-split-seed", type=int, default=42, help='Seed for splitting the dataset')
166 | parser.add_argument("--eval-percentage", type=float, default=0.2, help='Percentage of sequences for eval')
167 | parser.add_argument("--eval-save", type=str, help='Raw evaluation result save path')
168 | parser.add_argument("--eval-desc-save", type=str, help='Generated global descriptor save path')
169 | parser.add_argument("--eval-gt-dir", type=str, help='Evaluation groundtruth save directory')
170 |     parsed_args = parser.parse_args(args)
171 | 
172 |     # domain-specific configs
173 |     if parsed_args.ll_config is not None and parsed_args.ll_method is not None:
174 |         with open(parsed_args.ll_config, 'r') as f:
175 |             for k, v in yaml.safe_load(f)[parsed_args.ll_method].items():
176 |                 setattr(parsed_args, k.replace('-', '_'), v)
177 | 
178 |     if parsed_args.print_configs:
179 |         print("Training config:", parsed_args)
180 | 
181 |     main(parsed_args)
182 |
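# run() is also invoked programmatically by main.py, e.g. (illustrative):
#   run(['--task', 'train-seq', '--dataset', 'tartanair', '--include', 'office2'])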
183 |
184 | if __name__ == "__main__":
185 | run()
186 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from .featurenet import FeatureNet
4 |
--------------------------------------------------------------------------------
/models/featurenet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from torchvision import models
3 |
4 |
5 | class GeM(nn.Module):
6 | def __init__(self, feat_dim, desc_dim, p=3):
7 | super().__init__()
8 | self.p = p
9 | self.whiten = nn.Sequential(
10 | nn.Linear(feat_dim, desc_dim), nn.LeakyReLU(),
11 | nn.Linear(desc_dim, desc_dim)
12 | )
13 |
14 | def forward(self, features):
15 |         mean = (features ** self.p).mean(dim=1)  # generalized mean over spatial locations
16 |         return self.whiten(mean.sign() * mean.abs() ** (1 / self.p))  # signed p-th root, then learned whitening
17 |
18 |
19 | class FeatureNet(nn.Module):
20 | def __init__(self, gd_dim=1024):
21 | super().__init__()
22 | vgg = models.vgg19(pretrained=True)
23 | del vgg.avgpool, vgg.classifier, vgg.features[-1]
24 | self.features = vgg.features
25 |
26 | self.fea_dim = 512
27 | self.global_desc = GeM(self.fea_dim, gd_dim)
28 |
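    # Shape sketch (illustrative): img (B, 3, H, W) -> VGG features (B, 512, H/16, W/16)
    # -> flattened to (B, H*W/256, 512) -> GeM global descriptor (B, gd_dim).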
29 | def forward(self, img):
30 | fea = self.features(img)
31 | gd = self.global_desc(fea.reshape(fea.shape[0], fea.shape[1], -1).transpose(-1, -2))
32 | return gd
33 |
--------------------------------------------------------------------------------
/models/memory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from utils.misc import rectify_savepath
7 | from utils import feature_pt_ncovis
8 |
9 |
10 | class Memory():
11 | MAX_CAP = 2000
12 | STATE_DICT = ['swap_dir', 'out_device', 'capacity', 'n_frame',
13 | 'n_frame_hist', 'property_spec', '_states', '_store', '_rel']
14 |
15 | def __init__(self, property_spec, capacity=MAX_CAP, swap_dir='./memory', out_device='cuda'):
16 | super().__init__()
17 | self.swap_dir = swap_dir
18 | self.out_device = out_device
19 | self.capacity = capacity
20 | self.n_frame = None
21 | self.n_frame_hist = None
22 | self.property_spec = property_spec
23 | self._states = {}
24 | self._store = None
25 | self._rel = None
26 | self.cutoff = None
27 |
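    # Sampling sketch: anchors mix the n_recent newest frames with random older ones;
    # per-anchor relevance quantiles (0.1 / 0.9) split memory into negatives and
    # positives unless a dataset-specific cutoff is set.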
28 | def sample_frames(self, n_anchor, n_recent=0, n_pair=1, n_try=10):
29 | n_frame = len(self._store)
30 | for _ in range(n_try):
31 | # combination of most recent frames and random frames prior to those
32 | ank_idx = torch.cat([
33 | torch.randint(n_frame - n_recent, (n_anchor - n_recent,)),
34 | torch.arange(n_frame - n_recent, n_frame)])
35 | relevance = self._rel[ank_idx, :self.n_frame]
36 | # sample based on calculated or predefined cutoff
37 | if self.cutoff is None:
38 | cutoff = relevance.where(
39 | relevance > 0, torch.tensor(np.nan).to(relevance)).nanquantile(
40 | torch.tensor([0.1, 0.9]).to(relevance),
41 | dim=1, keepdim=True)
42 | cutoff[1] = cutoff[1].clamp(min=0.4, max=0.7)
43 | else:
44 | cutoff = torch.tensor(self.cutoff)
45 |
46 | pos_prob = (relevance >= cutoff[1]).to(torch.float)
47 | neg_prob = (relevance <= cutoff[0]).to(torch.float)
48 |
49 | if cutoff.isfinite().all() and (pos_prob > 0).any(1).all() and (neg_prob > 0).any(1).all():
50 | break
51 | else:
52 | # no suitable triplets
53 | return [[None] * 3] * 2 + [[None] * 2]
54 |
55 | pos_idx = torch.multinomial(pos_prob, n_pair, replacement=True)
56 | neg_idx = torch.multinomial(neg_prob, n_pair, replacement=True)
57 |
58 | ank_batch = self._store[ank_idx]
59 | pos_batch = self._store[pos_idx]
60 | neg_batch = self._store[neg_idx]
61 |
62 | return (ank_idx, pos_idx, neg_idx), \
63 | (ank_batch, pos_batch, neg_batch), \
64 | (relevance.gather(1, pos_idx), relevance.gather(1, neg_idx))
65 |
66 | def store_fifo(self, **properties):
67 | frame_addr = torch.arange(len(list(properties.values())[0]))
68 | frame_addr = (frame_addr + self.n_frame_hist) % self.capacity
69 | self.n_frame_hist += len(frame_addr)
70 | self._store.store(frame_addr, **properties)
71 | self.n_frame = len(self._store)
72 | self.update_rel(frame_addr)
73 |
74 | def swap(self, name):
75 | if name in self._states:
76 | self._store, self._rel, self.n_frame_hist = self._states[name]
77 | else:
78 |             self.n_frame_hist = np.array(0)  # 0-d array: updated in place, so _states sees the running count
79 | self._store = SparseStore(name=name, out_device=self.out_device, max_cap=self.capacity, **self.property_spec)
80 | self._rel = torch.zeros(self.capacity, self.capacity, device=self.out_device).fill_(np.nan)
81 | self._states[name] = self._store, self._rel, self.n_frame_hist
82 | self.n_frame = len(self._store)
83 |
84 | def update_rel(self, frame_idx):
85 | frame_idx = frame_idx.to(self._rel.device)
86 | # (n_frame, B)
87 | relevance = self.get_rel(torch.arange(self.n_frame), frame_idx).to(self._rel)
88 | self._rel[:self.n_frame, frame_idx] = relevance
89 | self._rel[frame_idx, :self.n_frame] = relevance.T
90 |
91 | def get_rel(self, src_idx, dst_idx):
92 | raise NotImplementedError()
93 |
94 | def save(self, path, overwrite=True):
95 | save_path = rectify_savepath(path, overwrite=overwrite)
96 | torch.save(self, save_path)
97 | print('Saved memory: %s' % save_path)
98 |
99 | def load(self, path):
100 | loaded_mem = torch.load(path)
101 | for attr in self.STATE_DICT:
102 | setattr(self, attr, getattr(loaded_mem, attr))
103 | print('Loaded memory: %s' % path)
104 |
105 | def __len__(self):
106 | return self._store.__len__()
107 |
108 | def envs(self):
109 | return self._states.keys()
110 |
111 |
112 | class TartanAirMemory(Memory):
113 |
114 | def __init__(self, capacity=Memory.MAX_CAP, n_probe=1200, img_size=(240, 320), swap_dir='./memory', out_device='cuda'):
115 | TARTANAIR_SPEC = {
116 | 'pos': {'shape': (n_probe, 3), 'default': np.nan, 'device': out_device},
117 | 'img': {'shape': (3,) + img_size, 'default': np.nan},
118 | 'pose': {'shape': (3, 4), 'default': np.nan},
119 | 'K': {'shape': (3, 3), 'default': np.nan},
120 | 'depth_map': {'shape': (1,) + img_size, 'default': np.nan},
121 | }
122 | super().__init__(TARTANAIR_SPEC, capacity, swap_dir, out_device)
123 | self.STATE_DICT.append('n_probe')
124 |
125 | self.n_probe = n_probe
126 |
127 | def get_rel(self, src_idx, dst_idx):
128 | src_pos = self._store[src_idx, ['pos']]['pos']
129 | dst_info = self._store[dst_idx, ['pos', 'pose', 'depth_map', 'K']]
130 | dst_pos, dst_depth_map, dst_pose, dst_K = dst_info['pos'], dst_info['depth_map'], dst_info['pose'], dst_info['K']
131 |
132 | return feature_pt_ncovis(src_pos, dst_pos, dst_depth_map, dst_pose, dst_K)
133 |
134 |
135 | class NordlandMemory(Memory):
136 |
137 | def __init__(self, window=5, capacity=Memory.MAX_CAP, img_size=(240, 320), swap_dir='./memory', out_device='cuda'):
138 | NORDLAND_SPEC = {
139 | 'img': {'shape': (3,) + img_size, 'default': np.nan},
140 | 'offset': {'shape': (), 'dtype': torch.int, 'default': -1},
141 | }
142 | super().__init__(NORDLAND_SPEC, capacity, swap_dir, out_device)
143 | self.STATE_DICT.append('cutoff')
144 | self.cutoff = [1 / (window + 0.5), 1 / (window + 0.5)]
145 |
146 | def get_rel(self, src_idx, dst_idx):
147 | src_off = self._store[src_idx, ['offset']]['offset']
148 | dst_off = self._store[dst_idx, ['offset']]['offset']
149 |
150 | return 1 / ((src_off[:, None] - dst_off[None, :]).abs() + 1)
151 |
152 |
153 | class RobotCarMemory(Memory):
154 |
155 | def __init__(self, dist_tol=20, head_tol=15, capacity=Memory.MAX_CAP, img_size=(240, 320), swap_dir='./memory', out_device='cuda'):
156 | ROBOTCAR_SPEC = {
157 | 'img': {'shape': (3,) + img_size, 'default': np.nan},
158 | 'location': {'shape': (2,), 'dtype': torch.float64, 'default': np.nan},
159 | 'heading': {'shape': (), 'default': np.nan},
160 | }
161 | super().__init__(ROBOTCAR_SPEC, capacity, swap_dir, out_device)
162 | self.STATE_DICT.extend(['cutoff', 'head_tol'])
163 | self.head_tol = head_tol
164 | self.cutoff = [1 / (dist_tol * 2 + 1), 1 / (dist_tol + 1)]
165 |
166 | def get_rel(self, src_idx, dst_idx):
167 | src_info = self._store[src_idx, ['location', 'heading']]
168 | dst_info = self._store[dst_idx, ['location', 'heading']]
169 | dist = torch.cdist(src_info['location'], dst_info['location']).to(torch.float)
170 | view_diff = (src_info['heading'][:, None] - dst_info['heading'][None, :]).abs()
171 |
172 | return (view_diff < self.head_tol).to(torch.float) / (dist + 1)
173 |
174 |
175 | class SparseStore():
176 |
177 | def __init__(self, name='store', max_cap=2000, device='cpu', out_device='cuda', **property_spec):
178 | super().__init__()
179 | self.name = name
180 | self.buf = {}
181 | for name, specs in property_spec.items():
182 | shape = specs['shape']
183 | cap = specs.get('max_cap', max_cap)
184 | dtype = specs.get('dtype', torch.float32)
185 | dev = specs.get('device', device)
186 | def_val = specs.get('default', 0)
187 |
188 | self.buf[name] = {
189 | 'shape': shape, 'capacity': cap, 'dtype': dtype, 'device': dev, 'default': def_val, 'values': {}}
190 |
191 | self.size = 0
192 | self.out_device = out_device
193 |
194 | @torch.no_grad()
195 | def store(self, _idx=None, **values):
196 | # sanity check
197 | batch = []
198 | for name, val in values.items():
199 | prop_shape = self.buf[name]['shape']
200 | prop_shape_st = len(val.shape) - len(prop_shape)
201 | assert prop_shape == val.shape[prop_shape_st:]
202 | batch.append(val.shape[:prop_shape_st])
203 | # coherent and linear indexing
204 | assert all(b == batch[0] for b in batch) and len(batch[0]) <= 1
205 |
206 | if isinstance(_idx, torch.Tensor):
207 | # avoids copy construct warning
208 | _idx = _idx.to(torch.long)
209 | elif _idx is not None:
210 | # any scalar or iterable
211 | _idx = torch.tensor(_idx, dtype=torch.long)
212 | else:
213 | # default indices
214 | _idx = torch.tensor(self.size, dtype=torch.long) if len(batch[0]) == 0 else \
215 | torch.arange(int(batch[0][0])) + self.size
216 | assert (len(_idx.shape) == len(batch[0]) == 0) or (len(_idx.shape) == 1 and len(_idx) == int(batch[0][0]))
217 |
218 | for name, val in values.items():
219 | self._store(self.buf[name], _idx, val)
220 |
221 | self.size = max(len(buf['values']) for buf in self.buf.values())
222 |
223 | def _store(self, buf, idx, value):
224 | value = value.to(buf['device'])
225 | if len(idx.shape) == 0:
226 | buf['values'][int(idx)] = value
227 | else:
228 | for i, val in zip(idx.tolist(), value.to(buf['device'], non_blocking=True)):
229 | buf['values'][int(i)] = val
230 |
231 | def _get(self, buf, idx):
232 | if int(idx) in buf['values']:
233 | return buf['values'][int(idx)]
234 | else:
235 | return torch.zeros(buf['shape'], dtype=buf['dtype'], device=buf['device']).fill_(buf['default'])
236 |
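    # Indexing protocol (summary): store[idx] returns every property; store[idx, ['pos']]
    # restricts to the listed keys. Scalar indices yield bare tensors; batched indices
    # yield stacked tensors shaped like idx on out_device.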
237 | @torch.no_grad()
238 | def __getitem__(self, idx):
239 | idx, include = idx if isinstance(idx, tuple) else (idx, self.buf.keys())
240 | idx = idx.to(torch.long) if isinstance(idx, torch.Tensor) else torch.tensor(idx, dtype=torch.long)
241 | ret = {name: [] for name in self.buf if name in include}
242 | if len(idx.shape) == 0:
243 |             for name in ret:  # honor the requested property subset
244 |                 ret[name].append(self._get(self.buf[name], idx))
245 | return {name: tensors[0] for name, tensors in ret.items()}
246 | else:
247 | for name in ret:
248 | for i in idx.flatten():
249 | ret[name].append(self._get(self.buf[name], i))
250 | # make returned tensor respect shape of index
251 | return {name: torch.stack(tensors).reshape(*idx.shape, *tensors[0].shape).to(self.out_device, non_blocking=True)
252 | for name, tensors in ret.items()}
253 |
254 | def __len__(self):
255 | return self.size
256 |
257 |
258 | def test_store():
259 | store = SparseStore(pos={'shape': (12, 3), 'device': 'cuda', 'default': np.nan}, idx={'shape': (1,), 'dtype': torch.long})
260 | pos = torch.arange(360).reshape(10, 12, 3).to(torch.float)
261 | idx = torch.arange(10).reshape(10, 1)
262 | store.store(0, pos=pos[0], idx=idx[0])
263 | store.store(pos=pos[1:3], idx=idx[1:3])
264 | store.store(torch.arange(1, 4).cuda(), pos=pos[4:7], idx=idx[4:7])
265 | print(len(store))
266 | print(store[1])
267 | print(store[[0, 2]])
268 | print(store[[[0], [4]]])
269 |
270 |
271 | if __name__ == '__main__':
272 | test_store()
273 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from .visualization import Visualizer
4 |
5 | from .geometry import Projector, feature_pt_ncovis, gen_probe
6 |
7 | from .evaluation import RecognitionEvaluator
8 |
9 | from .utils import coord_list_grid_sample, PairwiseCosine
10 |
--------------------------------------------------------------------------------
/utils/evaluation.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | import os
3 | from pathlib import Path
4 | from typing import Any, Dict, List
5 |
6 | import numpy as np
7 | from prettytable import PrettyTable
8 | import torch
9 | import tqdm
10 |
11 | from .geometry import Projector, gen_probe, feature_pt_ncovis
12 | from .misc import GlobalStepCounter
13 | from .utils import PairwiseCosine
14 |
15 |
16 | class RecognitionEvaluator():
17 | def __init__(self, loader, viz=None, writer=None, counter=None, args=None):
18 | self.viz = viz
19 | self.probe_pts, self.gds, self.n_observed = None, None, 0
20 | self.adj_mat = None
21 | self.loader = loader
22 |
23 | self.writer = writer
24 | self.counter = counter if counter is not None else GlobalStepCounter()
25 | self.env_data: Dict[str, _EnvData] = {}
26 | self.args = args
27 | self.chunk_size = 128
28 |
29 | self.gt_path = None
30 | self.eval_gt_dir = args.eval_gt_dir
31 | if args.eval_gt_dir is not None:
32 | self.gt_path = Path(args.eval_gt_dir) / f'{args.dataset}.npz'
33 |
34 | @torch.no_grad()
35 | def observe(self, gd, aux, imgs, env_seq):
36 | B = len(imgs)
37 | env = env_seq[0][0]
38 | env_data = self.env_data.setdefault(env, _EnvData(env=env))
39 |
40 | env_data.gds.append(gd)
41 |
42 | if self.args.dataset == 'tartanair':
43 | seq = env_seq[1][0][0] + '_' + env_seq[1][1][0]
44 | elif self.args.dataset in ['nordland', 'robotcar']:
45 | seq = env_seq[1][0]
46 | # record/update sequence start/end
47 | seq_lims = env_data.seq_lims.setdefault(seq, [env_data.n_observed, env_data.n_observed + B])
48 | seq_lims[1] = env_data.n_observed + B
49 |
50 | if self.gt_path is not None:
51 | if self.args.dataset == 'tartanair':
52 | depth_map, poses, Ks = aux
53 | probe_pts = Projector.pix2world(gen_probe(depth_map), depth_map, poses, Ks)
54 | env_data.aux.extend(probe_pts)
55 | elif self.args.dataset == 'nordland':
56 | env_data.aux.extend(aux)
57 | elif self.args.dataset == 'robotcar':
58 | env_data.aux.extend(zip(*aux))
59 |
60 | env_data.n_observed += B
61 |
62 | @torch.no_grad()
63 | def report(self):
64 | cosine = PairwiseCosine()
65 | sim_dict = {}
66 | for env, env_data in self.env_data.items():
67 | gds = torch.cat(env_data.gds)
68 | # reduce memory consumption
69 | cossim = [[cosine(gds[None, st0:nd0], gds[None, st1:nd1])[0].cpu().numpy()
70 | for st1, nd1 in chunk_index_itr(len(gds), self.chunk_size)]
71 | for st0, nd0 in chunk_index_itr(len(gds), self.chunk_size)]
72 | sim_dict[env] = np.block(cossim)
73 |
74 | if self.args.eval_save is not None:
75 | np.savez_compressed(self.args.eval_save, **sim_dict)
76 | print(f'Saved result: {self.args.eval_save}')
77 |
78 | if self.args.eval_desc_save is not None:
79 | desc_dict = {env: torch.cat(env_data.gds).cpu().numpy() for env, env_data in self.env_data.items()}
80 | np.savez_compressed(self.args.eval_desc_save, **desc_dict)
81 | print(f'Saved global descriptors: {self.args.eval_desc_save}')
82 |
83 | # load or compile groundtruth adjacency
84 | gt_adj = None
85 |         if self.gt_path is not None and self.gt_path.is_file():
86 | gt_adj = np.load(self.gt_path)
87 | print(f'Loaded ground truth: {self.gt_path}')
88 | elif self.eval_gt_dir is not None:
89 |             print('Building groundtruth adjacency matrix')
90 | env_len, gt_adj = {}, {}
91 | for env, env_data in self.env_data.items():
92 | lims = np.array(list(env_data.seq_lims.values()))
93 | env_len[env] = (lims[:, 1] - lims[:, 0]).sum()
94 | gt_adj[env] = np.full((env_len[env], env_len[env]), np.nan, dtype=np.float32)
95 |
96 | env_progress = {env: 0 for env in self.env_data}
97 | for imgs, aux_d, env_seq in tqdm.tqdm(self.loader):
98 | B, env = len(imgs), env_seq[0][0]
99 | n_total = env_len[env]
100 | i = env_progress[env]
101 | for st, nd in chunk_index_itr(n_total, self.chunk_size):
102 | gt_adj[env][st:nd, i:i+B] = self._calc_adjacency(self.env_data[env].aux[st:nd], aux_d)
103 | env_progress[env] += B
104 |
105 | os.makedirs(self.eval_gt_dir, exist_ok=True)
106 | np.savez_compressed(self.gt_path, **gt_adj)
107 | print(f'Saved ground truth: {self.gt_path}')
108 |
109 | if gt_adj is not None:
110 | table = PrettyTable(field_names=['', 'R@100P'], float_format='.3')
111 | criterion = get_criterion(self.args.dataset)
112 | for env in sim_dict:
113 | gt_adj_ = torch.from_numpy(gt_adj[env]).to(self.args.device).fill_diagonal_(0)
114 | cossim = torch.from_numpy(sim_dict[env]).to(self.args.device).fill_diagonal_(0)
115 | r100p = recall_at_100precision(gt_adj_, cossim, criterion)
116 | table.add_row([env, r100p])
117 | print(table.get_string())
118 |
119 | def _calc_adjacency(self, aux, aux_d):
120 | '''Calculate adjacency based on metadata'''
121 | if self.args.dataset == 'tartanair':
122 | probe_pts, (depths, poses, K) = aux, aux_d
123 | probe_pts = torch.stack(probe_pts)
124 | depths, poses, K = depths.to(probe_pts), poses.to(probe_pts), K.to(probe_pts)
125 |
126 | return feature_pt_ncovis(probe_pts, torch.zeros(len(K)), depths, poses, K).cpu().numpy()
127 | elif self.args.dataset == 'nordland':
128 | offset, offset_d = aux, aux_d
129 | offset, offset_d = torch.stack(offset), offset_d
130 |
131 | return (1 / (np.abs(offset[:, None] - offset_d[None, :]) + 1)).cpu().numpy()
132 | elif self.args.dataset == 'robotcar':
133 | HEADING_TOL = 15
134 | (location, heading), (location_d, heading_d) = list(zip(*aux)), aux_d
135 | location, heading = torch.stack(location), torch.stack(heading)
136 |
137 | dist = torch.cdist(location, location_d)
138 | view_diff = (heading[:, None] - heading_d[None, :]).abs()
139 | return ((view_diff < HEADING_TOL).to(torch.float) / (dist + 1)).cpu().numpy()
140 |
141 | @dataclass
142 | class _EnvData():
143 | env: str
144 | gds: List[torch.Tensor] = field(default_factory=list)
145 | n_observed: int = 0
146 | seq_lims: Dict[str, List[int]] = field(default_factory=dict)
147 | aux: List[Any] = field(default_factory=list)
148 |
149 |
150 | def chunk_index_itr(total: int, chunk_size: int):
151 | for chunk_start in range(0, total, chunk_size):
152 | chunk_end = min(total, chunk_start + chunk_size)
153 | yield chunk_start, chunk_end
154 |
155 |
156 | def get_criterion(dataset_name):
157 |     if dataset_name == 'tartanair':
158 |         return lambda sim: sim > 0.5           # covisibility above 0.5
159 |     elif dataset_name == 'nordland':
160 |         return lambda sim: sim > 1 / 3.5       # within ~2 frames, since sim = 1 / (|offset diff| + 1)
161 |     elif dataset_name == 'robotcar':
162 |         return lambda sim: sim > 1 / (10 + 1)  # within ~10 m at a similar heading
163 |
164 |
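# Recall@100%Precision (summary): walk each query's ranking from the top and count
# hits until the first miss (the cumprod trick), i.e. recall at the last operating
# point with perfect precision, averaged over queries.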
165 | @torch.no_grad()
166 | def recall_at_100precision(gt_sim, pred_sim, relevant):
167 | pred_rank = pred_sim.sort(dim=1, descending=True).indices
168 | is_relevant = relevant(gt_sim)
169 |
170 | n_rel = is_relevant.sum(dim=1).to(torch.float32)
171 | _relevant = is_relevant.gather(1, pred_rank[:, :int(n_rel.max())]).to(torch.float32)
172 | _n_rec_rel = torch.cumprod(_relevant, dim=1).sum(dim=1)
173 |
174 | return (_n_rec_rel / n_rel.clamp(min=1e-6)).mean().item()
175 |
--------------------------------------------------------------------------------
/utils/geometry.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import kornia as kn
4 | import numpy as np
5 | import torch
6 | import torch.nn.functional as F
7 | from scipy.spatial.transform import Rotation as R
8 |
9 | from .utils import coord_list_grid_sample
10 |
11 |
12 | class Projector():
13 |
14 | @staticmethod
15 | def pix2world(points, depth_map, poses, Ks):
16 | """Unprojects pixels to 3D coordinates."""
17 | H, W = depth_map.shape[2:]
18 | cam = Projector._make_camera(H, W, Ks, poses)
19 | depths = Projector._sample_depths(depth_map, points)
20 | return Projector._pix2world(points, depths, cam)
21 |
22 | @staticmethod
23 | def world2pix(points, res, poses, Ks, depth_map=None, eps=1e-2):
24 | """Projects 3D coordinates to screen."""
25 | cam = Projector._make_camera(res[0], res[1], Ks, poses)
26 | xy, depth = Projector._world2pix(points, cam)
27 |
28 | if depth_map is not None:
29 | depth_dst = Projector._sample_depths(depth_map, xy)
30 | xy[(depth < 0) | (depth > depth_dst + eps) | ((xy.abs() > 1).any(dim=-1))] = np.nan
31 |
32 | return xy, depth
33 |
34 | @staticmethod
35 | def _make_camera(height, width, K, pose):
36 | """Creates a PinholeCamera with specified intrinsics and extrinsics."""
37 | intrinsics = torch.eye(4, 4).to(K).repeat(len(K), 1, 1)
38 | intrinsics[:, 0:3, 0:3] = K
39 |
40 | extrinsics = torch.eye(4, 4).to(pose).repeat(len(pose), 1, 1)
41 | extrinsics[:, 0:3, 0:4] = pose
42 |
43 | height, width = torch.tensor([height]).to(K), torch.tensor([width]).to(K)
44 |
45 | return kn.PinholeCamera(intrinsics, extrinsics, height, width)
46 |
47 | @staticmethod
48 | def _pix2world(p, depth, cam):
49 | """Projects p to world coordinate.
50 |
51 | Args:
52 | p: List of points in pixels (B, N, 2).
53 | depth: Depth of each point(B, N).
54 | cam: Camera with batch size B
55 |
56 | Returns:
57 | World coordinate of p (B, N, 3).
58 | """
59 | p = kn.denormalize_pixel_coordinates(p, int(cam.height), int(cam.width))
60 | p_h = kn.convert_points_to_homogeneous(p)
61 | p_cam = kn.transform_points(cam.intrinsics_inverse(), p_h) * depth.unsqueeze(-1)
62 | return kn.transform_points(kn.inverse_transformation(cam.extrinsics), p_cam)
63 |
64 | @staticmethod
65 | def _world2pix(p_w, cam):
66 | """Projects p to normalized camera coordinate.
67 |
68 | Args:
69 | p_w: List of points in world coordinate (B, N, 3).
70 | cam: Camera with batch size B
71 |
72 | Returns:
73 |             Normalized coordinates of p in camera cam (B, N, 2) and screen depth (B, N).
74 | """
75 | proj = kn.compose_transformations(cam.intrinsics, cam.extrinsics)
76 | p_h = kn.transform_points(proj, p_w)
77 | p, d = kn.convert_points_from_homogeneous(p_h), p_h[..., 2]
78 | return kn.normalize_pixel_coordinates(p, int(cam.height), int(cam.width)), d
79 |
80 | @staticmethod
81 | def _project_points(p, depth_src, cam_src, cam_dst):
82 |         """Projects points p visible in cam_src to cam_dst.
83 |
84 | Args:
85 | p: List of points in pixels (B, N, 2).
86 |             depth_src: Depth of each point in the source view (B, N).
87 | cam_src, cam_dst: Source and destination cameras with batch size B
88 |
89 | Returns:
90 | Normalized coordinates of p in pose cam_dst (B, N, 2).
91 | """
92 | return Projector._world2pix(Projector._pix2world(p, depth_src, cam_src), cam_dst)
93 |
94 | @staticmethod
95 | def _sample_depths(depths_map, points):
96 | """Samples the depth of each point in points"""
97 | assert depths_map.shape[:2] == (len(points), 1)
98 | return F.grid_sample(depths_map, points[:, None], align_corners=False)[:, 0, 0, ...]
99 |
100 | def src_repeat(x, n_dst=None):
101 | """[b0 b1 b2 ...] -> [b0 b0 ... b1 b1 ...]"""
102 | B, shape = x.shape[0], x.shape[1:]
103 | n_dst = n_dst if n_dst is not None else B
104 | return x.unsqueeze(1).expand(B, n_dst, *shape).reshape(B * n_dst, *shape)
105 |
106 | def dst_repeat(x, n_src=None):
107 | """[b0 b1 b2 ...] -> [b0 b1 ... b0 b1 ...]"""
108 | B, shape = x.shape[0], x.shape[1:]
109 | n_src = n_src if n_src is not None else B
110 | return x.unsqueeze(0).expand(n_src, B, *shape).reshape(n_src * B, *shape)
111 |
112 | # feature_pt_ncovis estimates pairwise covisibility between two batches of frames by
113 | # projecting the world points pos0 into every view of batch 1 and measuring the overlap.
113 |
114 | def feature_pt_ncovis(pos0, pts1, depth1, pose1, K1, eps=1e-2, ret_proj=False, grid_size=(12, 16)):
115 | B0, B1 = len(pos0), len(pts1)
116 | _, _, H, W = depth1.shape
117 |
118 | # find where points from other frames land
119 | pts0_scr1, pts0_depth1 = Projector.world2pix(src_repeat(pos0, B1), (H, W),
120 | dst_repeat(pose1, B0), dst_repeat(K1, B0))
121 | _, N, _ = pts0_scr1.shape
122 | pts0_scr1_depth1 = coord_list_grid_sample(depth1, pts0_scr1.reshape(B0, B1, N, 2).transpose(0, 1).reshape(B1, B0 * N, 2)).reshape(B1, B0, N).transpose(0, 1).reshape(B0 * B1, N)
123 | pts0_scr1[(pts0_depth1 < 0) | (pts0_depth1 > pts0_scr1_depth1 + eps) | ((pts0_scr1.abs() > 1).any(dim=-1))] = np.nan
124 |
125 |     Ax = pts0_scr1.isfinite().all(dim=-1).to(torch.float).mean(dim=-1)  # fraction of pos0 points that project validly into view 1
126 | binned_pts0_scr1 = kn.denormalize_pixel_coordinates(pts0_scr1, *grid_size).round()
127 | # binning
128 | B, N, _ = binned_pts0_scr1.shape
129 | valid_b, valid_n = binned_pts0_scr1.isfinite().all(dim=-1).nonzero(as_tuple=True)
130 |     bp0s1_b = torch.cat([valid_b[:, None], binned_pts0_scr1[valid_b, valid_n]], dim=-1)
131 |
132 | A1 = torch.zeros(B).to(Ax)
133 | if bp0s1_b.numel() != 0:
134 | # count unique (b, x, y) for each b
135 | bs, count = bp0s1_b.unique(dim=0)[:, 0].unique_consecutive(return_counts=True)
136 |
137 |         A1[bs.to(torch.long)] = count / np.prod(grid_size)  # fraction of grid cells covered in view 1
138 |
139 |     covis = Ax / (1 + Ax / A1.clamp(min=1e-6) - Ax)  # IoU-style score combining the two coverage estimates
140 |
141 | if ret_proj:
142 | return covis.reshape(B0, B1), pts0_scr1.reshape(B0, B1, N, 2)
143 | return covis.reshape(B0, B1)
144 |
145 |
146 | def gen_probe(depth_map, scale=8):
147 | B, _, H, W = depth_map.shape
148 | h, w = H // scale, W // scale
149 | points = torch.stack(torch.meshgrid(torch.arange(h), torch.arange(w)), dim=2) + 0.5
150 | points = kn.normalize_pixel_coordinates(points, w + 1, h + 1).unsqueeze(0).expand(B, -1, -1, -1)
151 | return points.reshape(B, -1, 2).to(depth_map)
152 |
153 |
154 | def pose2mat(pose):
155 | """Converts pose vectors to matrices.
156 | Args:
157 | pose: [tx, ty, tz, qx, qy, qz, qw] (N, 7).
158 | Returns:
159 |         World-to-camera extrinsics [R t] (N, 3, 4).
160 | """
161 | t = pose[:, 0:3, None]
162 |     rot = R.from_quat(pose[:, 3:7]).as_matrix().astype(np.float32).transpose(0, 2, 1)  # R^T: inverse rotation
163 |     t = -rot @ t  # -R^T t: inverse translation
164 | return torch.cat([torch.from_numpy(rot), torch.from_numpy(t)], dim=2)
165 |
--------------------------------------------------------------------------------
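Note: a hedged round-trip sketch of the Projector API above — not a repo file; the synthetic depth map and intrinsics are made up, and B=1 keeps the camera batch trivial:

    import numpy as np
    import torch
    from utils.geometry import Projector, gen_probe, pose2mat

    H, W = 480, 640
    depth = torch.rand(1, 1, H, W) * 10 + 1  # synthetic depth map (B, 1, H, W)
    # identity pose in [tx ty tz qx qy qz qw] form -> (1, 3, 4) extrinsics
    pose = pose2mat(np.array([[0., 0, 0, 0, 0, 0, 1]], dtype=np.float32))
    K = torch.tensor([[[320., 0, 320], [0, 320, 240], [0, 0, 1]]])

    pts = gen_probe(depth)                          # normalized probe coordinates (B, N, 2)
    xyz = Projector.pix2world(pts, depth, pose, K)  # lift to world coordinates (B, N, 3)
    xy, d = Projector.world2pix(xyz, (H, W), pose, K)  # reproject: xy ~ pts, d ~ sampled depth
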
/utils/misc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import time
5 | import torch
6 | import numpy as np
7 | from collections import deque
8 | import torch.nn as nn
9 |
10 |
11 | def rectify_savepath(path, overwrite=False):
12 |     os.makedirs(os.path.dirname(path) or '.', exist_ok=True)  # dirname may be empty for bare filenames
13 |
14 | save_path, save_file_dup = path, 0
15 | while os.path.exists(save_path) and not overwrite:
16 | save_file_dup += 1
17 | save_path = path + '.%d' % save_file_dup
18 |
19 | return save_path
20 |
21 |
22 | def save_model(model, path):
23 | model = model.module if isinstance(model, nn.DataParallel) else model
24 |
25 | save_path = rectify_savepath(path)
26 |
27 | torch.save(model.state_dict(), save_path)
28 | print('Saved model: %s' % save_path)
29 |
30 |
31 | def load_model(model, path, device='cuda', strict=False):
32 |     # Remap keys from legacy checkpoints: strip the 'features.encoder.encoder.' prefix.
33 |     state_dict = torch.load(path, map_location=device)
34 |     state_dict_ = {}
35 |     for k, v in state_dict.items():
36 |         k = k[25:] if k.startswith('features.encoder.encoder') else k
37 | state_dict_[k] = v
38 | state_dict = state_dict_
39 | missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=strict)
40 | if not strict:
41 | if len(missing_keys) > 0:
42 | print(f'Warning: Missing key(s): {missing_keys}')
43 | if len(unexpected_keys) > 0:
44 | print(f'Warning: Unexpected key(s): {unexpected_keys}')
45 | print('Loaded model: %s' % path)
46 | return model
47 |
48 |
49 | class GlobalStepCounter():
50 | def __init__(self, initial_step=0):
51 | self._steps = initial_step
52 |
53 | @property
54 | def steps(self):
55 | return self._steps
56 |
57 | def step(self, step=1):
58 |         self._steps += step
59 | return self._steps
60 |
61 |
62 | class ProgressBarDescription():
63 | def __init__(self, tq, ave_steps=50):
64 | self.losses = deque()
65 | self.tq = tq
66 | self.ave_steps = ave_steps
67 |
68 | def update(self, loss):
69 | loss = loss.item()
70 | if np.isnan(loss):
71 | print('Warning: nan loss.')
72 | else:
73 | self.losses.append(loss)
74 | if len(self.losses) > self.ave_steps:
75 | self.losses.popleft()
76 | self.tq.set_description("Loss: %.4f at" % (np.average(self.losses)))
77 |
78 |
79 | class Timer:
80 | def __init__(self):
81 | self.hist = []
82 | self.start_time = None
83 | self.n_iter = 0
84 |
85 | def __enter__(self):
86 | torch.cuda.synchronize()
87 | self.start_time = time.time()
88 |
89 | def __exit__(self, exc_type, exc_value, exc_trace):
90 | torch.cuda.synchronize()
91 | self.hist.append(time.time() - self.start_time)
92 | self.start_time = None
93 |
94 | def get_ave(self):
95 | return np.average(self.hist)
96 |
97 |
98 | def count_parameters(model):
99 | return sum(p.numel() for p in model.parameters() if p.requires_grad)
100 |
--------------------------------------------------------------------------------
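Note: a small illustrative loop for the helpers above — not a repo file; the model and data are made up, and Timer assumes a CUDA-enabled PyTorch build since it synchronizes the GPU around each block:

    import torch
    from utils.misc import GlobalStepCounter, Timer, count_parameters

    model = torch.nn.Linear(8, 2)
    print('trainable parameters:', count_parameters(model))

    counter, timer = GlobalStepCounter(), Timer()
    for batch in [torch.rand(4, 8)] * 3:
        with timer:          # CUDA-synchronized wall-clock timing of the forward pass
            model(batch)
        counter.step()
    print('%.6fs/iter after %d steps' % (timer.get_ave(), counter.steps))
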
/utils/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | def coord_list_grid_sample(values, points, mode='bilinear'):
7 | dim = len(points.shape)
8 | points = points.view(values.size(0), 1, -1, 2) if dim == 3 else points
9 | output = F.grid_sample(values, points, mode, align_corners=True).permute(0, 2, 3, 1)
10 | return output.squeeze(1) if dim == 3 else output
11 |
12 |
13 | class PairwiseCosine(nn.Module):
14 | def __init__(self, inter_batch=False, dim=-1, eps=1e-8):
15 | super(PairwiseCosine, self).__init__()
16 | self.inter_batch, self.dim, self.eps = inter_batch, dim, eps
17 | self.eqn = 'amd,bnd->abmn' if inter_batch else 'bmd,bnd->bmn'
18 |
19 | def forward(self, x, y):
20 | xx = torch.sum(x**2, dim=self.dim).unsqueeze(-1) # (A, M, 1)
21 | yy = torch.sum(y**2, dim=self.dim).unsqueeze(-2) # (B, 1, N)
22 | if self.inter_batch:
23 | xx, yy = xx.unsqueeze(1), yy.unsqueeze(0) # (A, 1, M, 1), (1, B, 1, N)
24 | xy = torch.einsum(self.eqn, x, y) if x.shape[1] > 0 else torch.zeros_like(xx * yy)
25 | return xy / (xx * yy).clamp(min=self.eps**2).sqrt()
26 |
--------------------------------------------------------------------------------
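Note: a quick shape sketch for PairwiseCosine — not a repo file; the descriptor dimensions are assumed for illustration:

    import torch
    from utils.utils import PairwiseCosine

    x, y = torch.rand(2, 5, 32), torch.rand(2, 7, 32)    # (B, M, D) and (B, N, D) descriptors
    print(PairwiseCosine()(x, y).shape)                  # (2, 5, 7): within-batch pairs
    print(PairwiseCosine(inter_batch=True)(x, y).shape)  # (2, 2, 5, 7): across-batch pairs
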
/utils/visualization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import cv2
5 | import torch
6 | import numpy as np
7 | import torchvision
8 | from PIL import Image
9 | from matplotlib import cm
10 | import matplotlib.colors as mc
11 | from matplotlib import pyplot as plt
12 | import kornia.geometry.conversions as C
13 | from matplotlib.animation import FuncAnimation
14 |
15 |
16 | class Visualizer():
17 | vis_id = 0
18 |
19 | def __init__(self, display='imshow', default_name=None, **kwargs):
20 | self.radius, self.thickness = 1, 1
21 | self.default_name = 'Visualizer %d' % self.vis_id if default_name is None else default_name
22 | Visualizer.vis_id += 1
23 |
24 | if display == 'imshow':
25 | self.displayer = ImshowDisplayer()
26 |         elif display == 'plt':
27 | self.displayer = PltDisplayer(**kwargs)
28 | elif display == 'tensorboard':
29 | self.displayer = TBDisplayer(**kwargs)
30 | elif display == 'video':
31 | self.displayer = VideoFileDisplayer(**kwargs)
32 | elif display == 'gif':
33 | self.displayer = GIFFileDisplayer(**kwargs)
34 |
35 | def show(self, images, points=None, color='red', nrow=4, values=None, vmin=None, vmax=None, name=None, step=0):
36 | b, c, h, w = images.shape
37 | if c == 3:
38 | images = torch2cv(images)
39 | elif c == 1: # show colored values
40 | images = images.detach().cpu().numpy().transpose((0, 2, 3, 1))
41 | images = get_colors(color, images.squeeze(-1), vmin, vmax)
42 |
43 | if points is not None:
44 | points = C.denormalize_pixel_coordinates(points, h, w).to(torch.int)
45 | for i, pts in enumerate(points):
46 | colors = get_colors(color, [0]*len(pts) if values is None else values[i], vmin, vmax)
47 | images[i] = circles(images[i], pts, self.radius, colors, self.thickness)
48 |
49 | disp_name = name if name is not None else self.default_name
50 |
51 | if nrow is not None:
52 | images = torch.tensor(images.copy()).permute((0, 3, 1, 2))
53 | grid = torchvision.utils.make_grid(images, nrow=nrow, padding=1).permute((1, 2, 0))
54 | self.displayer.display(disp_name, grid.numpy(), step)
55 | else:
56 | for i, img in enumerate(images):
57 | self.displayer.display(disp_name, img, step)
58 |
59 |     def showmatch(self, images1, points1, images2, points2, color='blue', values=None, vmin=None, vmax=None, name=None, step=0, nrow=2):
60 | match_pairs = []
61 |         for i, (img1, pts1, img2, pts2) in enumerate(zip(images1, points1, images2, points2)):
62 | assert len(pts1) == len(pts2)
63 | h, w = img1.size(-2), img1.size(-1)
64 | pts1 = C.denormalize_pixel_coordinates(pts1, h, w)
65 | pts2 = C.denormalize_pixel_coordinates(pts2, h, w)
66 | img1, img2 = torch2cv(torch.stack([img1, img2]))
67 | colors = get_colors(color, [0]*len(pts1) if values is None else values[i], vmin, vmax)
68 | match_pairs.append(torch.tensor(matches(img1, pts1, img2, pts2, colors)))
69 |
70 | images = torch.stack(match_pairs).permute((0, 3, 1, 2))
71 | grid = torchvision.utils.make_grid(images, nrow=nrow, padding=1).permute((1, 2, 0))
72 | self.displayer.display(name if name is not None else self.default_name, grid.numpy(), step)
73 |
74 | def reprojectshow(self, imgs, pts_src, pts_dst, src, dst):
75 | # TODO not adapted for change in torch2cv
76 | pts_src, pts_dst = pts_src[src], pts_src[dst]
77 | for i in range(src[0].size(0)):
78 | pts1 = pts_src[i].unsqueeze(0)
79 | pts2 = pts_dst[i].unsqueeze(0)
80 | img1 = torch2cv(imgs[src[0][i]]).copy()
81 | img2 = torch2cv(imgs[dst[0][i]]).copy()
82 | image = matches(img1,pts1,img2,pts2,self.blue,2)
83 | cv2.imshow(self.winname+'-dst', image)
84 | cv2.waitKey(1)
85 |
86 | def close(self):
87 | self.displayer.close()
88 |
89 |
90 | class VisDisplayer():
91 | def display(self, name, frame, step=0):
92 | raise NotImplementedError()
93 |
94 | def close(self):
95 | pass
96 |
97 |
98 | class PltDisplayer(VisDisplayer):
99 | def __init__(self, fig_size=None):
100 | self.fig_size = fig_size
101 |
102 | def display(self, name, frame, step=0):
103 | fig = plt.figure(figsize=self.fig_size)
104 | ax = fig.gca()
105 | ax.imshow(frame[:, :, ::-1])
106 | fig.tight_layout()
107 |
108 | def close(self):
109 | plt.close('all')
110 |
111 |
112 | class ImshowDisplayer(VisDisplayer):
113 | def display(self, name, frame, step=0):
114 | cv2.imshow(name, frame)
115 | cv2.waitKey(1)
116 |
117 | def close(self):
118 | cv2.destroyAllWindows()
119 |
120 |
121 | class TBDisplayer(VisDisplayer):
122 | def __init__(self, writer):
123 | self.writer = writer
124 |
125 | def display(self, name, frame, step=0):
126 | self.writer.add_image(name, frame[:, :, ::-1], step, dataformats='HWC')
127 |
128 |
129 | class VideoFileDisplayer(VisDisplayer):
130 | def __init__(self, save_dir=None, framerate=10):
131 | if save_dir is None:
132 | from datetime import datetime
133 | current_time = datetime.now().strftime('%b%d_%H-%M-%S')
134 | self.save_dir = os.path.join('.', 'vidout', current_time)
135 | else:
136 | self.save_dir = save_dir
137 | self.framerate = framerate
138 | self.writer = {}
139 |
140 | def display(self, name, frame, step=0):
141 | if name not in self.writer:
142 | os.makedirs(self.save_dir, exist_ok=True)
143 | self.writer[name] = cv2.VideoWriter(os.path.join(self.save_dir, '%s.avi' % name),
144 |                                                  cv2.VideoWriter_fourcc(*'avc1'),  # H.264; requires codec support in the local OpenCV build
145 | self.framerate, (frame.shape[1], frame.shape[0]))
146 | self.writer[name].write(frame)
147 |
148 | def close(self):
149 | for wn in self.writer:
150 | self.writer[wn].release()
151 |
152 |
153 | class GIFFileDisplayer(VisDisplayer):
154 | def __init__(self, save_dir=None, framerate=10, fig_size=None):
155 | if save_dir is None:
156 | from datetime import datetime
157 | current_time = datetime.now().strftime('%b%d_%H-%M-%S')
158 | self.save_dir = os.path.join('.', 'vidout', current_time)
159 | else:
160 | self.save_dir = save_dir
161 | self.framerate, self.fig_size = framerate, fig_size
162 | self.figure = {}
163 |
164 | def display(self, name, frame, step=0):
165 | if name not in self.figure:
166 | os.makedirs(self.save_dir, exist_ok=True)
167 | self.figure[name] = []
168 | self.figure[name].append(frame)
169 |
170 | def close(self):
171 | for name, frames in self.figure.items():
172 |             fig = plt.figure(figsize=self.fig_size)
173 |             plt.axis('off')
174 |             im = plt.imshow(frames[0], animated=True, aspect='auto')
175 |             plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
176 |             def _get_frame(i):  # frame 0 is the base image; animate the remaining frames
177 |                 im.set_array(frames[i + 1])
178 |                 return im,
179 |             anim = FuncAnimation(fig, _get_frame, len(frames) - 1, blit=True)
180 | anim.save(os.path.join(self.save_dir, '%s.gif' % name), fps=self.framerate)
181 |
182 |
183 | def matches(img1, pts1, img2, pts2, colors, circ_radius=3, thickness=1):
184 | ''' Assume pts1 are matched with pts2, respectively.
185 | '''
186 | H1, W1, C = img1.shape
187 | H2, W2, _ = img2.shape
188 | new_img = np.zeros((max(H1, H2), W1 + W2, C), img1.dtype)
189 | new_img[:H1, :W1], new_img[:H2, W1:W1+W2] = img1, img2
190 | new_img = circles(new_img, pts1, circ_radius, colors, thickness)
191 |     pts2 = pts2 + pts2.new_tensor([W1, 0])  # shift x into the right image; avoids mutating the caller's tensor
192 | new_img = circles(new_img, pts2, circ_radius, colors, thickness)
193 | return lines(new_img, pts1, pts2, colors, thickness)
194 |
195 |
196 | def circles(image, points, radius, colors, thickness):
197 | for pt, c in zip(points, colors):
198 | if not torch.any(pt.isnan()):
199 |             image = cv2.circle(image.copy(), tuple(map(int, pt)), radius, tuple(c.tolist()), thickness, cv2.LINE_AA)
200 | return image
201 |
202 |
203 | def lines(image, pts1, pts2, colors, thickness):
204 | for pt1, pt2, c in zip(pts1, pts2, colors):
205 | if not torch.any(pt1.isnan() | pt2.isnan()):
206 |             image = cv2.line(image.copy(), tuple(map(int, pt1)), tuple(map(int, pt2)), tuple(c.tolist()), thickness, cv2.LINE_AA)
207 | return image
208 |
209 |
210 | def get_colors(name, values=[0], vmin=None, vmax=None):
211 | if name in mc.get_named_colors_mapping():
212 | rgb = mc.to_rgba_array(name)[0, :3]
213 | rgb = np.tile(rgb, (len(values), 1))
214 | else:
215 | values = np.array(values)
216 | normalize = mc.Normalize(vmin=vmin, vmax=vmax)
217 |         cmap = plt.get_cmap(name)  # cm.get_cmap is removed in newer matplotlib
218 | rgb = cmap(normalize(values))
219 | return (rgb[..., 2::-1] * 255).astype(np.uint8)
220 |
221 |
222 | def torch2cv(images):
223 | rgb = (255 * images).type(torch.uint8).cpu().numpy()
224 | bgr = rgb[:, ::-1, ...].transpose((0, 2, 3, 1))
225 | return bgr
226 |
--------------------------------------------------------------------------------
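Note: a hedged end-to-end example of the Visualizer with the GIF backend — not a repo file; all tensors are synthetic, and the output path follows from the save_dir argument (vidout/demo/demo.gif on close):

    import torch
    from utils.visualization import Visualizer

    vis = Visualizer('gif', save_dir='vidout/demo', framerate=5)
    for step in range(3):
        images = torch.rand(4, 3, 64, 64)      # (B, C, H, W) in [0, 1]
        points = torch.rand(4, 10, 2) * 2 - 1  # keypoints in normalized [-1, 1] coordinates
        vis.show(images, points, name='demo', step=step)
    vis.close()  # writes the accumulated frames as a GIF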