├── .github
│   └── CODEOWNERS
├── .gitignore
├── README.md
├── download_astrovision.py
└── lists
    ├── clusters.json
    ├── segments.json
    ├── test.json
    └── train.json
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @travisdriver
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # AstroVision stuff
2 | data/
3 |
4 | # macOS
5 | *.DS_Store
6 |
7 | # Byte-compiled / optimized / DLL files
8 | __pycache__/
9 | *.py[cod]
10 | *$py.class
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | lib64/
25 | parts/
26 | sdist/
27 | var/
28 | wheels/
29 | share/python-wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .nox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | *.py,cover
56 | .hypothesis/
57 | .pytest_cache/
58 | cover/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 | local_settings.py
67 | db.sqlite3
68 | db.sqlite3-journal
69 |
70 | # Flask stuff:
71 | instance/
72 | .webassets-cache
73 |
74 | # Scrapy stuff:
75 | .scrapy
76 |
77 | # Sphinx documentation
78 | docs/_build/
79 |
80 | # PyBuilder
81 | .pybuilder/
82 | target/
83 |
84 | # Jupyter Notebook
85 | .ipynb_checkpoints
86 |
87 | # IPython
88 | profile_default/
89 | ipython_config.py
90 |
91 | # pyenv
92 | # For a library or package, you might want to ignore these files since the code is
93 | # intended to run in multiple environments; otherwise, check them in:
94 | # .python-version
95 |
96 | # pipenv
97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
100 | # install all needed dependencies.
101 | #Pipfile.lock
102 |
103 | # poetry
104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | # This is especially recommended for binary packages to ensure reproducibility, and is more
106 | # commonly ignored for libraries.
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 |
110 | # pdm
111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | # in version control.
115 | # https://pdm.fming.dev/#use-with-ide
116 | .pdm.toml
117 |
118 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
119 | __pypackages__/
120 |
121 | # Celery stuff
122 | celerybeat-schedule
123 | celerybeat.pid
124 |
125 | # SageMath parsed files
126 | *.sage.py
127 |
128 | # Environments
129 | .env
130 | .venv
131 | env/
132 | venv/
133 | ENV/
134 | env.bak/
135 | venv.bak/
136 |
137 | # Spyder project settings
138 | .spyderproject
139 | .spyproject
140 |
141 | # Rope project settings
142 | .ropeproject
143 |
144 | # mkdocs documentation
145 | /site
146 |
147 | # mypy
148 | .mypy_cache/
149 | .dmypy.json
150 | dmypy.json
151 |
152 | # Pyre type checker
153 | .pyre/
154 |
155 | # pytype static type analyzer
156 | .pytype/
157 |
158 | # Cython debug symbols
159 | cython_debug/
160 |
161 | # PyCharm
162 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
163 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
164 | # and can be added to the global gitignore or merged into this file. For a more nuclear
165 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
166 | #.idea/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # _This repository features scripts for downloading and manipulating the AstroVision datasets_ [Open in Colab](https://colab.research.google.com/drive/1A79acc-RwQG2X1OoNd_UL1fT7T3FNtpN)
4 |
5 |
6 |
7 | # About
8 |
9 | AstroVision is a first-of-its-kind, large-scale dataset of real small body images from both legacy and ongoing deep space missions, currently featuring over 110,000 densely annotated, real images of sixteen small bodies from eight missions. AstroVision was developed to facilitate the study of computer vision and deep learning for autonomous navigation in the vicinity of a small body, with special emphasis on the training and evaluation of deep learning-based keypoint detection and feature description methods.
10 |
11 | If you find our datasets useful for your research, please cite the [AstroVision paper](https://www.sciencedirect.com/science/article/pii/S0094576523000103):
12 |
13 | ```bibtex
14 | @article{driver2023astrovision,
15 | title={{AstroVision}: Towards Autonomous Feature Detection and Description for Missions to Small Bodies Using Deep Learning},
16 | author={Driver, Travis and Skinner, Katherine and Dor, Mehregan and Tsiotras, Panagiotis},
17 | journal={Acta Astronautica: Special Issue on AI for Space},
18 | year={2023},
19 | volume={210},
20 | pages={393--410}
21 | }
22 | ```
23 |
24 | Please make sure to :star: star and :eye: watch the repository to show support and get notified of any updates!
25 |
26 | # Downloading the datasets
27 |
28 | ### **Note:** We will continue to release datasets and update this repository over the coming months. Available datasets can be found by checking the `lists/` directory or our 🤗 [Hugging Face page](https://huggingface.co/datasets/travisdriver/astrovision-data).
29 |
30 | The AstroVision datasets may be downloaded using the provided `download_astrovision.py` script or by downloading them directly from our 🤗 [Hugging Face page](https://huggingface.co/datasets/travisdriver/astrovision-data). The train and test data may be downloaded using the `--train` and `--test` options, respectively:
31 |
32 | ```bash
33 | python download_astrovision.py --train # downloads training data to data/
34 | python download_astrovision.py --test  # downloads test data to data/
35 | ```
36 |
37 | The `--segments` (`--clusters`) option will download _all available_ segments (clusters). To download only the segments (clusters) from a specific dataset, add the `--dataset_name` option, e.g., `--dataset_name dawn_ceres`. A specific segment (cluster) from a specific dataset may be downloaded by combining `--dataset_name` with `--segment_name` (`--cluster_name`), e.g.,
38 |
39 | ```bash
40 | python download_astrovision.py --segments --dataset_name dawn_ceres --segment_name 2015293_c6_orbit125 # download specific segment
41 | python download_astrovision.py --clusters --dataset_name dawn_ceres --cluster_name 00000032 # download specific cluster
42 | ```
43 |
44 | Below we provide more detailed information about each dataset.
45 |
46 | | Mission | Target | Launch (yyyy/mm/dd) | # Images | Disk (GB) | Clusters | Segments |
47 | |:----------------:|:-------------------------:|:-------------------:|:--------:|:----------:|:--------:|:--------:|
48 | | NEAR | 433 Eros | 1996/02/17 | 12827 | 13.1 | TBA | TBA |
49 | | Cassini | Mimas (Saturn I) | 1997/10/15 | 307 | 3.0 | N/A | [cas_mimas](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_mimas) |
50 | | | Tethys (Saturn III) | | 751 | 9.2 | N/A | [cas_tethys](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_tethys) |
51 | | | Dione (Saturn IV) | | 1381 | 12.0 | N/A | [cas_dione](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_dione) |
52 | | | Rhea (Saturn V) | | 665 | 5.1 | N/A | [cas_rhea](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_rhea) |
53 | | | Phoebe (Saturn IX) | | 96 | 0.8 | N/A | [cas_phoebe](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_phoebe) |
54 | | | Janus (Saturn X) | | 184 | 2.0 | N/A | [cas_janus](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_janus) |
55 | | | Epimetheus (Saturn XI) | | 133 | 1.3 | N/A | [cas_epim](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/cas_epim) |
56 | | Hayabusa | 25143 Itokawa | 2003/05/09 | 560 | 5.4 | N/A | [haya_itokawa](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/haya_itokawa) |
57 | | Mars Express | Phobos (Mars I) | 2003/06/02 | 890 | 4.1 | N/A | TBA |
58 | | Rosetta (NavCam) | 67P/Churyumov–Gerasimenko | 2004/03/02 | 12315 | 95.0 | TBA / [test](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/test/rosetta_67p) | TBA |
59 | | Rosetta (OSIRIS) | 67P/Churyumov–Gerasimenko | 2004/03/02 | 13993 | 95.0 | TBA / [test](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/test/rosiris_67p) | TBA |
60 | | | 21 Lutetia | | 40 | 2.1 | N/A | [rosetta_lutetia](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/rosetta_lutetia) |
61 | | Dawn | 1 Ceres | 2007/09/27 | 38540 | 204.8 | [train](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/train/dawn_ceres) / [test](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/test/dawn_ceres) | TBA |
62 | | | 4 Vesta | | 17504 | 93.3 | [train](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/train/dawn_vesta) / [test](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/test/dawn_vesta) | [dawn_vesta](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/segments/dawn_vesta) |
63 | | Hayabusa2 | 162173 Ryugu | 2014/12/03 | 640 | 6.0 | N/A | TBA |
64 | | OSIRIS-REx | 101955 Bennu | 2016/09/08 | 16618 | 106.5 | [train](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/train/orex_bennu) / [test](https://huggingface.co/datasets/travisdriver/astrovision-data/tree/main/clusters/test/orex_bennu) | TBA |
65 | | TOTAL | | | 117493 | 658.7 | | |
66 |
67 | # Data format
68 |
69 | Following the popular [COLMAP data format](https://colmap.github.io/format.html), each data segment contains the files `images.bin`, `cameras.bin`, and `points3D.bin`, which contain the camera extrinsics and keypoints, camera intrinsics, and 3D point cloud data, respectively.
70 |
71 | - `cameras.bin` encodes a dictionary of `camera_id` and [`Camera`](third_party/colmap/scripts/python/read_write_model.py) pairs. `Camera` objects are structured as follows:
72 |   - `Camera.id`: defines the unique (and possibly noncontiguous) identifier for the `Camera`.
73 | - `Camera.model`: the camera model. We utilize the "PINHOLE" camera model, as AstroVision contains undistorted images.
74 | - `Camera.width` & `Camera.height`: the width and height of the sensor in pixels.
75 | - `Camera.params`: `List` of cameras parameters (intrinsics). For the "PINHOLE" camera model, `params = [fx, fy, cx, cy]`, where `fx` and `fy` are the focal lengths in $x$ and $y$, respectively, and (`cx`, `cy`) is the principal point of the camera.
76 |
77 | - `images.bin` encodes a dictionary of `image_id` and [`Image`](third_party/colmap/scripts/python/read_write_model.py) pairs. `Image` objects are structured as follows:
78 |   - `Image.id`: defines the unique (and possibly noncontiguous) identifier for the `Image`.
79 | - `Image.tvec`: $\mathbf{r}^\mathcal{C_ i}_ {\mathrm{BC}_ i}$, i.e., the relative position of the origin of the camera frame $\mathcal{C}_ i$ with respect to the origin of the body-fixed frame $\mathcal{B}$ expressed in the $\mathcal{C}_ i$ frame.
80 | - `Image.qvec`: $\mathbf{q}_ {\mathcal{C}_ i\mathcal{B}}$, i.e., the relative orientation of the camera frame $\mathcal{C}_ i$ with respect to the body-fixed frame $\mathcal{B}$. The user may call `Image.qvec2rotmat()` to get the corresponding rotation matrix $R_ {\mathcal{C}_ i\mathcal{B}}$.
81 |   - `Image.camera_id`: the identifier for the camera that was used to capture the image.
82 | - `Image.name`: the name of the corresponding file, e.g., `00000000.png`.
83 | - `Image.xys`: contains all of the keypoints $\mathbf{p}^{(i)}_ k$ in image $i$, stored as a ($N$, 2) array. In our case, the keypoints are the forward-projected model vertices.
84 | - `Image.point3D_ids`: stores the `point3D_id` for each keypoint in `Image.xys`, which can be used to fetch the corresponding `point3D` from the `points3D` dictionary.
85 |
86 | - `points3D.bin` encodes a dictionary of `point3D_id` and [`Point3D`](third_party/colmap/scripts/python/read_write_model.py) pairs. `Point3D` objects are structured as follows:
87 |   - `Point3D.id`: defines the unique (and possibly noncontiguous) identifier for the `Point3D`.
88 | - `Point3D.xyz`: the 3D-coordinates of the landmark in the body-fixed frame, i.e., $\mathbf{\ell} _{k}^\mathcal{B}$.
89 |   - `Point3D.image_ids`: the IDs of the images in which the landmark was observed.
90 | - `Point3D.point2D_idxs`: the index in `Image.xys` that corresponds to the landmark observation, i.e., `xy = images[Point3D.image_ids[k]].xys[Point3D.point2D_idxs[k]]` given some index `k`.
91 |
92 | These three data containers, along with the ground truth shape model, completely describe the scene.
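
As a concrete example, these containers can be loaded with COLMAP's [`read_write_model.py`](third_party/colmap/scripts/python/read_write_model.py) helpers referenced above. The snippet below is a minimal sketch, assuming that module is importable and that a segment has been downloaded under `data/`; the path is illustrative, so point it at wherever the `.bin` files live in your download:

```python
# Minimal sketch: load an AstroVision segment with COLMAP's read_write_model helpers.
import numpy as np
from read_write_model import read_cameras_binary, read_images_binary, read_points3D_binary

segment_dir = "data/segments/dawn_vesta/2011205_rc3"  # illustrative path; adjust as needed

cameras = read_cameras_binary(f"{segment_dir}/cameras.bin")     # camera_id  -> Camera
images = read_images_binary(f"{segment_dir}/images.bin")        # image_id   -> Image
points3d = read_points3D_binary(f"{segment_dir}/points3D.bin")  # point3D_id -> Point3D

# Intrinsic matrix K for one image from its "PINHOLE" parameters [fx, fy, cx, cy].
image = next(iter(images.values()))
fx, fy, cx, cy = cameras[image.camera_id].params
K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

# Pose stored with the image (standard COLMAP convention): R_CB rotates body-fixed
# coordinates into the camera frame, and tvec is the corresponding translation.
R_CB = image.qvec2rotmat()
t_CB = image.tvec
print(K, R_CB, t_CB, len(points3d))
```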
93 |
94 | In addition to the scene geometry, each image is annotated with a landmark map, a depth map, and a visibility mask.
95 |
96 |
97 |
98 | - The _landmark map_ provides a consistent, discrete set of reference points for sparse correspondence computation and is derived by forward-projecting vertices from a medium-resolution (i.e., $\sim$ 800k facets) shape model onto the image plane. We classify visible landmarks by tracing rays (via the [Trimesh library](https://trimsh.org/)) from the landmarks toward the camera origin and recording landmarks whose line-of-sight ray does not intersect the 3D model (see the sketch after this list).
99 | - The _depth map_ provides a dense representation of the imaged surface and is computed by backward-projecting rays at each pixel in the image and recording the depth of the intersection between the ray and a high-resolution (i.e., $\sim$ 3.2 million facets) shape model.
100 | - The _visibility mask_ provides an estimate of the non-occluded portions of the imaged surface.
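
For the landmark visibility classification described above, the following is a minimal sketch of the idea using the Trimesh ray interface; the shape-model file, pose values, and surface offset are illustrative placeholders, not the released AstroVision pipeline:

```python
# Sketch (assumed setup): a landmark is kept as visible if the ray from the
# landmark toward the camera origin does not hit the shape model first.
import numpy as np
import trimesh

mesh = trimesh.load("shape_model.ply")    # hypothetical medium-resolution shape model
landmarks_B = np.asarray(mesh.vertices)   # candidate landmarks in the body-fixed frame

# Camera origin in the body-fixed frame from an assumed pose (R_CB, t_CB), e.g., as
# loaded in the previous sketch: r_B = -R_CB^T @ t_CB.
R_CB, t_CB = np.eye(3), np.array([0.0, 0.0, 10.0])  # placeholder pose
cam_origin_B = -R_CB.T @ t_CB

directions = cam_origin_B - landmarks_B
directions /= np.linalg.norm(directions, axis=1, keepdims=True)
origins = landmarks_B + 1e-4 * directions  # start rays just off the surface

blocked = mesh.ray.intersects_any(ray_origins=origins, ray_directions=directions)
visible_landmarks_B = landmarks_B[~blocked]
```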
101 |
102 | **Note:** Instead of the traditional $z$-depth parameterization used for depth maps, we use the _absolute depth_, similar to the inverse depth parameterization. Let $\mathbf{m}^{(i)}_ k = K^{-1} \underline{\mathbf{p}}^{(i)}_ k$, where $K$ is the calibration matrix. Then, the landmark $\mathbf{\ell}_ k$ corresponding to keypoint $\mathbf{p}^{(i)}_ {k}$ with depth $d^{\mathcal{C}_ i}_ k$ (from the depth map) can be computed via
103 |
104 | $$
105 | \begin{align}
106 | \underline{\mathbf{\ell}}_ {k}^\mathcal{B} = \Pi^{-1}\left(\mathbf{p}^{(i)}_ k, d^{\mathcal{C}_ i}_ k, T_ {\mathcal{C}_ i\mathcal{B}}; K\right) &= T_ {\mathcal{C}_ i\mathcal{B}}^{-1} \begin{bmatrix} d^{\mathcal{C}_ i}_ k \mathbf{m}^{(i)}_ k / \|\mathbf{m}^{(i)}_ k\| \\ 1 \end{bmatrix} \\
107 | &= \frac{d^{\mathcal{C}_ i}_ k}{\|\mathbf{m}^{(i)}_ k\|} R_ {\mathcal{BC_i}} \mathbf{m}^{(i)} _k + \mathbf{r}^\mathcal{B} _{\mathrm{C} _i\mathrm{B}}.
108 | \end{align}
109 | $$
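
As a worked example of this back-projection, here is a minimal sketch that recovers a landmark's body-fixed coordinates from a keypoint, its absolute depth, and the camera pose; `K`, `R_CB`, and `t_CB` follow the conventions of the earlier loading sketch, and the numeric values are illustrative only:

```python
# Sketch: back-project a keypoint with absolute depth d into the body-fixed frame,
# following the formula above. The pixel coordinates, depth, intrinsics, and pose
# below are placeholder values.
import numpy as np

def backproject(p_xy, d, K, R_CB, t_CB):
    """Return the landmark in the body-fixed frame for pixel p_xy with absolute depth d."""
    m = np.linalg.inv(K) @ np.array([p_xy[0], p_xy[1], 1.0])  # bearing in the camera frame
    ell_C = d * m / np.linalg.norm(m)                         # landmark in the camera frame
    # Invert x_C = R_CB @ x_B + t_CB to express the landmark in the body-fixed frame.
    return R_CB.T @ (ell_C - t_CB)

# Illustrative usage with placeholder intrinsics, pose, pixel, and depth.
K = np.array([[500.0, 0.0, 512.0], [0.0, 500.0, 512.0], [0.0, 0.0, 1.0]])
R_CB, t_CB = np.eye(3), np.array([0.0, 0.0, 10.0])
ell_B = backproject((600.0, 480.0), d=9.5, K=K, R_CB=R_CB, t_CB=t_CB)
print(ell_B)
```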
110 |
111 | Please refer to our [Google Colaboratory demo](https://colab.research.google.com/drive/1A79acc-RwQG2X1OoNd_UL1fT7T3FNtpN?usp=sharing) for more details.
112 |
113 | # Dataset tools
114 |
115 | We will release tools for manipulating (e.g., splitting and stitching) and generating the AstroVision datasets in a future update.
116 |
117 | # Projects that have used AstroVision
118 |
119 | Below is a list of papers that have utilized the AstroVision datasets:
120 |
121 | - [Deep Monocular Hazard Detection for Safe Small Body Landing](https://arxiv.org/abs/2301.13254). AIAA/AAS Space Flight Mechanics Meeting, 2023.
122 | - [Efficient Feature Description for Small Body Relative Navigation using Binary Convolutional Neural Networks](https://arxiv.org/abs/2304.04985). AAS Guidance, Navigation, and Control (GN&C) Conference, 2023.
123 |
--------------------------------------------------------------------------------
/download_astrovision.py:
--------------------------------------------------------------------------------
1 | """Script for downloading AstroVision datasets from Hugging Face.
2 |
3 | Author: Travis Driver
4 | """
5 |
6 | import sys
7 | import os
8 | import json
9 | from pathlib import Path
10 |
11 | import argparse
12 | import zipfile
13 |
14 | from huggingface_hub import hf_hub_download
15 |
16 |
17 | DEFAULT_DATA_ROOT = str(Path(__file__).resolve().parent / "data")
18 |
19 |
20 | def unzip_file(in_path: str, out_dirpath: str) -> None:
21 |     """Unzips the file at `in_path` to the directory `out_dirpath`."""
22 | with zipfile.ZipFile(in_path, "r") as zip_ref:
23 | sys.stdout.write(f"\rUnzipping {in_path}...")
24 | zip_ref.extractall(out_dirpath)
25 | sys.stdout.write(f"\rUnzipping {in_path}... Done.\n")
26 | sys.stdout.flush()
27 |
28 |
29 | def main() -> None:
30 | """Program entrance."""
31 | # Parse arguments.
32 | parser = argparse.ArgumentParser("Download AstroVision datasets.")
33 | parser.add_argument("--local_dir", type=str, default=DEFAULT_DATA_ROOT, help="Path where data will be saved.")
34 | parser.add_argument("--test", action="store_true", help="Download test clusters.")
35 | parser.add_argument("--train", action="store_true", help="Download train clusters.")
36 | parser.add_argument(
37 | "--clusters", action="store_true", help="Download clusters (see lists/clusters.json for valid cluster names)."
38 | )
39 | parser.add_argument(
40 | "--segments", action="store_true", help="Download segments (see lists/segments.json for valid segment names)."
41 | )
42 | parser.add_argument(
43 | "--dataset_name", type=str, default=None, help="Name of dataset to download (e.g., `dawn_vesta`)."
44 | )
45 | parser.add_argument(
46 | "--segment_name", type=str, default=None, help="Name of segment to download (e.g., `2011205_rc3`)."
47 | )
48 | parser.add_argument(
49 | "--cluster_name", type=str, default=None, help="Name of cluster to download (e.g., `00000007`)."
50 | )
51 | parser.add_argument("--unpack_off", action="store_true", help="Do not un-zip the folders after download.")
52 | args = parser.parse_args()
53 |
54 | # Initialize download URLs.
55 | dataset_urls_dict = {}
56 | if args.test: # download testing data
57 | download_root = ""
58 | with open(os.path.join("lists", "test.json")) as fin:
59 | dataset_urls_dict = json.load(fin)
60 | elif args.train: # download training data
61 | download_root = ""
62 | with open(os.path.join("lists", "train.json")) as fin:
63 | dataset_urls_dict = json.load(fin)
64 | elif args.clusters: # download clusters
65 | download_root = "clusters"
66 | with open(os.path.join("lists", "clusters.json")) as fin:
67 | dataset_urls_dict = json.load(fin)
68 | if args.dataset_name is not None:
69 | train_clusters = {"train/" + args.dataset_name: dataset_urls_dict["train"][args.dataset_name]}
70 | test_clusters = {"test/" + args.dataset_name: dataset_urls_dict["test"][args.dataset_name]}
71 | dataset_urls_dict = {**train_clusters, **test_clusters}
72 | if args.cluster_name is not None:
73 | if args.cluster_name in train_clusters["train/" + args.dataset_name]:
74 | dataset_urls_dict = {"train/" + args.dataset_name: [args.cluster_name]}
75 | elif args.cluster_name in test_clusters["test/" + args.dataset_name]:
76 | dataset_urls_dict = {"test/" + args.dataset_name: [args.cluster_name]}
77 | else:
78 | raise ValueError(f"No cluster with name {args.cluster_name} exists in {args.dataset_name}.")
79 | else: # download all clusters
80 | train_clusters = {"train/" + dname: clusters for (dname, clusters) in dataset_urls_dict["train"].items()}
81 | test_clusters = {"test/" + dname: clusters for (dname, clusters) in dataset_urls_dict["test"].items()}
82 | dataset_urls_dict = {**train_clusters, **test_clusters}
83 | elif args.segments: # download segments
84 | download_root = "segments"
85 | with open(os.path.join("lists", "segments.json")) as fin:
86 | dataset_urls_dict = json.load(fin)
87 | if args.dataset_name is not None:
88 | if args.dataset_name not in dataset_urls_dict:
89 | raise ValueError(
90 | f"No dataset with name {args.dataset_name}. See lists/segments.json for available datasets."
91 | )
92 | dataset_urls_dict = {args.dataset_name: dataset_urls_dict[args.dataset_name]}
93 | if args.segment_name is not None:
94 | if args.segment_name in dataset_urls_dict[args.dataset_name]:
95 | dataset_urls_dict = {args.dataset_name: [args.segment_name]}
96 | else:
97 | raise ValueError(f"No segment with name {args.segment_name} exists in {args.dataset_name}.")
98 |
99 | # Loop through URLs and download.
100 | os.makedirs(args.local_dir, exist_ok=True)
101 | for dataset_name, segment_names in dataset_urls_dict.items():
102 | for sname in segment_names:
103 | fpath = os.path.join(download_root, dataset_name, sname + ".zip")
104 | hf_hub_download(
105 | repo_id="travisdriver/astrovision-data",
106 | filename=fpath,
107 | repo_type="dataset",
108 | local_dir=args.local_dir,
109 | local_dir_use_symlinks=False,
110 | )
111 | if not args.unpack_off:
112 | unzip_file(os.path.join(args.local_dir, fpath), os.path.dirname(os.path.join(args.local_dir, fpath)))
113 |
114 |
115 | if __name__ == "__main__":
116 | main()
117 |
--------------------------------------------------------------------------------
/lists/clusters.json:
--------------------------------------------------------------------------------
1 | {
2 | "train": {
3 | "dawn_ceres": [],
4 | "dawn_vesta": [
5 | "00000000",
6 | "00000001",
7 | "00000002",
8 | "00000003",
9 | "00000004",
10 | "00000005",
11 | "00000006",
12 | "00000008",
13 | "00000009",
14 | "00000010",
15 | "00000012",
16 | "00000013",
17 | "00000015",
18 | "00000016",
19 | "00000017",
20 | "00000018",
21 | "00000019",
22 | "00000020",
23 | "00000021",
24 | "00000023",
25 | "00000024",
26 | "00000025",
27 | "00000026",
28 | "00000027",
29 | "00000028",
30 | "00000029"
31 | ],
32 | "orex_bennu": [
33 | "00000000",
34 | "00000001",
35 | "00000002",
36 | "00000003",
37 | "00000004",
38 | "00000005",
39 | "00000006",
40 | "00000008",
41 | "00000009",
42 | "00000010",
43 | "00000012",
44 | "00000013",
45 | "00000015",
46 | "00000016",
47 | "00000017",
48 | "00000018",
49 | "00000019",
50 | "00000020",
51 | "00000021",
52 | "00000023",
53 | "00000024",
54 | "00000025",
55 | "00000026",
56 | "00000027",
57 | "00000028",
58 | "00000029",
59 | "00000031",
60 | "00000033",
61 | "00000034",
62 | "00000035",
63 | "00000036",
64 | "00000037",
65 | "00000038",
66 | "00000040",
67 | "00000041",
68 | "00000042",
69 | "00000043",
70 | "00000044",
71 | "00000045",
72 | "00000046",
73 | "00000047",
74 | "00000048",
75 | "00000049",
76 | "00000050",
77 | "00000051",
78 | "00000052",
79 | "00000053",
80 | "00000054",
81 | "00000055",
82 | "00000056",
83 | "00000057",
84 | "00000058",
85 | "00000059",
86 | "00000060",
87 | "00000061",
88 | "00000062",
89 | "00000063"
90 | ],
91 | "rosetta_67p": [],
92 | "rosiris_67p": [],
93 | "near_eros": []
94 | },
95 | "test": {
96 | "dawn_ceres": [
97 | "00000032"
98 | ],
99 | "dawn_vesta": [
100 | "00000032"
101 | ],
102 | "orex_bennu": [
103 | "00000007",
104 | "00000011",
105 | "00000014",
106 | "00000022",
107 | "00000030",
108 | "00000032",
109 | "00000039"
110 | ],
111 | "rosetta_67p": [
112 | "00000032"
113 | ],
114 | "rosiris_67p": [
115 | "00000032"
116 | ],
117 | "near_eros": []
118 | }
119 | }
--------------------------------------------------------------------------------
/lists/segments.json:
--------------------------------------------------------------------------------
1 | {
2 | "dawn_ceres": [
3 | "2015293_c6_orbit125"
4 | ],
5 | "dawn_vesta": [
6 | "2011205_rc3",
7 | "2011205_rc3b"
8 | ],
9 | "orex_bennu": [],
10 | "rosetta_67p": [],
11 | "rosiris_67p": [],
12 | "near_eros": [],
13 | "haya_itokawa": [
14 | "20050909_20051119"
15 | ],
16 | "cas_mimas": [
17 | "sbmt"
18 | ],
19 | "cas_tethys": [],
20 | "cas_dione": [],
21 | "cas_rhea": [],
22 | "cas_janus": [],
23 | "cas_phoebe": [],
24 | "cas_epim": [
25 | "opus"
26 | ],
27 | "mexp_phobos": [],
28 | "rosetta_lutetia": [
29 | "sbmt"
30 | ],
31 | "haya2_ryugu": []
32 | }
--------------------------------------------------------------------------------
/lists/test.json:
--------------------------------------------------------------------------------
1 | {
2 | "clusters/test/dawn_ceres": [
3 | "00000032"
4 | ],
5 | "clusters/test/dawn_vesta": [
6 | "00000032"
7 | ],
8 | "clusters/test/orex_bennu": [
9 | "00000032"
10 | ],
11 | "clusters/test/rosetta_67p": [
12 | "00000032"
13 | ],
14 | "clusters/test/rosiris_67p": [
15 | "00000032"
16 | ],
17 | "segments/haya_itokawa": [
18 | "20050909_20051119"
19 | ],
20 | "segments/cas_epim": [
21 | "opus"
22 | ],
23 | "segments/cas_mimas": [
24 | "sbmt"
25 | ],
26 | "segments/rosetta_lutetia": [
27 | "sbmt"
28 | ]
29 | }
--------------------------------------------------------------------------------
/lists/train.json:
--------------------------------------------------------------------------------
1 | {
2 | "clusters/train/dawn_ceres": [],
3 | "clusters/train/dawn_vesta": [
4 | "00000000"
5 | ],
6 | "clusters/train/orex_bennu": [],
7 | "clusters/train/rosetta_67p": [],
8 | "clusters/train/rosiris_67p": [],
9 | "clusters/train/near_eros": [],
10 | "segments/cas_tethys": [],
11 | "segments/cas_dione": [],
12 | "segments/cas_rhea": [],
13 | "segments/cas_janus": [],
14 | "segments/cas_phoebe": [],
15 | "segments/mexp_phobos": [],
16 | "segments/haya2_ryugu": []
17 | }
--------------------------------------------------------------------------------